[Pkg-ofed-commits] r299 - in branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current: . .git/logs .git/logs/refs/heads .git/objects/info .git/refs/heads .git/refs/tags drivers/infiniband/debug drivers/infiniband/ulp/sdp drivers/net/mlx4 fs kernel_addons/backport kernel_addons/backport/2.6.16/include/linux kernel_addons/backport/2.6.16_sles10/include/linux kernel_addons/backport/2.6.16_sles10_sp1/include/linux kernel_addons/backport/2.6.16_sles10_sp2/include/asm-generic kernel_addons/backport/2.6.16_sles10_sp2/include/linux kernel_addons/backport/2.6.16_sles10_sp2/include/net kernel_addons/backport/2.6.16_sles10_sp2/include/src kernel_addons/backport/2.6.17/include/linux kernel_addons/backport/2.6.18/include/linux kernel_addons/backport/2.6.18-EL5.1/include kernel_addons/backport/2.6.18-EL5.1/include/asm kernel_addons/backport/2.6.18-EL5.1/include/asm-generic kernel_addons/backport/2.6.18-EL5.1/include/linux kernel_addons/backport/2.6.18-EL5.1/include/linux/unaligned kernel_addons/backport/2.6.18-EL5.1/include/net kernel_addons/backport/2.6.18-EL5.1/include/scsi kernel_addons/backport/2.6.18-EL5.1/include/src kernel_addons/backport/2.6.18-EL5.2/include kernel_addons/backport/2.6.18-EL5.2/include/asm kernel_addons/backport/2.6.18-EL5.2/include/asm-generic kernel_addons/backport/2.6.18-EL5.2/include/linux kernel_addons/backport/2.6.18-EL5.2/include/linux/unaligned kernel_addons/backport/2.6.18-EL5.2/include/net kernel_addons/backport/2.6.18-EL5.2/include/scsi kernel_addons/backport/2.6.18-EL5.2/include/src kernel_addons/backport/2.6.18-EL5.3 kernel_addons/backport/2.6.18-EL5.3/include kernel_addons/backport/2.6.18-EL5.3/include/asm kernel_addons/backport/2.6.18-EL5.3/include/asm-generic kernel_addons/backport/2.6.18-EL5.3/include/linux kernel_addons/backport/2.6.18-EL5.3/include/linux/unaligned kernel_addons/backport/2.6.18-EL5.3/include/net kernel_addons/backport/2.6.18-EL5.3/include/scsi kernel_addons/backport/2.6.18-EL5.3/include/src kernel_addons/backport/2.6.18_FC6/include/linux kernel_addons/backport/2.6.18_suse10_2/include/linux kernel_addons/backport/2.6.19/include/linux kernel_addons/backport/2.6.20/include/linux kernel_addons/backport/2.6.21/include/linux kernel_addons/backport/2.6.22/include kernel_addons/backport/2.6.22/include/linux kernel_addons/backport/2.6.22/include/net kernel_addons/backport/2.6.22/include/src kernel_addons/backport/2.6.22_suse10_3/include/linux kernel_addons/backport/2.6.24/include kernel_addons/backport/2.6.24/include/asm kernel_addons/backport/2.6.24/include/linux kernel_addons/backport/2.6.24/include/linux/unaligned kernel_addons/backport/2.6.25/include kernel_addons/backport/2.6.25/include/asm kernel_addons/backport/2.6.25/include/linux kernel_addons/backport/2.6.25/include/linux/unaligned kernel_addons/backport/2.6.25/include/net kernel_addons/backport/2.6.25/include/src kernel_addons/backport/2.6.26/include/linux kernel_addons/backport/2.6.9_U4/include/linux kernel_addons/backport/2.6.9_U5/include/linux kernel_addons/backport/2.6.9_U6/include/linux kernel_addons/backport/2.6.9_U7/include/linux kernel_patches/backport kernel_patches/backport/2.6.16 kernel_patches/backport/2.6.16_sles10 kernel_patches/backport/2.6.16_sles10_sp1 kernel_patches/backport/2.6.16_sles10_sp2 kernel_patches/backport/2.6.17 kernel_patches/backport/2.6.18 kernel_patches/backport/2.6.18-EL5.1 kernel_patches/backport/2.6.18-EL5.2 kernel_patches/backport/2.6.18-EL5.3 kernel_patches/backport/2.6.18_FC6 kernel_patches/backport/2.6.18_suse10_2 kernel_patches/backport/2.6.19 
kernel_patches/backport/2.6.20 kernel_patches/backport/2.6.21 kernel_patches/backport/2.6.22 kernel_patches/backport/2.6.22_suse10_3 kernel_patches/backport/2.6.23 kernel_patches/backport/2.6.24 kernel_patches/backport/2.6.25 kernel_patches/backport/2.6.26 kernel_patches/backport/2.6.27_sles11 kernel_patches/backport/2.6.9_U4 kernel_patches/backport/2.6.9_U5 kernel_patches/backport/2.6.9_U6 kernel_patches/backport/2.6.9_U7 kernel_patches/fixes net/rds ofed_scripts

Guy Coates <gmpc-guest@alioth.debian.org>
Sat May 30 10:58:06 UTC 2009


Author: gmpc-guest
Date: 2009-05-30 10:58:06 +0000 (Sat, 30 May 2009)
New Revision: 299

Added:
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/objects/info/alternates
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.24-rc1
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.24-rc4
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.24-rc6
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.24-rc7
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.24-rc8
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc1
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc2
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc3
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc4
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc5
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc6
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc8
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc9
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc1
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc2
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc3
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc4
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc5
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc7
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc8
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc9
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.27-rc6
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.3.1-rc1
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4.1-rc1
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4.1-rc2
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4.1-rc3
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4.1-rc4
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4.1-rc5
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4.1-rc6
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_frag.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_lro.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp1/include/linux/spinlock_types.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/asm-generic/atomic.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/backing-dev.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/capability.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/completion.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/dcache.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/err.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/exportfs.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/file.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/gfp.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/highmem.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/magic.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/mpage.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/namei.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/pagemap.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/proc_fs.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/radix-tree.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/sched.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/security.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/seq_file.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/string.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/swap.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/wait.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/net/udp.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/src/namespace.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/src/socket.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/src/strndup.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/src/writeback.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/asm-generic/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/asm-generic/atomic.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/asm/unaligned.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/backing-dev.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/capability.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/completion.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/err.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/fcntl.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/file.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/highmem.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/magic.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/mount.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/mpage.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/namei.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/pagemap.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/proc_fs.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/radix-tree.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/sched.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/security.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/seq_file.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/string.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/swap.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/unaligned/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/unaligned/access_ok.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/wait.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/net/rtnetlink.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/net/udp.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/scsi/scsi_device.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/scsi/scsi_host.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/scsi/scsi_transport.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/src/namespace.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/src/writeback.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/asm-generic/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/asm-generic/atomic.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/asm/unaligned.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/backing-dev.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/capability.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/completion.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/err.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/file.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/freezer.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/fs.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/highmem.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/jiffies.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/list.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/magic.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/mount.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/mpage.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/namei.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/pagemap.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/path.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/proc_fs.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/radix-tree.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/sched.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/security.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/seq_file.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/string.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/swap.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/unaligned/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/unaligned/access_ok.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/wait.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/net/ipv6.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/net/rtnetlink.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/net/udp.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/scsi/scsi_device.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/scsi/scsi_transport.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/src/namespace.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/src/writeback.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/asm-generic/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/asm-generic/atomic.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/asm/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/asm/prom.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/asm/scatterlist.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/asm/unaligned.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/backing-dev.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/capability.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/compiler.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/completion.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/crypto.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/dma-attrs.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/dma-mapping.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/etherdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/file.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/freezer.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/fs.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/genalloc.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/if.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/if_ether.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/if_vlan.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/in.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/inet.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/inet_lro.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/inetdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/interrupt.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/jiffies.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/kernel.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/kobject.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/list.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/log2.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/magic.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/mm.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/mount.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/mpage.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/namei.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/net.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/netdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/netlink.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/notifier.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/pagemap.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/path.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/pci.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/proc_fs.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/random.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/rbtree.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/rculist.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/scatterlist.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/sched.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/security.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/semaphore.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/seq_file.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/skbuff.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/slab.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/smp_lock.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/string.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/swap.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/sysctl.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/types.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/unaligned/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/unaligned/access_ok.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/wait.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/workqueue.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/checksum.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/ip.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/ipv6.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/neighbour.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/net_namespace.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/route.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/rtnetlink.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/udp.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/scsi/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/scsi/scsi.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/scsi/scsi_cmnd.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/scsi/scsi_device.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/scsi/scsi_transport.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/src/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/src/genalloc.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/src/namespace.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/src/writeback.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18_FC6/include/linux/list.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/backing-dev.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/capability.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/completion.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/err.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/file.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/freezer.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/fs.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/highmem.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/jiffies.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/kernel.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/mount.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/namei.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/net.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/pagemap.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/path.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/proc_fs.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/radix-tree.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/sched.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/security.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/seq_file.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/string.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/swap.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/wait.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/net/ipv6.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/net/udp.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/src/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/src/namespace.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.24/include/asm/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.24/include/asm/unaligned.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.24/include/linux/unaligned/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.24/include/linux/unaligned/access_ok.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/asm/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/asm/unaligned.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/backing-dev.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/file.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/fs.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/kernel.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/mount.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/proc_fs.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/unaligned/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/unaligned/access_ok.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/net/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/net/ipv6.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/src/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/src/namespace.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/core_2_z010_sysfs_to_2.6.18.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/core_z0010_sysfs_race.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/iw_nes_210_to_2_6_21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/mlx4_en_0099_no_multiqueue.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/core_2_z010_sysfs_to_2.6.18.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/core_z0010_sysfs_race.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/iw_nes_210_to_2_6_21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/mlx4_en_0099_no_multiqueue.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/core_2_z010_sysfs_to_2.6.18.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/core_z0010_sysfs_race.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/iw_nes_210_to_2_6_21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/mlx4_en_0099_no_multiqueue.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/core_2_z010_sysfs_to_2.6.18.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/core_z0010_sysfs_race.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/iw_nes_210_to_2_6_21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/mlx4_en_0099_no_multiqueue.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/core_2_z010_sysfs_to_2.6.18.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/core_z0010_sysfs_race.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/iw_nes_210_to_2_6_21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/mlx4_en_0099_no_multiqueue.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/core_2_z010_sysfs_to_2.6.18.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/core_z0010_sysfs_race.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/iw_nes_210_to_2_6_21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/mlx4_en_0099_no_multiqueue.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/core_2_z010_sysfs_to_2.6.18.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/core_z0010_sysfs_race.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/iw_nes_210_to_2_6_21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/mlx4_en_0099_no_multiqueue.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/rnfs_fs.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/2_misc_device_to_2_6_19.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cma_to_2_6_23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_0_sysfs_to_2_6_25.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_1_kobject_backport.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_2_kobject_unregister_to_2_6_24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_2_z010_sysfs_to_2.6.18.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_3_sysfs_to_2_6_18.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_undo_weak_ordering.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_z0010_sysfs_race.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0002_undo_250.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0004_undo_240.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0008_pci_dma_mapping_error_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0010_napi.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0020_sysfs.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0030_sset.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0100_remove_lro.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0110_provider_sysfs.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/dma_mapping_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ehca-00-revert_inhibit_dmem.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ehca-01-ibmebus_loc_code.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ehca_02_revert_interface_change.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0010_revert_pid.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0020_class_dev_to_device.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0030_revert_sysfs.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0040_nopage_to_fault.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0050_aio_write.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0060_htirq.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0070_vmalloc_user-2.6.18.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0080_sysfs.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0095_pat.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_0100_to_2.6.21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_0110_restore_get_stats.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_0900_netif_lock_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_class_device_to_2_6_20.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_class_device_to_2_6_20_umcast.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_lro_to_2.6.23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_mcast_set_pkey_to_2_6_24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_skb_to_2_6_20.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_x_neigh_cleanup.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iscsi_02_count_fmr_align_violations.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iser_01_sync_kernel_code_with_2.6.26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iser_02_fix_iscsi_if_h.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iw_cxgb3_0010_states.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iw_nes_050_to_2_6_24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iw_nes_100_to_2_6_23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iw_nes_200_to_2_6_22.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iw_nes_210_to_2_6_21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/linux_genalloc_to_2_6_20.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_0030_smp_call_function.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_0040_pci_dma_mapping_error_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_0050_wc.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_0060_sysfs.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_en_0099_no_multiqueue.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_en_0100_to_2.6.24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_en_0110_no_set_flags.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_en_0120_ethtool_interface.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mthca_0000_pci_dma_mapping_error_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mthca_0001_pcix_to_2_6_22.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mthca_0010_sysfs.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/nes_0010_sysfs.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/qlgc_vnic_01_class_device.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/qlgc_vnic_02_dev_stats.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/rds_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/rnfs_fs.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/sdp_0090_revert_to_2_6_24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/sdp_0100_revert_to_2_6_23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/sdp_0120_revert_2_6_27_to_2_6_24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/srp_0100_revert_role_to_2_6_23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/srp_0200_revert_srp_transport_to_2.6.23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/srp_class_device_if.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/srp_cmd_to_2_6_22.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/srpt_class_dev.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/uverbs_main_1_to_2_6_24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/uverbs_main_2_to_2_6_19.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/core_2_z010_sysfs_to_2.6.18.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/core_z0010_sysfs_race.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/iw_nes_210_to_2_6_21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/mlx4_en_0099_no_multiqueue.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/core_2_z010_sysfs_to_2.6.18.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/core_z0010_sysfs_race.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/iw_nes_210_to_2_6_21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/mlx4_en_0099_no_multiqueue.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/core_2_z010_sysfs_to_2.6.18.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/core_z0010_sysfs_race.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/iw_nes_210_to_2_6_21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/mlx4_en_0099_no_multiqueue.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/iw_nes_210_to_2_6_21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/mlx4_en_0099_no_multiqueue.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/iw_nes_210_to_2_6_21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/mlx4_en_0099_no_multiqueue.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/iw_nes_210_to_2_6_21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/mlx4_en_0099_no_multiqueue.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/mlx4_en_0099_no_multiqueue.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/rnfs_fs.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/mlx4_en_0099_no_multiqueue.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/mlx4_en_0099_no_multiqueue.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.24/mlx4_en_0099_no_multiqueue.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.25/mlx4_en_0099_no_multiqueue.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.25/rnfs_fs.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.26/mlx4_en_0099_no_multiqueue.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.27_sles11/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.27_sles11/to_sles11.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/core_2_z010_sysfs_to_2.6.18.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/core_z0010_sysfs_race.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/ipoib_x_001_2_6_9_disable_coal.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/iw_nes_210_to_2_6_21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/mlx4_en_0099_no_multiqueue.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/core_2_z010_sysfs_to_2.6.18.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/core_z0010_sysfs_race.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/ipoib_x_001_2_6_9_disable_coal.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/iw_nes_210_to_2_6_21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/mlx4_en_0099_no_multiqueue.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/core_2_z010_sysfs_to_2.6.18.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/core_z0010_sysfs_race.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/ipoib_x_001_2_6_9_disable_coal.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/iw_nes_210_to_2_6_21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/mlx4_en_0099_no_multiqueue.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/core_2_z010_sysfs_to_2.6.18.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/core_z0010_sysfs_race.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/ipoib_x_001_2_6_9_disable_coal.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/iw_nes_210_to_2_6_21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/mlx4_en_0099_no_multiqueue.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/cma_0200_create_cm_id_even_when_port_is_down.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/cma_0300_use_rate_from_ipoib_bcast_when_join_mcast.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0220_sysfs_lifetime.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0230_Fix_RMPP_header_RRespTime_manipulation.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0240_fix_null_pointer_dereference_in_local_completions.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0250_initialize_mad_agent_priv_before_putting_on_lists.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0260_ib_post_send_mad_returns_zero_no_completion.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0270_sa_query_update_sm_ah_race.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0280_warnings_fs_nfs_nfsroot.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/cxgb3_00600_sfpplus.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/cxgb3_00700_firmware_to_7_4.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0090_mr_refcount.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0100_prefunit.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0110_sdmagenmismatch.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0120_ipath_do_user_init.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0130_freezemode.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0140_pcie_coalesce.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0150_ibc_back_to_back_fix.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0160_rc_spinlock.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0170_ruc_loopback.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0180_dma_mapping_error.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0190_rwqe_error_fix.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0200_rcvhdrqalign.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0205_user_sdma_generation.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0210_bad_unmap_fix.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0220_bad_dma_free_coherent_fix.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0230_rc_send_comp_len.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0240_rc_mr_refcount.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0250_rc_src_path.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0260_sdma_callback.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0270_mr_zero_refcount.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0280_user_sdma_head_optim.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0290_fix_rc_unaligned_pkts.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0300_limit_stats_output.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0310_rdma_read_mr.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0320_fix_gsi_pkey_index.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0330_rc_send_mr.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0340_pkey_change_event.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0350_iowrite32_copy_x86_64.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0360_rdma_read.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0520_join_task_tempfix.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0530_unicast_crash.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0540_mcast_attach_ignore_eagain.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0550_fix_napi_poll_race.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0560_clear_admin_up_flag.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0200_sgl_pbl_offset-calculation_needs_64_bits.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0300_connection_termination_fixes.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0400_remove_modulo_math_from_build_rdma_recv.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0500_Release_dependent_resources.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0600_Adjust_ordird_if_needed_for_peer2peer_connections.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0700_Dont_zero_the_qp_attrs_when_moving_to_IDLE.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0800_flush_sq_fix.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0640_port_configuration_documentation.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0650_fix_taking_SL_field_of_cqe_sl_vid.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0660_no_IB_device_registration_without_ib_ports.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0670_invoke_sense_function_from_sysfs_context.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1010_Fibre-Channel-support.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1020_query_internal_dev.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1030_register_vlan_fix.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1050_remove_fexch_reservation.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1060_set_4k_mtu.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1070-optimize-huge_tlb.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1080_no_bf_without_wc.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1090_do_not_set_ethernet_port.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1100-mtt_seg_size_param.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1100_unregister_IB_device_before_CLOSE_PORT.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1110_added_transceiver_type_to_QUERY_PORT.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1120_fast_reg.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mthca_0030-mtt_seg_size_param.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mthca_0040_fix_cmd_timeouts.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0130_pbl_accounting.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0140_new_pbl_scheme.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0150_ibv_devinfo.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0160_aeq.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0170_cleanup_tx.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0180_lsmm.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0190_dev_alloc_skb.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0200_dyn_conn.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0210_no_proc_fin.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0220_sfp_plus.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0230_bnt.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0240_mem_reg.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0250_cast.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0260_version.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0270_netdev_stop.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0280_sfp_plus_d.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0290_rc4.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0300_1_inch.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0310_no_dyn_int_mod.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0320_fmr.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0330_init_rd_atom.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_to_2.6.28_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_to_2.6.29_00.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_to_2.6.29_01.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_to_2.6.29_02.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_to_2.6.29_03.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_to_2.6.29_04.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_zz_0100_lockd_ref_fix.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_zz_0200_iova_truncate_fix.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_zz_0300_dma_direction.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_zz_0400_clean_up_error_paths.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_zz_0500_unmap_len_fix.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_zz_0600_access_local_write_cx.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_cm.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_rdma.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_recv.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_ring.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_send.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_stats.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_sysctl.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/rdma_transport.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/rdma_transport.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/rds_rdma.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/ib_ipoib.conf
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/ib_ipoib_sysctl
Removed:
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/fs/nfsctl.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/lockd/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/nfs_fs.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/nfs_mount.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/nfsd/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/sunrpc/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/lockd/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/nfs_fs.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/nfs_fs_sb.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/nfsd/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/sunrpc/
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/rnfs_net.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/rnfs_net.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0410_enable_lro.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0080_accept_pend_cnt.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0120_free_skb.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_rds.h
Modified:
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/logs/HEAD
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/logs/refs/heads/master
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/logs/refs/heads/ofed_kernel
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/heads/master
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/heads/ofed_kernel
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/BUILD_ID
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/debug/memtrack.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/debug/memtrack.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/debug/mtrack.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/ulp/sdp/sdp.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/ulp/sdp/sdp_bcopy.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/ulp/sdp/sdp_cma.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/ulp/sdp/sdp_main.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_cq.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_main.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_netdev.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_params.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_port.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_resources.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_rx.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_tx.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/mlx4_en.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16/include/linux/netdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10/include/linux/inetdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10/include/linux/netdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp1/include/linux/inetdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp1/include/linux/log2.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp1/include/linux/netdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/freezer.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/fs.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/inet.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/inetdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/kallsyms.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/kernel.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/log2.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/mm.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/mount.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/net.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/netdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/path.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/scatterlist.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/skbuff.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/sysctl.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/types.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/workqueue.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/net/ipv6.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.17/include/linux/netdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/crypto.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/fs.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/if_vlan.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/inet.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/kernel.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/list.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/log2.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/mm.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/net.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/netdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/path.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/scatterlist.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/skbuff.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/sysctl.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/workqueue.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/crypto.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/if_vlan.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/inet.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/kernel.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/kobject.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/log2.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/mm.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/net.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/netdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/scatterlist.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/skbuff.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/sysctl.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/workqueue.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18/include/linux/netdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18_FC6/include/linux/log2.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18_FC6/include/linux/netdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18_suse10_2/include/linux/netdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.19/include/linux/netdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.20/include/linux/netdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.21/include/linux/netdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/mm.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/netdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/scatterlist.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/workqueue.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22_suse10_3/include/linux/netdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.24/include/linux/if_vlan.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/if_vlan.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.26/include/linux/if_vlan.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U4/include/linux/inetdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U4/include/linux/netdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U5/include/linux/inetdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U5/include/linux/netdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U6/include/linux/inetdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U6/include/linux/netdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U7/include/linux/inetdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U7/include/linux/netdevice.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/cxgb3_0002_undo_250.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/cxgb3_0010_napi.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/ipoib_0100_to_2.6.21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/iw_nes_050_to_2_6_24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/iw_nes_100_to_2_6_23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/iw_nes_200_to_2_6_22.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/mlx4_en_0100_to_2.6.24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/mlx4_en_0110_no_set_flags.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/cxgb3_0002_undo_250.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/cxgb3_0010_napi.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/ipoib_0100_to_2.6.21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/iw_nes_050_to_2_6_24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/iw_nes_100_to_2_6_23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/iw_nes_200_to_2_6_22.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/mlx4_en_0100_to_2.6.24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/mlx4_en_0110_no_set_flags.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/cxgb3_0002_undo_250.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/cxgb3_0010_napi.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/ipoib_0100_to_2.6.21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/iw_nes_050_to_2_6_24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/iw_nes_100_to_2_6_23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/iw_nes_200_to_2_6_22.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/mlx4_en_0100_to_2.6.24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/mlx4_en_0110_no_set_flags.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/cxgb3_0002_undo_250.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/cxgb3_0010_napi.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/ipoib_0100_to_2.6.21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/iw_nes_050_to_2_6_24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/iw_nes_100_to_2_6_23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/iw_nes_200_to_2_6_22.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/mlx4_en_0100_to_2.6.24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/mlx4_en_0110_no_set_flags.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/rnfs_fs.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/cxgb3_0002_undo_250.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/cxgb3_0010_napi.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/ipoib_0100_to_2.6.21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/iw_nes_050_to_2_6_24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/iw_nes_100_to_2_6_23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/iw_nes_200_to_2_6_22.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/mlx4_en_0100_to_2.6.24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/mlx4_en_0110_no_set_flags.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/cxgb3_0002_undo_250.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/cxgb3_0010_napi.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/ipoib_0100_to_2.6.21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/iw_nes_050_to_2_6_24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/iw_nes_100_to_2_6_23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/iw_nes_200_to_2_6_22.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/mlx4_en_0100_to_2.6.24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/mlx4_en_0110_no_set_flags.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/rnfs_fs.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/cxgb3_0002_undo_250.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/cxgb3_0010_napi.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/ipoib_0100_to_2.6.21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/iw_nes_050_to_2_6_24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/iw_nes_100_to_2_6_23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/iw_nes_200_to_2_6_22.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/mlx4_en_0100_to_2.6.24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/mlx4_en_0110_no_set_flags.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/cxgb3_0002_undo_250.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/cxgb3_0010_napi.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/ipoib_0100_to_2.6.21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/iw_nes_050_to_2_6_24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/iw_nes_100_to_2_6_23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/iw_nes_200_to_2_6_22.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/mlx4_en_0100_to_2.6.24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/mlx4_en_0110_no_set_flags.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/cxgb3_0002_undo_250.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/cxgb3_0010_napi.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/ipoib_0100_to_2.6.21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/iw_nes_050_to_2_6_24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/iw_nes_100_to_2_6_23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/iw_nes_200_to_2_6_22.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/mlx4_en_0100_to_2.6.24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/mlx4_en_0110_no_set_flags.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/cxgb3_0002_undo_250.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/cxgb3_0010_napi.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/ipoib_0100_to_2.6.21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/iw_nes_050_to_2_6_24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/iw_nes_100_to_2_6_23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/iw_nes_200_to_2_6_22.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/mlx4_en_0100_to_2.6.24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/mlx4_en_0110_no_set_flags.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/cxgb3_0002_undo_250.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/cxgb3_0010_napi.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/ipoib_0100_to_2.6.21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/iw_nes_050_to_2_6_24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/iw_nes_100_to_2_6_23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/iw_nes_200_to_2_6_22.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/mlx4_en_0100_to_2.6.24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/mlx4_en_0110_no_set_flags.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/cxgb3_0002_undo_250.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/cxgb3_0010_napi.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/ipoib_0100_to_2.6.21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/iw_nes_050_to_2_6_24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/iw_nes_100_to_2_6_23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/iw_nes_200_to_2_6_22.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/mlx4_en_0100_to_2.6.24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/mlx4_en_0110_no_set_flags.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/cxgb3_0002_undo_250.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/cxgb3_0010_napi.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/ipoib_0100_to_2.6.21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/iw_nes_050_to_2_6_24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/iw_nes_100_to_2_6_23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/iw_nes_200_to_2_6_22.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/mlx4_en_0100_to_2.6.24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/mlx4_en_0110_no_set_flags.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/cxgb3_0002_undo_250.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/cxgb3_0010_napi.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/ipoib_to_2.6.23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/iw_nes_050_to_2_6_24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/iw_nes_100_to_2_6_23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/iw_nes_200_to_2_6_22.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/mlx4_en_0100_to_2.6.24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/mlx4_en_0110_no_set_flags.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/cxgb3_0002_undo_250.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/cxgb3_0010_napi.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/ipoib_to_2.6.23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/iw_nes_050_to_2_6_24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/iw_nes_100_to_2_6_23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/iw_nes_200_to_2_6_22.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/mlx4_en_0100_to_2.6.24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/mlx4_en_0110_no_set_flags.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/cxgb3_0002_undo_250.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/cxgb3_0010_napi.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/ipoib_to_2.6.23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/iw_nes_050_to_2_6_24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/iw_nes_100_to_2_6_23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/mlx4_en_0100_to_2.6.24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/mlx4_en_0110_no_set_flags.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.24/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.24/iw_nes_050_to_2_6_24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.25/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.25/ipath_0095_pat.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.26/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/cxgb3_0002_undo_250.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/cxgb3_0010_napi.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/ipath_0100_iowrite32_copy.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/ipoib_0100_to_2.6.21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/iw_nes_050_to_2_6_24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/iw_nes_100_to_2_6_23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/iw_nes_200_to_2_6_22.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/iw_nes_300_to_2_6_13.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/iw_nes_400_to_2_6_9.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/mlx4_0060_sysfs.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/mlx4_en_0100_to_2.6.24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/mlx4_en_0110_no_set_flags.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/sdp_7277_to_2_6_11.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/t3_hw_to_2_6_5-7_244.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/cxgb3_0002_undo_250.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/cxgb3_0010_napi.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/ipath_0100_iowrite32_copy.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/ipoib_0100_to_2.6.21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/iw_nes_050_to_2_6_24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/iw_nes_100_to_2_6_23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/iw_nes_200_to_2_6_22.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/iw_nes_300_to_2_6_13.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/iw_nes_400_to_2_6_9.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/mlx4_0060_sysfs.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/mlx4_en_0100_to_2.6.24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/mlx4_en_0110_no_set_flags.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/sdp_7277_to_2_6_11.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/t3_hw_to_2_6_5-7_244.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/cxgb3_0002_undo_250.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/cxgb3_0010_napi.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/ipath_0100_iowrite32_copy.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/ipoib_0100_to_2.6.21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/iw_nes_050_to_2_6_24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/iw_nes_100_to_2_6_23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/iw_nes_200_to_2_6_22.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/iw_nes_300_to_2_6_13.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/iw_nes_400_to_2_6_9.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/mlx4_0060_sysfs.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/mlx4_en_0100_to_2.6.24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/mlx4_en_0110_no_set_flags.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/sdp_7277_to_2_6_11.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/t3_hw_to_2_6_5-7_244.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/cxgb3_0002_undo_250.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/cxgb3_0010_napi.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/ipath_0100_iowrite32_copy.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/ipoib_0100_to_2.6.21.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/iw_nes_050_to_2_6_24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/iw_nes_100_to_2_6_23.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/iw_nes_200_to_2_6_22.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/iw_nes_300_to_2_6_13.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/iw_nes_400_to_2_6_9.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/mlx4_0060_sysfs.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/mlx4_en_0100_to_2.6.24.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/mlx4_en_0110_no_set_flags.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/sdp_7277_to_2_6_11.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/t3_hw_to_2_6_5-7_244.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/cma_0100_unified_tcp_ports.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0050_pd_locking_fix.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0055_pat.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0500_do_not_join_broadcast_group_if_interface_is_brought_down.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0010_add_wc.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0420_Auto-negotiation-support.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0500_VPI_makefile.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0600_sense_from_sysfs_context.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0010_cqp_request.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0020_connected_nodes_list.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0030_tx-free_list.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0040_race_condition.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0050_stale_APBVT.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0060_TCP_compliance.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0070_cqp_avail_reqs.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0090_warning_cleanup.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0100_make_cm_node_loopback_check.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0110_copyright.patch
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/Kconfig
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/Makefile
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/af_rds.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/bind.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/cong.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/connection.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_cm.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_rdma.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_recv.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_ring.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_send.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_stats.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_sysctl.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/info.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/info.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/loop.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/loop.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/message.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/page.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/rdma.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/rdma.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/rds.h
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/recv.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/send.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/stats.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/sysctl.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/threads.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/transport.c
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/Makefile
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/checkout_files
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/configure
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/connectx_port_config
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/get_backport_dir.sh
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/makefile
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/mlx4_en_sysctl
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/ofa_kernel.spec
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/ofed_patch.sh
   branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/openibd
Log:
[svn-upgrade] Integrating new upstream version, ofa-kernel (1.4.1)

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/logs/HEAD
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/logs/HEAD	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/logs/HEAD	2009-05-30 10:58:06 UTC (rev 299)
@@ -1 +1 @@
-0000000000000000000000000000000000000000 88ab7955605c5e769e760f6bec980e0c2e72aa5c Vladimir Sokolovsky (Mellanox) <vlad at hosting.openfabrics.org> 1228913636 -0800
+0000000000000000000000000000000000000000 868661b127c355c64066a796460a7380a722dd84 Vladimir Sokolovsky (Mellanox) <vlad at hosting.openfabrics.org> 1243438058 -0700

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/logs/refs/heads/master
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/logs/refs/heads/master	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/logs/refs/heads/master	2009-05-30 10:58:06 UTC (rev 299)
@@ -1 +1 @@
-0000000000000000000000000000000000000000 88ab7955605c5e769e760f6bec980e0c2e72aa5c Vladimir Sokolovsky (Mellanox) <vlad at hosting.openfabrics.org> 1228913636 -0800
+0000000000000000000000000000000000000000 868661b127c355c64066a796460a7380a722dd84 Vladimir Sokolovsky (Mellanox) <vlad at hosting.openfabrics.org> 1243438058 -0700

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/logs/refs/heads/ofed_kernel
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/logs/refs/heads/ofed_kernel	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/logs/refs/heads/ofed_kernel	2009-05-30 10:58:06 UTC (rev 299)
@@ -1 +1 @@
-0000000000000000000000000000000000000000 88ab7955605c5e769e760f6bec980e0c2e72aa5c Vladimir Sokolovsky (Mellanox) <vlad at hosting.openfabrics.org> 1228913624 -0800	clone: from git://git.openfabrics.org/ofed_1_4/linux-2.6.git
+0000000000000000000000000000000000000000 868661b127c355c64066a796460a7380a722dd84 Vladimir Sokolovsky (Mellanox) <vlad at hosting.openfabrics.org> 1243438036 -0700	clone: from git://git.openfabrics.org/ofed_1_4/linux-2.6.git

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/objects/info/alternates
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/objects/info/alternates	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/objects/info/alternates	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+/pub/scm/ofed_1_4/linux-2.6.git/objects

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/heads/master
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/heads/master	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/heads/master	2009-05-30 10:58:06 UTC (rev 299)
@@ -1 +1 @@
-88ab7955605c5e769e760f6bec980e0c2e72aa5c
+868661b127c355c64066a796460a7380a722dd84

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/heads/ofed_kernel
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/heads/ofed_kernel	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/heads/ofed_kernel	2009-05-30 10:58:06 UTC (rev 299)
@@ -1 +1 @@
-88ab7955605c5e769e760f6bec980e0c2e72aa5c
+868661b127c355c64066a796460a7380a722dd84

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.24-rc1
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.24-rc1	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.24-rc1	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+cebdeed27b068dcc3e7c311d7ec0d9c33b5138c2

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.24-rc4
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.24-rc4	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.24-rc4	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+b6fa40f5916811c6aad6625c384d26fd01135014

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.24-rc6
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.24-rc6	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.24-rc6	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+f49e4e249d57ddfa97e046bc5c994ef72c93e63b

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.24-rc7
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.24-rc7	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.24-rc7	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+fcb31af14662059db467201ec73dfbb6f3300342

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.24-rc8
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.24-rc8	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.24-rc8	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+c9ba0caa9650a1898c839a79f6ff96a8a982424c

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc1
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc1	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc1	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+abf6976c818c553eb2209fe32028a4c5eecab0cb

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc2
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc2	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc2	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+b74415eac8d3f1fcb39ad4bcef0c829635a3bc9f

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc3
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc3	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc3	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+d622f5379e88a3bac4f8decfa49c0a04a8e209d3

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc4
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc4	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc4	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+c6c155e032361b0031943141b1a6f231e4f63817

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc5
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc5	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc5	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+cd81f35c48b7e0c2a871f88e1973f391f8330449

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc6
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc6	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc6	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+b22f07f908a648c864b16d2ba71f03aba4b684c9

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc8
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc8	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc8	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+e39586f39c2829d30f4ea6680a846dfe4aad2f2e

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc9
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc9	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.25-rc9	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+3df83da958163beeca00d1254f512fafd79a19ed

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc1
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc1	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc1	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+d6b7f73ed134769c86966697e61b235b200cc4ae

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc2
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc2	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc2	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+b67fc588ce611ca847620bd1353bf2d68fc3027f

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc3
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc3	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc3	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+b041a30258df00c90ac1ed532cec3f25c00a3ce8

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc4
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc4	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc4	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+e21868a4cdd93e5883ff61579d4cd799d1a3c244

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc5
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc5	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc5	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+9ab8267ac47ce50b932cc4b1cbd9b05e2faac8b7

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc7
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc7	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc7	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+f40883d058ed196976285fc1fd5fd6c85dcb5bef

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc8
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc8	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc8	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+496a3db2bfb98f1e9c7b73514d8d25790f69f5fb

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc9
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc9	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.26-rc9	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+c22689a6f45beff21b97df566e0da17b4fa9ec19

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.27-rc6
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.27-rc6	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/v2.6.27-rc6	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+89c44b4a5ad50f8b85846b16af5f977f3861d197

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.3.1-rc1
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.3.1-rc1	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.3.1-rc1	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+794133e49a61b6db81743bfd6b2d2153570898c2

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+88ab7955605c5e769e760f6bec980e0c2e72aa5c

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4.1-rc1
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4.1-rc1	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4.1-rc1	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+03577d57a3e3ce59320c5ee61a94a6af4497dca1

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4.1-rc2
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4.1-rc2	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4.1-rc2	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+5b4c450b6ed9c63355a893e585e9621f8854c3b3

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4.1-rc3
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4.1-rc3	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4.1-rc3	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+47e7e7f31acd66e1ffda25e58b51608e99d465fb

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4.1-rc4
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4.1-rc4	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4.1-rc4	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+4f26035d707538bb6584a99350c98c2247b10dda

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4.1-rc5
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4.1-rc5	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4.1-rc5	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+d4c83dc22055d824178658cb5c472c3c75f30d2f

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4.1-rc6
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4.1-rc6	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/.git/refs/tags/vofed-1.4.1-rc6	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1 @@
+868661b127c355c64066a796460a7380a722dd84

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/BUILD_ID
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/BUILD_ID	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/BUILD_ID	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,3 +1,3 @@
 Git:
 git://git.openfabrics.org/ofed_1_4/linux-2.6.git ofed_kernel
-commit 88ab7955605c5e769e760f6bec980e0c2e72aa5c
+commit 868661b127c355c64066a796460a7380a722dd84

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/debug/memtrack.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/debug/memtrack.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/debug/memtrack.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -76,6 +76,13 @@
 module_param(strict_track_mask, ulong, 0444);
 MODULE_PARM_DESC(strict_track_mask, "bitmask which allocation requires strict tracking");
 
+/* Sets the frequency of simulated allocation failures;
+   if set to 0, all allocations should succeed */
+static unsigned int fail_freq = 0;
+module_param(fail_freq, uint, 0444);
+MODULE_PARM_DESC(fail_freq, "Allocation failure frequency, default is 0 (disabled)");
+
+
 typedef struct memtrack_meminfo_st {
         unsigned long addr;
         unsigned long size;
@@ -179,7 +186,7 @@
 /* Invoke on memory allocation */
 void memtrack_alloc(memtrack_memtype_t memtype, unsigned long addr,
                     unsigned long size, const char *filename,
-                    const unsigned long line_num, int alloc_flags)
+                    const unsigned long line_num, int alloc_flags, int *fail)
 {
         unsigned long hash_val;
         memtrack_meminfo_t *cur_mem_info_p, *new_mem_info_p;
@@ -252,6 +259,12 @@
         list_add(&new_mem_info_p->list, &obj_desc_p->tracked_objs_head);
 
         memtrack_spin_unlock(&obj_desc_p->hash_lock, flags);
+
+	if (fail_freq) {
+		if (!(jiffies % fail_freq)) {
+			*fail = 1;
+		}
+	}
         return;
 }
 

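The memtrack.c hunks above add a fail_freq module parameter and mark an allocation as failed whenever jiffies happens to be a multiple of that value, giving a cheap way to exercise driver error paths. A minimal user-space sketch of the same modulo-based fault-injection idea; tick and try_alloc are illustrative stand-ins for jiffies and the patched allocators, not part of the patch:

    #include <stdio.h>
    #include <stdlib.h>

    static unsigned int fail_freq = 5;   /* 0 disables injection, as in the patch */
    static unsigned long tick;           /* stand-in for the kernel's jiffies counter */

    /* Allocate, but occasionally pretend the allocation failed. */
    static void *try_alloc(size_t size)
    {
        void *p = malloc(size);

        tick++;
        if (p && fail_freq && !(tick % fail_freq)) {
            free(p);          /* the patch frees and reports failure via *fail */
            return NULL;
        }
        return p;
    }

    int main(void)
    {
        for (int i = 0; i < 12; i++) {
            void *p = try_alloc(64);
            printf("alloc %2d: %s\n", i, p ? "ok" : "injected failure");
            free(p);
        }
        return 0;
    }
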
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/debug/memtrack.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/debug/memtrack.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/debug/memtrack.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -32,7 +32,7 @@
 /* Invoke on memory allocation */
 void memtrack_alloc(memtrack_memtype_t memtype, unsigned long addr,
                     unsigned long size, const char *filename,
-                    const unsigned long line_num, int alloc_flags);
+                    const unsigned long line_num, int alloc_flags, int *fail);
 
 /* Invoke on memory free */
 void memtrack_free(memtrack_memtype_t memtype, unsigned long addr,

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/debug/mtrack.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/debug/mtrack.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/debug/mtrack.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -22,10 +22,15 @@
 #else
 #define kzalloc(size, flags) ({ \
         void *__memtrack_addr;                 \
+	int fail = 0; \
                                 \
         __memtrack_addr = kzalloc(size, flags); \
         if ( __memtrack_addr && (size)) {                               \
-                memtrack_alloc(MEMTRACK_KMALLOC, (unsigned long)(__memtrack_addr), size, __FILE__, __LINE__, flags); \
+                memtrack_alloc(MEMTRACK_KMALLOC, (unsigned long)(__memtrack_addr), size, __FILE__, __LINE__, flags, &fail); \
+                if (fail) {                      \
+                        kfree(__memtrack_addr);  \
+                        __memtrack_addr = NULL;  \
+                }                                \
         }                                                                     \
         __memtrack_addr;                                                                              \
 })
@@ -37,10 +42,15 @@
 #else
 #define kcalloc(n, size, flags) ({ \
         void *__memtrack_addr;                 \
+	int fail = 0; \
                                 \
         __memtrack_addr = kcalloc(n, size, flags); \
         if ( __memtrack_addr && (size)) {                               \
-                memtrack_alloc(MEMTRACK_KMALLOC, (unsigned long)(__memtrack_addr), (n)*(size), __FILE__, __LINE__, flags); \
+                memtrack_alloc(MEMTRACK_KMALLOC, (unsigned long)(__memtrack_addr), (n)*(size), __FILE__, __LINE__, flags, &fail); \
+                if (fail) {                      \
+                        kfree(__memtrack_addr);  \
+                        __memtrack_addr = NULL;  \
+                }                                \
         }                                                                     \
         __memtrack_addr;                                                                              \
 })
@@ -51,20 +61,30 @@
 #if LINUX_VERSION_CODE != KERNEL_VERSION(2,6,9)
 #define kmalloc(sz, flgs) ({ \
         void *__memtrack_addr;                 \
+	int fail = 0; \
                                 \
         __memtrack_addr = kmalloc(sz, flgs); \
         if ( __memtrack_addr && (sz)) {                               \
-                memtrack_alloc(MEMTRACK_KMALLOC, (unsigned long)(__memtrack_addr), sz, __FILE__, __LINE__, flgs); \
+                memtrack_alloc(MEMTRACK_KMALLOC, (unsigned long)(__memtrack_addr), sz, __FILE__, __LINE__, flgs, &fail); \
+                if (fail) {                      \
+                        kfree(__memtrack_addr);  \
+                        __memtrack_addr = NULL;  \
+                }                                \
         }                                                                     \
         __memtrack_addr;                                                                              \
 })
 #else
 #define kmalloc(sz, flgs) ({ \
         void *__memtrack_addr;                 \
+	int fail = 0; \
                                 \
         __memtrack_addr = kmalloc(sz, flgs); \
         if ( __memtrack_addr ) {                               \
-                memtrack_alloc(MEMTRACK_KMALLOC, (unsigned long)(__memtrack_addr), sz, __FILE__, __LINE__, flgs); \
+                memtrack_alloc(MEMTRACK_KMALLOC, (unsigned long)(__memtrack_addr), sz, __FILE__, __LINE__, flgs, &fail); \
+                if (fail) {                      \
+                        kfree(__memtrack_addr);  \
+                        __memtrack_addr = NULL;  \
+                }                                \
         }                                                                     \
         __memtrack_addr;                                                                              \
 })
@@ -96,10 +116,15 @@
 
 #define vmalloc(size) ({ \
         void *__memtrack_addr;                 \
+	int fail = 0; \
                                 \
         __memtrack_addr = vmalloc(size); \
         if ( __memtrack_addr ) {                               \
-                memtrack_alloc(MEMTRACK_VMALLOC, (unsigned long)(__memtrack_addr), size, __FILE__, __LINE__, GFP_ATOMIC); \
+                memtrack_alloc(MEMTRACK_VMALLOC, (unsigned long)(__memtrack_addr), size, __FILE__, __LINE__, GFP_ATOMIC, &fail); \
+                if (fail) {                      \
+                        vfree(__memtrack_addr);  \
+                        __memtrack_addr = NULL;  \
+                }                                \
         }                                                                     \
         __memtrack_addr;                                                                              \
 })
@@ -116,10 +141,15 @@
 
 #define kmem_cache_alloc(cache, flags) ({ \
         void *__memtrack_addr;         \
+	int fail = 0; \
                                 \
         __memtrack_addr = kmem_cache_alloc(cache, flags); \
         if ( __memtrack_addr ) {                               \
-                memtrack_alloc(MEMTRACK_KMEM_OBJ, (unsigned long)(__memtrack_addr), 1, __FILE__, __LINE__, flags); \
+                memtrack_alloc(MEMTRACK_KMEM_OBJ, (unsigned long)(__memtrack_addr), 1, __FILE__, __LINE__, flags, &fail); \
+                if (fail) {                      \
+                        kmem_cache_free(cache, __memtrack_addr); \
+                        __memtrack_addr = NULL;           \
+                }                                         \
         }                                                                     \
         __memtrack_addr;                                                                              \
 })

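The mtrack.h hunks wrap each allocator in a GCC statement-expression macro that registers the allocation with memtrack_alloc() and backs the allocation out again when the tracker requests a simulated failure. A rough user-space sketch of that wrapper pattern, assuming a hypothetical track_alloc() in place of memtrack_alloc():

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical tracker: records the allocation and may ask for a failure. */
    static void track_alloc(void *addr, size_t size, const char *file,
                            int line, int *fail)
    {
        printf("tracked %zu bytes at %p (%s:%d)\n", size, addr, file, line);
        *fail = 0;                 /* set to 1 to simulate an injected failure */
    }

    /* Statement-expression wrapper mirroring the patched kzalloc/kmalloc macros:
     * allocate, register with the tracker, and undo the allocation if the
     * tracker asked for a simulated failure. */
    #define tracked_malloc(size) ({                                        \
            void *__addr = malloc(size);                                   \
            int __fail = 0;                                                \
            if (__addr && (size)) {                                        \
                track_alloc(__addr, (size), __FILE__, __LINE__, &__fail);  \
                if (__fail) { free(__addr); __addr = NULL; }               \
            }                                                              \
            __addr;                                                        \
    })

    int main(void)
    {
        char *buf = tracked_malloc(128);
        free(buf);
        return 0;
    }
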
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/ulp/sdp/sdp.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/ulp/sdp/sdp.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/ulp/sdp/sdp.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -172,17 +172,20 @@
 
 	int destructed_already;
 	int sdp_disconnect;
+	int destruct_in_process;
 
-	/* Data below will be reset on error */
+	struct sdp_buf *rx_ring;
+	struct sdp_buf   *tx_ring;
+
 	/* rdma specific */
-	struct rdma_cm_id *id;
 	struct ib_qp *qp;
 	struct ib_cq *cq;
 	struct ib_mr *mr;
+	/* Data below will be reset on error */
+	struct rdma_cm_id *id;
 	struct ib_device *ib_device;
 
 	/* SDP specific */
-	struct sdp_buf *rx_ring;
 	struct ib_recv_wr rx_wr;
 	unsigned rx_head;
 	unsigned rx_tail;
@@ -194,7 +197,6 @@
 	int               remote_credits;
 	int 		  poll_cq;
 
-	struct sdp_buf   *tx_ring;
 	unsigned          tx_head;
 	unsigned          tx_tail;
 	struct ib_send_wr tx_wr;

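The field reordering above matters because the error path in sdp_main.c (visible as a context line in the hunk further below) wipes the socket with memset(&ssk->id, 0, sizeof(*ssk) - offsetof(typeof(*ssk), id)), i.e. everything from the id member onward; the rings and verbs objects moved above id therefore survive that reset. A small stand-alone sketch of the offsetof-based partial reset, using an illustrative struct rather than struct sdp_sock:

    #include <stdio.h>
    #include <stddef.h>
    #include <string.h>

    struct conn {
        /* preserved across an error reset */
        void *rx_ring;
        void *tx_ring;
        /* everything from 'id' onward is wiped on error */
        void *id;
        int   remote_credits;
    };

    static void reset_on_error(struct conn *c)
    {
        /* zero the tail of the struct, starting at the 'id' member */
        memset(&c->id, 0, sizeof(*c) - offsetof(struct conn, id));
    }

    int main(void)
    {
        struct conn c = { .rx_ring = &c, .tx_ring = &c, .id = &c, .remote_credits = 7 };

        reset_on_error(&c);
        printf("rx_ring kept: %s, id cleared: %s, credits: %d\n",
               c.rx_ring ? "yes" : "no", c.id ? "no" : "yes", c.remote_credits);
        return 0;
    }
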
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/ulp/sdp/sdp_bcopy.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -330,7 +330,13 @@
 		frag = &skb_shinfo(skb)->frags[i];
 		frag->page                = page;
 		frag->page_offset         = 0;
-		frag->size                =  min(PAGE_SIZE, SDP_MAX_PAYLOAD);
+
+		/* Bugzilla 1311 */
+		if ( sizeof(frag->size) < 4 )
+			frag->size = min(PAGE_SIZE, SDP_MAX_PAYLOAD);
+		else
+			frag->size = PAGE_SIZE;
+
 		++skb_shinfo(skb)->nr_frags;
 		skb->len += frag->size;
 		skb->data_len += frag->size;
@@ -380,9 +386,13 @@
 
 void sdp_post_recvs(struct sdp_sock *ssk)
 {
+	struct sock *sk = &ssk->isk.sk;
 	int scale = ssk->rcvbuf_scale;
-	if (unlikely(!ssk->id))
+
+	if (unlikely(!ssk->id || ((1 << sk->sk_state) & 
+		(TCPF_CLOSE | TCPF_TIME_WAIT)))) {
 		return;
+	}
 
 	if (top_mem_usage &&
 	    (top_mem_usage * 0x100000) < atomic_read(&sdp_current_mem_usage) * PAGE_SIZE)
@@ -391,8 +401,7 @@
 	while ((likely(ssk->rx_head - ssk->rx_tail < SDP_RX_SIZE) &&
 		(ssk->rx_head - ssk->rx_tail - SDP_MIN_BUFS) *
 		(SDP_HEAD_SIZE + ssk->recv_frags * PAGE_SIZE) +
-		ssk->rcv_nxt - ssk->copied_seq <
-		ssk->isk.sk.sk_rcvbuf * scale) ||
+		ssk->rcv_nxt - ssk->copied_seq < sk->sk_rcvbuf * scale) ||
 	       unlikely(ssk->rx_head - ssk->rx_tail < SDP_MIN_BUFS))
 		sdp_post_recv(ssk);
 }
@@ -595,6 +604,7 @@
 	ssk->recv_frags = PAGE_ALIGN(new_size - SDP_HEAD_SIZE) / PAGE_SIZE;
 	if (ssk->recv_frags > SDP_MAX_SEND_SKB_FRAGS)
 		ssk->recv_frags = SDP_MAX_SEND_SKB_FRAGS;
+	ssk->rcvbuf_scale = rcvbuf_scale;
 
 	sdp_post_recvs(ssk);
 
@@ -603,18 +613,17 @@
 
 int sdp_resize_buffers(struct sdp_sock *ssk, u32 new_size)
 {
+	skb_frag_t skb_frag;
 	u32 curr_size = SDP_HEAD_SIZE + ssk->recv_frags * PAGE_SIZE;
-#if defined(__ia64__)
-	/* for huge PAGE_SIZE systems, aka IA64, limit buffers size
-	   [re-]negotiation to a known+working size that will not
-	   trigger a HW error/rc to be interpreted as a IB_WC_LOC_LEN_ERR */
-	u32 max_size = (SDP_HEAD_SIZE + SDP_MAX_SEND_SKB_FRAGS * PAGE_SIZE) <=
-		32784 ?
-		(SDP_HEAD_SIZE + SDP_MAX_SEND_SKB_FRAGS * PAGE_SIZE): 32784;
-#else 
 	u32 max_size = SDP_HEAD_SIZE + SDP_MAX_SEND_SKB_FRAGS * PAGE_SIZE;
-#endif
 
+	/* Bugzilla 1311, Kernels using smaller fragments must reject
+	 * re-size requests larger than 32k to prevent being sent
+	 * fragment larger than the receive buffer fragment.
+	 */
+	if ( (sizeof(skb_frag.size) < 4) && (max_size > 0x8000))
+		max_size = 0x8000;
+
 	if (new_size > curr_size && new_size <= max_size &&
 	    sdp_get_large_socket(ssk)) {
 		ssk->rcvbuf_scale = rcvbuf_scale;

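Both Bugzilla 1311 hunks in sdp_bcopy.c key off the width of the skb fragment size field: on kernels where it is only 16 bits wide, a fragment the size of a large page would overflow it, so the per-fragment size is clamped and buffer resize negotiation is capped at 32 KB (0x8000). A tiny sketch of the sizeof-based width check, using an illustrative 16-bit field rather than the kernel's skb_frag_t:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative fragment descriptor with a 16-bit size field, as on the
     * older kernels the patch comment refers to. */
    struct frag { uint16_t size; };

    #define PAGE_SIZE_DEMO  (64u * 1024u)   /* e.g. a 64 KB page on ia64 */

    int main(void)
    {
        struct frag f;
        unsigned int max_size = PAGE_SIZE_DEMO;

        /* Same idea as the patch: if the size field is narrower than 4 bytes,
         * cap at 32 KB so a fragment can never overflow the field. */
        if (sizeof(f.size) < 4 && max_size > 0x8000)
            max_size = 0x8000;

        f.size = (uint16_t)max_size;   /* guaranteed to fit after the clamp */
        printf("negotiated max size: %u (stored: %u)\n", max_size, f.size);
        return 0;
    }
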
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/ulp/sdp/sdp_cma.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/ulp/sdp/sdp_cma.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/ulp/sdp/sdp_cma.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -131,7 +131,7 @@
 	if (!sdp_sk(sk)->rx_ring) {
 		rc = -ENOMEM;
 		sdp_warn(sk, "Unable to allocate RX Ring size %zd.\n",
-			 sizeof *sdp_sk(sk)->rx_ring * SDP_TX_SIZE);
+			 sizeof *sdp_sk(sk)->rx_ring * SDP_RX_SIZE);
 		goto err_rx;
 	}
 
@@ -216,8 +216,6 @@
 
 	sdp_init_sock(child);
 
-	sdp_add_sock(sdp_sk(child));
-
 	dst_addr = (struct sockaddr_in *)&id->route.addr.dst_addr;
 	inet_sk(child)->dport = dst_addr->sin_port;
 	inet_sk(child)->daddr = dst_addr->sin_addr.s_addr;
@@ -227,10 +225,13 @@
 
 	rc = sdp_init_qp(child, id);
 	if (rc) {
-		sk_common_release(child);
+		sdp_sk(child)->destructed_already = 1;
+		sk_free(child);
 		return rc;
 	}
 
+	sdp_add_sock(sdp_sk(child));
+
 	sdp_sk(child)->max_bufs = sdp_sk(child)->bufs = ntohs(h->bsdh.bufs);
 	sdp_sk(child)->min_bufs = sdp_sk(child)->bufs / 4;
 	sdp_sk(child)->xmit_size_goal = ntohl(h->localrcvsz) -

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/ulp/sdp/sdp_main.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/ulp/sdp/sdp_main.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/infiniband/ulp/sdp/sdp_main.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -143,6 +143,8 @@
 
 static DEFINE_RWLOCK(device_removal_lock);
 
+static inline void sdp_start_dreq_wait_timeout(struct sdp_sock *ssk, int timeo);
+
 static inline unsigned int sdp_keepalive_time_when(const struct sdp_sock *ssk)
 {
 	return ssk->keepalive_time ? : sdp_keepalive_time;
@@ -344,8 +346,6 @@
 	if (!(sk->sk_shutdown & RCV_SHUTDOWN) || !sk_stream_memory_free(sk))
 		sdp_set_error(sk, rc);
 
-	sdp_destroy_qp(ssk);
-
 	memset((void *)&ssk->id, 0, sizeof(*ssk) - offsetof(typeof(*ssk), id));
 
 	sk->sk_state_change(sk);
@@ -450,9 +450,7 @@
 static void sdp_send_disconnect(struct sock *sk)
 {
 	sock_hold(sk, SOCK_REF_DREQ_TO);
-	queue_delayed_work(sdp_workqueue, &sdp_sk(sk)->dreq_wait_work,
-			   SDP_FIN_WAIT_TIMEOUT);
-	sdp_sk(sk)->dreq_wait_timeout = 1;
+	sdp_start_dreq_wait_timeout(sdp_sk(sk), SDP_FIN_WAIT_TIMEOUT);
 
 	sdp_sk(sk)->sdp_disconnect = 1;
 	sdp_post_sends(sdp_sk(sk), 0);
@@ -842,18 +840,27 @@
 	return put_user(answ, (int __user *)arg); 
 }
 
+static inline void sdp_start_dreq_wait_timeout(struct sdp_sock *ssk, int timeo)
+{
+	sdp_dbg(&ssk->isk.sk, "Starting dreq wait timeout\n");
+
+	queue_delayed_work(sdp_workqueue, &ssk->dreq_wait_work, timeo);
+	ssk->dreq_wait_timeout = 1;
+}
+
 void sdp_cancel_dreq_wait_timeout(struct sdp_sock *ssk)
 {
 	if (!ssk->dreq_wait_timeout)
 		return;
 
-	sdp_dbg(&ssk->isk.sk, "cancelling dreq wait timeout #####\n");
+	sdp_dbg(&ssk->isk.sk, "cancelling dreq wait timeout\n");
 
 	ssk->dreq_wait_timeout = 0;
 	if (cancel_delayed_work(&ssk->dreq_wait_work)) {
 		/* The timeout hasn't reached - need to clean ref count */
 		sock_put(&ssk->isk.sk, SOCK_REF_DREQ_TO);
 	}
+
 	atomic_dec(ssk->isk.sk.sk_prot->orphan_count);
 }
 
@@ -904,7 +911,7 @@
 	if (sdp_sk(sk)->id) {
 		rdma_disconnect(sdp_sk(sk)->id);
 	} else {
-		sdp_warn(sk, "DO NOT SENDING DREQ - no need to wait for timewait exit\n");
+		sdp_warn(sk, "NOT SENDING DREQ - no need to wait for timewait exit\n");
 		sock_put(sk, SOCK_REF_CM_TW);
 	}
 
@@ -929,6 +936,7 @@
 	ssk->tx_ring = NULL;
 	ssk->sdp_disconnect = 0;
 	ssk->destructed_already = 0;
+	ssk->destruct_in_process = 0;
 	spin_lock_init(&ssk->lock);
 
 	return 0;
@@ -2479,13 +2487,31 @@
 		}
 	}
 
+kill_socks:
 	list_for_each(p, &sock_list) {
 		ssk = list_entry(p, struct sdp_sock, sock_list);
-		if (ssk->ib_device == device) {
+
+		if (ssk->ib_device == device && !ssk->destruct_in_process) {
+			ssk->destruct_in_process = 1;
 			sk = &ssk->isk.sk;
 
+			sdp_cancel_dreq_wait_timeout(ssk);
+
+			spin_unlock_irq(&sock_list_lock);
+
 			sk->sk_shutdown |= RCV_SHUTDOWN;
 			sdp_reset(sk);
+			if ((1 << sk->sk_state) & 
+				(TCPF_FIN_WAIT1 | TCPF_CLOSE_WAIT |
+				 TCPF_LAST_ACK | TCPF_TIME_WAIT)) {
+				sock_put(sk, SOCK_REF_CM_TW);
+			}
+			
+			schedule();
+
+			spin_lock_irq(&sock_list_lock);
+
+			goto kill_socks;
 		}
 	}
 

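The kill_socks loop above handles device removal by flagging each matching socket with destruct_in_process, dropping sock_list_lock while the socket is torn down, and then restarting the scan from the top, since the list may have changed while the lock was released. A minimal user-space sketch of that flag-and-restart scan, with a pthread mutex standing in for the spinlock and a plain array standing in for the socket list:

    #include <stdio.h>
    #include <pthread.h>

    #define N 4

    struct entry { int matches; int in_process; };

    static struct entry list[N] = { {1, 0}, {0, 0}, {1, 0}, {1, 0} };
    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Each matching entry is flagged, handled with the lock dropped, and the
     * scan restarts; the flag keeps an entry from being handled twice. */
    static void remove_matching(void)
    {
        pthread_mutex_lock(&list_lock);
    restart:
        for (int i = 0; i < N; i++) {
            if (list[i].matches && !list[i].in_process) {
                list[i].in_process = 1;
                pthread_mutex_unlock(&list_lock);

                printf("tearing down entry %d\n", i);   /* work done unlocked */

                pthread_mutex_lock(&list_lock);
                goto restart;
            }
        }
        pthread_mutex_unlock(&list_lock);
    }

    int main(void)
    {
        remove_matching();
        return 0;
    }
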
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_cq.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_cq.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_cq.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -53,13 +53,18 @@
 	cq->size = entries;
 	if (mode == RX) {
 		cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
-		cq->vector = ring % mdev->dev->caps.num_comp_vectors;
+		cq->vector = (ring + priv->port) %
+			mdev->dev->caps.num_comp_vectors;
 	} else {
 		cq->buf_size = sizeof(struct mlx4_cqe);
 		cq->vector = MLX4_LEAST_ATTACHED_VECTOR;
 	}
 	cq->ring = ring;
 	cq->is_tx = mode;
+	if (priv->rx_ring[ring].use_frags)
+		cq->process_cq = mlx4_en_process_rx_cq;
+	else
+		cq->process_cq = mlx4_en_process_rx_cq_skb;
 	spin_lock_init(&cq->lock);
 
 	err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
@@ -139,7 +144,6 @@
 
 int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 {
-	cq->armed = 1;
 	mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
 		    &priv->mdev->uar_lock);
 

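The en_cq.c change assigns RX completion vectors as (ring + port) % num_comp_vectors rather than ring % num_comp_vectors, so the rings of different ports land on different interrupt vectors instead of piling onto the same ones. A few lines showing the resulting spread, with ring and vector counts chosen only for illustration:

    #include <stdio.h>

    int main(void)
    {
        const int num_comp_vectors = 4;   /* illustrative values */
        const int num_rings = 4;

        for (int port = 1; port <= 2; port++)
            for (int ring = 0; ring < num_rings; ring++)
                printf("port %d ring %d -> vector %d\n",
                       port, ring, (ring + port) % num_comp_vectors);
        return 0;
    }
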
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_frag.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_frag.c	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_frag.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <linux/etherdevice.h>
+
+#include "mlx4_en.h"
+
+
+static struct mlx4_en_ipfrag *find_session(struct mlx4_en_rx_ring *ring,
+					   struct iphdr *iph)
+{
+	struct mlx4_en_ipfrag *session;
+	int i;
+
+	for (i = 0; i < MLX4_EN_NUM_IPFRAG_SESSIONS; i++) {
+		session = &ring->ipfrag[i];
+		if (session->fragments == NULL)
+			continue;
+		if (session->daddr == iph->daddr &&
+		    session->saddr == iph->saddr &&
+		    session->id == iph->id &&
+		    session->protocol == iph->protocol) {
+			return session;
+		}
+	}
+	return NULL;
+}
+
+static struct mlx4_en_ipfrag *start_session(struct mlx4_en_rx_ring *ring,
+					    struct iphdr *iph)
+{
+	struct mlx4_en_ipfrag *session;
+	int index = -1;
+	int i;
+
+	for (i = 0; i < MLX4_EN_NUM_IPFRAG_SESSIONS; i++) {
+		if (ring->ipfrag[i].fragments == NULL) {
+			index = i;
+			break;
+		}
+	}
+	if (index < 0)
+		return NULL;
+
+	session = &ring->ipfrag[index];
+
+	return session;
+}
+
+
+static void flush_session(struct mlx4_en_priv *priv,
+			  struct mlx4_en_ipfrag *session,
+			  u16 more)
+{
+	struct sk_buff *skb = session->fragments;
+	struct iphdr *iph = ip_hdr(skb);
+	struct net_device *dev = skb->dev;
+
+	/* Update IP length and checksum */
+	iph->tot_len = htons(session->total_len);
+	iph->frag_off = htons(more | (session->offset >> 3));
+	iph->check = 0;
+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+
+	if (session->vlan)
+		vlan_hwaccel_receive_skb(skb, priv->vlgrp,
+					 be16_to_cpu(session->sl_vid));
+	else
+		netif_receive_skb(skb);
+	dev->last_rx = jiffies;
+	session->fragments = NULL;
+	session->last = NULL;
+}
+
+
+static inline void frag_append(struct mlx4_en_priv *priv,
+			       struct mlx4_en_ipfrag *session,
+			       struct sk_buff *skb,
+			       unsigned int data_len)
+{
+	struct sk_buff *parent = session->fragments;
+
+	/* Update skb bookkeeping */
+	parent->len += data_len;
+	parent->data_len += data_len;
+	session->total_len += data_len;
+
+	skb_pull(skb, skb->len - data_len);
+	parent->truesize += skb->truesize;
+
+	if (session->last)
+		session->last->next = skb;
+	else
+		skb_shinfo(parent)->frag_list = skb;
+
+	session->last = skb;
+}
+
+int mlx4_en_rx_frags(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
+		     struct sk_buff *skb, struct mlx4_cqe *cqe)
+{
+	struct mlx4_en_ipfrag *session;
+	struct iphdr *iph;
+	u16 ip_len;
+	u16 ip_hlen;
+	int data_len;
+	u16 offset;
+
+	skb_reset_network_header(skb);
+	skb_reset_transport_header(skb);
+	iph = ip_hdr(skb);
+	ip_len = ntohs(iph->tot_len);
+	ip_hlen = iph->ihl * 4;
+	data_len = ip_len - ip_hlen;
+	offset = ntohs(iph->frag_off);
+	offset &= IP_OFFSET;
+	offset <<= 3;
+
+	session = find_session(ring, iph);
+	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) {
+		if (session)
+			flush_session(priv, session, IP_MF);
+		return -EINVAL;
+	}
+	if (session) {
+		if (unlikely(session->offset + session->total_len !=
+			     offset + ip_hlen)) {
+			flush_session(priv, session, IP_MF);
+			goto new_session;
+		}
+		/* Packets smaller than 60 bytes are padded to that size
+		 * Need to fix len field of the skb to fit the actual data size
+		 * Since ethernet header already removed, the IP total length
+		 * is exactly the data size (the skb is linear)
+		 */
+		skb->len = ip_len;
+
+		frag_append(priv, session, skb, data_len);
+	} else {
+new_session:
+		session = start_session(ring, iph);
+		if (unlikely(!session))
+			return -ENOSPC;
+
+		session->fragments = skb;
+		session->daddr = iph->daddr;
+		session->saddr = iph->saddr;
+		session->id = iph->id;
+		session->protocol = iph->protocol;
+		session->total_len = ip_len;
+		session->offset = offset;
+		session->vlan = (priv->vlgrp &&
+				 (be32_to_cpu(cqe->vlan_my_qpn) &
+				  MLX4_CQE_VLAN_PRESENT_MASK)) ? 1 : 0;
+		session->sl_vid = cqe->sl_vid;
+	}
+	if (!(ntohs(iph->frag_off) & IP_MF))
+		flush_session(priv, session, 0);
+	else if (session->fragments->len + priv->dev->mtu > 65536)
+		flush_session(priv, session, IP_MF);
+
+	return 0;
+}
+
+
+void mlx4_en_flush_frags(struct mlx4_en_priv *priv,
+			 struct mlx4_en_rx_ring *ring)
+{
+	struct mlx4_en_ipfrag *session;
+	int i;
+
+	for (i = 0; i < MLX4_EN_NUM_IPFRAG_SESSIONS; i++) {
+		session = &ring->ipfrag[i];
+		if (session->fragments)
+			flush_session(priv, session, IP_MF);
+	}
+}

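The new en_frag.c reassembles IP fragments in the driver, matching each fragment to an open session by the classic reassembly key of source address, destination address, IP ID and protocol, and keeping a small fixed table of sessions per RX ring. A stand-alone sketch of that session lookup; the struct layout and table size here are illustrative, not the driver's:

    #include <stdio.h>
    #include <stdint.h>

    #define NUM_SESSIONS 4   /* a small fixed table, as in the RX ring */

    struct frag_session {
        int      in_use;
        uint32_t saddr, daddr;
        uint16_t id;
        uint8_t  protocol;
    };

    /* Match a fragment against open sessions using the IP reassembly key. */
    static struct frag_session *lookup_session(struct frag_session *tbl,
                                               uint32_t saddr, uint32_t daddr,
                                               uint16_t id, uint8_t protocol)
    {
        for (int i = 0; i < NUM_SESSIONS; i++) {
            struct frag_session *s = &tbl[i];
            if (s->in_use && s->saddr == saddr && s->daddr == daddr &&
                s->id == id && s->protocol == protocol)
                return s;
        }
        return NULL;
    }

    int main(void)
    {
        struct frag_session tbl[NUM_SESSIONS] = {
            { .in_use = 1, .saddr = 0x0a000001, .daddr = 0x0a000002,
              .id = 42, .protocol = 17 },
        };

        printf("hit: %s\n",
               lookup_session(tbl, 0x0a000001, 0x0a000002, 42, 17) ? "yes" : "no");
        return 0;
    }
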
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_lro.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_lro.c	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_lro.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,540 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <net/tcp.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+
+#include "mlx4_en.h"
+
+/* LRO hash function - using sum of source and destination port LSBs is
+ * good enough */
+#define LRO_INDEX(th, size) \
+	((*((u8*) &th->source + 1) + *((u8*) &th->dest + 1)) & (size - 1))
+
+/* #define CONFIG_MLX4_EN_DEBUG_LRO */
+
+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
+static void mlx4_en_lro_validate(struct mlx4_en_priv* priv, struct mlx4_en_lro *lro)
+{
+	int i;
+	int size, size2;
+	struct sk_buff *skb = lro->skb;
+	skb_frag_t *frags;
+	int len, len2;
+	int cur_skb = 0;
+
+	/* Sum fragment sizes of first skb */
+	len = skb->len;
+	size = skb_headlen(skb);
+	frags = skb_shinfo(skb)->frags;
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		size += frags[i].size;
+	}
+
+	/* Add in fragments of linked skb's */
+	skb = skb_shinfo(skb)->frag_list;
+	while (skb) {
+		cur_skb++;
+		len2 = skb->len;
+		if (skb_headlen(skb)) {
+			mlx4_err(priv->mdev, "Bad LRO format: non-zero headlen "
+				  "in fraglist (skb:%d)\n", cur_skb);
+			return;
+		}
+
+		size2 = 0;
+		frags = skb_shinfo(skb)->frags;
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			size2 += frags[i].size;
+		}
+
+		if (size2 != len2) {
+			mlx4_err(priv->mdev, "Bad skb size:%d in LRO fraglist. "
+			          "Expected:%d (skb:%d)\n", size2, len2, cur_skb);
+			return;
+		}
+		size += size2;
+		skb = skb->next;
+	}
+
+	if (size != len)
+		mlx4_err(priv->mdev, "Bad LRO size:%d expected:%d\n", size, len);
+}
+#endif /* MLX4_EN_DEBUG_LRO */
+
+static void mlx4_en_lro_flush_single(struct mlx4_en_priv* priv,
+		   struct mlx4_en_rx_ring* ring, struct mlx4_en_lro *lro)
+{
+	struct sk_buff *skb = lro->skb;
+	struct iphdr *iph = (struct iphdr *) skb->data;
+	struct tcphdr *th = (struct tcphdr *)(iph + 1);
+	unsigned int headlen = skb_headlen(skb);
+	__wsum tcp_hdr_csum;
+	u32 *ts;
+
+	/* Update IP length and checksum */
+	iph->tot_len = htons(lro->tot_len);
+	iph->check = 0;
+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+
+	/* Update latest TCP ack, window, psh, and timestamp */
+	th->ack_seq = lro->ack_seq;
+	th->window = lro->window;
+	th->psh = !!lro->psh;
+	if (lro->has_timestamp) {
+		ts = (u32 *) (th + 1);
+		ts[1] = htonl(lro->tsval);
+		ts[2] = lro->tsecr;
+	}
+	th->check = 0;
+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, 0);
+	lro->data_csum = csum_add(lro->data_csum, tcp_hdr_csum);
+	th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+				      lro->tot_len - (iph->ihl << 2),
+				      IPPROTO_TCP, lro->data_csum);
+
+	/* Update skb */
+	skb->len = lro->tot_len;
+	skb->data_len = lro->tot_len - headlen;
+	skb->truesize = skb->len + sizeof(struct sk_buff);
+	skb_shinfo(skb)->gso_size = lro->mss;
+
+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
+	mlx4_en_lro_validate(priv, lro);
+#endif /* CONFIG_MLX4_EN_DEBUG_LRO */
+
+	/* Push it up the stack */
+	if (priv->vlgrp && lro->has_vlan)
+		vlan_hwaccel_receive_skb(skb, priv->vlgrp,
+					be16_to_cpu(lro->vlan_prio));
+	else
+		netif_receive_skb(skb);
+	priv->dev->last_rx = jiffies;
+
+	/* Increment stats */
+	priv->port_stats.lro_flushed++;
+
+	/* Move session back to the free list */
+	hlist_del(&lro->node);
+	hlist_del(&lro->flush_node);
+	hlist_add_head(&lro->node, &ring->lro_free);
+}
+
+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all)
+{
+	struct mlx4_en_lro *lro;
+	struct hlist_node *node, *tmp;
+
+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_flush, flush_node) {
+		if (all || time_after(jiffies, lro->expires))
+			mlx4_en_lro_flush_single(priv, ring, lro);
+	}
+}
+
+static inline int mlx4_en_lro_append(struct mlx4_en_priv *priv,
+				   struct mlx4_en_lro *lro,
+				   struct mlx4_en_rx_desc *rx_desc,
+				   struct skb_frag_struct *skb_frags,
+				   struct mlx4_en_rx_alloc *page_alloc,
+				   unsigned int data_len,
+				   int hlen)
+{
+	struct sk_buff *skb = lro->skb_last;
+	struct skb_shared_info *info;
+	struct skb_frag_struct *frags_copy;
+	int nr_frags;
+
+	if (skb_shinfo(skb)->nr_frags + priv->num_frags > MAX_SKB_FRAGS)
+		return -ENOMEM;
+
+	info = skb_shinfo(skb);
+
+	/* Copy fragments from descriptor ring to skb */
+	frags_copy = info->frags + info->nr_frags;
+	nr_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
+						frags_copy,
+						page_alloc,
+						data_len + hlen);
+	if (!nr_frags) {
+		mlx4_dbg(DRV, priv, "Failed completing rx desc during LRO append\n");
+		return -ENOMEM;
+	}
+
+	/* Skip over headers */
+	frags_copy[0].page_offset += hlen;
+
+	if (nr_frags == 1)
+		frags_copy[0].size = data_len;
+	else {
+		/* Adjust size of last fragment to match packet length.
+		 * Note: if this fragment is also the first one, the
+		 *       operation is completed in the next line */
+		frags_copy[nr_frags - 1].size = hlen + data_len -
+				priv->frag_info[nr_frags - 1].frag_prefix_size;
+
+		/* Adjust size of first fragment */
+		frags_copy[0].size -= hlen;
+	}
+
+	/* Update skb bookkeeping */
+	skb->len += data_len;
+	skb->data_len += data_len;
+	info->nr_frags += nr_frags;
+	return 0;
+}
+
+static inline struct mlx4_en_lro *mlx4_en_lro_find_session(struct mlx4_en_dev *mdev,
+						       struct mlx4_en_rx_ring *ring,
+						       struct iphdr *iph,
+						       struct tcphdr *th)
+{
+	struct mlx4_en_lro *lro;
+	struct hlist_node *node;
+	int index = LRO_INDEX(th, mdev->profile.num_lro);
+	struct hlist_head *list = &ring->lro_hash[index];
+
+	hlist_for_each_entry(lro, node, list, node) {
+		if (lro->sport_dport == *((u32*) &th->source) &&
+		    lro->saddr == iph->saddr &&
+		    lro->daddr == iph->daddr)
+			return lro;
+	}
+	return NULL;
+}
+
+static inline struct mlx4_en_lro *mlx4_en_lro_alloc_session(struct mlx4_en_priv *priv,
+							struct mlx4_en_rx_ring *ring)
+{
+	return hlist_empty(&ring->lro_free) ? NULL :
+		hlist_entry(ring->lro_free.first, struct mlx4_en_lro, node);
+}
+
+static __wsum mlx4_en_lro_tcp_data_csum(struct iphdr *iph,
+					struct tcphdr *th, int len)
+{
+	__wsum tcp_csum;
+	__wsum tcp_hdr_csum;
+	__wsum tcp_ps_hdr_csum;
+
+	tcp_csum = ~csum_unfold(th->check);
+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, tcp_csum);
+
+	tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
+					     len + (th->doff << 2),
+					     IPPROTO_TCP, 0);
+
+	return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
+			tcp_ps_hdr_csum);
+}
+
+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
+					  struct mlx4_en_rx_desc *rx_desc,
+					  struct skb_frag_struct *skb_frags,
+					  unsigned int length,
+					  struct mlx4_cqe *cqe)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_lro *lro;
+	struct sk_buff *skb;
+	struct iphdr *iph;
+	struct tcphdr *th;
+	dma_addr_t dma;
+	int tcp_hlen;
+	int tcp_data_len;
+	int hlen;
+	u16 ip_len;
+	void *va;
+	u32 *ts;
+	u32 seq;
+	u32 tsval = (u32) ~0UL;
+	u32 tsecr = 0;
+	u32 ack_seq;
+	u16 window;
+
+	/* This packet is eligible for LRO if it is:
+	 * - DIX Ethernet (type interpretation)
+	 * - TCP/IP (v4)
+	 * - without IP options
+	 * - not an IP fragment */
+	if (!mlx4_en_can_lro(cqe->status))
+			return -1;
+
+	/* Get pointer to TCP header. We already know that the packet is DIX Ethernet/IPv4/TCP
+	 * with no VLAN (HW stripped it) and no IP options */
+	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
+	iph = va + ETH_HLEN;
+	th = (struct tcphdr *)(iph + 1);
+
+	/* Synchronize headers for processing */
+	dma = be64_to_cpu(rx_desc->data[0].addr);
+#define MAX_LRO_HEADER		(ETH_HLEN + \
+				 sizeof(*iph) + \
+				 sizeof(*th) + \
+				 TCPOLEN_TSTAMP_ALIGNED)
+	dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
+				      MAX_LRO_HEADER, DMA_FROM_DEVICE);
+
+	/* We only handle aligned timestamp options */
+	tcp_hlen = (th->doff << 2);
+	if (tcp_hlen == sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) {
+		ts = (u32*) (th + 1);
+		if (unlikely(*ts != htonl((TCPOPT_NOP << 24) |
+					  (TCPOPT_NOP << 16) |
+					  (TCPOPT_TIMESTAMP << 8) |
+					  TCPOLEN_TIMESTAMP)))
+			goto sync_device;
+		tsval = ntohl(ts[1]);
+		tsecr = ts[2];
+	} else if (tcp_hlen != sizeof(*th))
+		goto sync_device;
+
+	/* At this point we know we have a TCP packet that is likely to be
+	 * eligible for LRO. Therefore, see now if we have an outstanding
+	 * session that corresponds to this packet so we can flush it if
+	 * something still prevents LRO */
+	lro = mlx4_en_lro_find_session(mdev, ring, iph, th);
+
+	/* ensure no bits set besides ack or psh */
+	if (th->fin || th->syn || th->rst || th->urg || th->ece ||
+	    th->cwr || !th->ack) {
+		if (lro) {
+			/* First flush session to keep packets in-order */
+			mlx4_en_lro_flush_single(priv, ring, lro);
+		}
+		goto sync_device;
+	}
+
+	/* Get ip length and verify that the frame is big enough */
+	ip_len = ntohs(iph->tot_len);
+	if (unlikely(length < ETH_HLEN + ip_len)) {
+		mlx4_warn(mdev, "Cannot LRO - ip payload exceeds frame!\n");
+		goto sync_device;
+	}
+
+	/* Get TCP payload length */
+	tcp_data_len = ip_len - tcp_hlen - sizeof(struct iphdr);
+	seq = ntohl(th->seq);
+	if (!tcp_data_len)
+		goto flush_session;
+
+	if (lro) {
+		/* Check VLAN tag */
+		if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
+			if (cqe->sl_vid != lro->vlan_prio || !lro->has_vlan) {
+				mlx4_en_lro_flush_single(priv, ring, lro);
+				goto sync_device;
+			}
+		} else if (lro->has_vlan) {
+			mlx4_en_lro_flush_single(priv, ring, lro);
+			goto sync_device;
+		}
+
+		/* Check sequence number */
+		if (unlikely(seq != lro->next_seq)) {
+			mlx4_en_lro_flush_single(priv, ring, lro);
+			goto sync_device;
+		}
+
+		/* If the cumulative IP length is over 64K, flush and start
+		 * a new session */
+		if (lro->tot_len + tcp_data_len > 0xffff) {
+			mlx4_en_lro_flush_single(priv, ring, lro);
+			goto new_session;
+		}
+
+		/* Check timestamps */
+		if (tcp_hlen != sizeof(*th)) {
+			if (unlikely(lro->tsval > tsval || !tsecr))
+				goto sync_device;
+		}
+
+		window = th->window;
+		ack_seq = th->ack_seq;
+		if (likely(tcp_data_len)) {
+			/* Append the data! */
+			hlen = ETH_HLEN + sizeof(struct iphdr) + tcp_hlen;
+			if (mlx4_en_lro_append(priv, lro, rx_desc, skb_frags,
+							ring->page_alloc,
+							tcp_data_len, hlen)) {
+				mlx4_en_lro_flush_single(priv, ring, lro);
+				goto sync_device;
+			}
+		} else {
+			/* No data */
+			dma_sync_single_range_for_device(&mdev->pdev->dev, dma,
+							 0, MAX_LRO_HEADER,
+							 DMA_FROM_DEVICE);
+		}
+
+		/* Update session */
+		lro->psh |= th->psh;
+		lro->next_seq += tcp_data_len;
+		lro->data_csum = csum_block_add(lro->data_csum,
+					mlx4_en_lro_tcp_data_csum(iph, th,
+								  tcp_data_len),
+					lro->tot_len);
+		lro->tot_len += tcp_data_len;
+		lro->tsval = tsval;
+		lro->tsecr = tsecr;
+		lro->ack_seq = ack_seq;
+		lro->window = window;
+		if (tcp_data_len > lro->mss)
+			lro->mss = tcp_data_len;
+		priv->port_stats.lro_aggregated++;
+		if (th->psh)
+			mlx4_en_lro_flush_single(priv, ring, lro);
+		return 0;
+	}
+
+new_session:
+	if (th->psh)
+		goto sync_device;
+	lro = mlx4_en_lro_alloc_session(priv, ring);
+	if (lro) {
+		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags, ring->page_alloc,
+							     ETH_HLEN + ip_len);
+		if (skb) {
+			int index;
+
+			/* Add in the skb */
+			lro->skb = skb;
+			lro->skb_last = skb;
+			skb->protocol = eth_type_trans(skb, priv->dev);
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+			/* Initialize session */
+			lro->saddr = iph->saddr;
+			lro->daddr = iph->daddr;
+			lro->sport_dport = *((u32*) &th->source);
+
+			lro->next_seq = seq + tcp_data_len;
+			lro->tot_len = ip_len;
+			lro->psh = th->psh;
+			lro->ack_seq = th->ack_seq;
+			lro->window = th->window;
+			lro->mss = tcp_data_len;
+			lro->data_csum = mlx4_en_lro_tcp_data_csum(iph, th,
+						tcp_data_len);
+
+			/* Handle vlans */
+			if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
+				lro->vlan_prio = cqe->sl_vid;
+				lro->has_vlan = 1;
+			} else
+				lro->has_vlan = 0;
+
+			/* Handle timestamps */
+			if (tcp_hlen != sizeof(*th)) {
+				lro->tsval = tsval;
+				lro->tsecr = tsecr;
+				lro->has_timestamp = 1;
+			} else {
+				lro->tsval = (u32) ~0UL;
+				lro->has_timestamp = 0;
+			}
+
+			/* Activate this session */
+			lro->expires = jiffies + HZ / 25;
+			hlist_del(&lro->node);
+			index = LRO_INDEX(th, mdev->profile.num_lro);
+
+			hlist_add_head(&lro->node, &ring->lro_hash[index]);
+			hlist_add_head(&lro->flush_node, &ring->lro_flush);
+			priv->port_stats.lro_aggregated++;
+			return 0;
+		} else {
+			/* Packet is dropped because we were not able to allocate a new
+			 * page for fragments */
+			dma_sync_single_range_for_device(&mdev->pdev->dev, dma,
+							 0, MAX_LRO_HEADER,
+							 DMA_FROM_DEVICE);
+			return 0;
+		}
+	} else {
+		priv->port_stats.lro_no_desc++;
+	}
+
+flush_session:
+	if (lro)
+		mlx4_en_lro_flush_single(priv, ring, lro);
+sync_device:
+	dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
+					 MAX_LRO_HEADER, DMA_FROM_DEVICE);
+	return -1;
+}
+
+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring)
+{
+	struct mlx4_en_lro *lro;
+	struct hlist_node *node, *tmp;
+
+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_free, node) {
+		hlist_del(&lro->node);
+		kfree(lro);
+	}
+	kfree(ring->lro_hash);
+}
+
+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro)
+{
+	struct mlx4_en_lro *lro;
+	int i;
+
+	INIT_HLIST_HEAD(&ring->lro_free);
+	INIT_HLIST_HEAD(&ring->lro_flush);
+	ring->lro_hash = kmalloc(sizeof(struct hlist_head) * num_lro,
+				 GFP_KERNEL);
+	if (!ring->lro_hash)
+		return -ENOMEM;
+
+	for (i = 0; i < num_lro; i++) {
+		INIT_HLIST_HEAD(&ring->lro_hash[i]);
+		lro = kzalloc(sizeof(struct mlx4_en_lro), GFP_KERNEL);
+		if (!lro) {
+			mlx4_en_lro_destroy(ring);
+			return -ENOMEM;
+		}
+		INIT_HLIST_NODE(&lro->node);
+		INIT_HLIST_NODE(&lro->flush_node);
+		hlist_add_head(&lro->node, &ring->lro_free);
+	}
+	return 0;
+}
+
+

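The flush path above rebuilds the IP/TCP headers of the aggregated super-packet and derives th->check from the per-segment payload checksums accumulated in lro->data_csum. The stand-alone sketch below (plain userspace C; csum16() and the sample buffers are invented for illustration and are not the kernel checksum API) shows the underlying ones'-complement bookkeeping: segment sums are accumulated as segments arrive, and the pseudo-header plus rebuilt TCP header are folded in before the final complement.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Ones'-complement sum over a buffer, continuing from a previous sum. */
static uint16_t csum16(const uint8_t *buf, size_t len, uint16_t init)
{
	uint32_t sum = init;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)((buf[i] << 8) | buf[i + 1]);
	if (len & 1)
		sum += (uint32_t)(buf[len - 1] << 8);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	/* Two aggregated payload segments (even sized, so their sums simply
	 * add, mirroring csum_block_add() on lro->data_csum). */
	const uint8_t seg1[] = { 0x12, 0x34, 0x56, 0x78 };
	const uint8_t seg2[] = { 0x9a, 0xbc };
	/* Stand-in bytes for the IPv4 pseudo-header plus the rebuilt TCP
	 * header (the role csum_tcpudp_magic() plays in the flush above). */
	const uint8_t hdrs[] = { 0xc0, 0xa8, 0x00, 0x01, 0xc0, 0xa8, 0x00, 0x02,
				 0x00, 0x06, 0x00, 0x1a, 0x30, 0x39, 0x00, 0x50 };
	uint16_t sum;

	sum = csum16(seg1, sizeof(seg1), 0);
	sum = csum16(seg2, sizeof(seg2), sum);
	sum = csum16(hdrs, sizeof(hdrs), sum);
	printf("final checksum field: 0x%04x\n", (uint16_t)~sum);
	return 0;
}
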
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_main.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_main.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_main.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -165,18 +165,16 @@
 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
 		mdev->port_cnt++;
 
-	/* If we did not receive an explicit number of Rx rings, default to
-	 * the number of completion vectors populated by the mlx4_core */
+	/* Number of RX rings is the minimum between:
+	 * number of completion vectors + 1 (for the default ring)
+	 * and MAX_RX_RINGS */
 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
 		mlx4_info(mdev, "Using %d tx rings for port:%d\n",
 			  mdev->profile.prof[i].tx_ring_num, i);
-		if (!mdev->profile.prof[i].rx_ring_num) {
-			mdev->profile.prof[i].rx_ring_num = dev->caps.num_comp_vectors;
-			mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
-				  dev->caps.num_comp_vectors, i);
-		} else
-			mlx4_info(mdev, "Using %d rx rings for port:%d\n",
-				  mdev->profile.prof[i].rx_ring_num, i);
+		mdev->profile.prof[i].rx_ring_num =
+			min_t(int, dev->caps.num_comp_vectors + 1, MAX_RX_RINGS);
+		mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
+			  mdev->profile.prof[i].rx_ring_num, i);
 	}
 
 	/* Create our own workqueue for reset/multicast tasks
@@ -185,7 +183,7 @@
 	mdev->workqueue = create_singlethread_workqueue("mlx4_en");
 	if (!mdev->workqueue) {
 		err = -ENOMEM;
-		goto err_close_nic;
+		goto err_mr;
 	}
 
 	/* At this stage all non-port specific tasks are complete:
@@ -218,9 +216,8 @@
 	flush_workqueue(mdev->workqueue);
 
 	/* Stop event queue before we drop down to release shared SW state */
-
-err_close_nic:
 	destroy_workqueue(mdev->workqueue);
+
 err_mr:
 	mlx4_mr_free(dev, &mdev->mr);
 err_uar:

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_netdev.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_netdev.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_netdev.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -334,7 +334,10 @@
 		cq = &priv->rx_cq[i];
 		spin_lock_irqsave(&cq->lock, flags);
 		napi_synchronize(&cq->napi);
-		mlx4_en_process_rx_cq(dev, cq, 0);
+		if (priv->rx_ring[i].use_frags)
+			mlx4_en_process_rx_cq(dev, cq, 0);
+		else
+			mlx4_en_process_rx_cq_skb(dev, cq, 0);
 		spin_unlock_irqrestore(&cq->lock, flags);
 	}
 }
@@ -348,11 +351,9 @@
 	if (netif_msg_timer(priv))
 		mlx4_warn(mdev, "Tx timeout called on port:%d\n", priv->port);
 
-	if (netif_carrier_ok(dev)) {
-		priv->port_stats.tx_timeout++;
-		mlx4_dbg(DRV, priv, "Scheduling watchdog\n");
-		queue_work(mdev->workqueue, &priv->watchdog_task);
-	}
+	priv->port_stats.tx_timeout++;
+	mlx4_dbg(DRV, priv, "Scheduling watchdog\n");
+	queue_work(mdev->workqueue, &priv->watchdog_task);
 }
 
 
@@ -369,7 +370,6 @@
 
 void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
 {
-	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_cq *cq;
 	int i;
 
@@ -379,15 +379,8 @@
 	 *   satisfy our coelsing target.
 	 * - moder_time is set to a fixed value.
 	 */
-	priv->rx_frames = (mdev->profile.rx_moder_cnt ==
-			   MLX4_EN_AUTO_CONF) ?
-				MLX4_EN_RX_COAL_TARGET /
-				priv->dev->mtu + 1 :
-				mdev->profile.rx_moder_cnt;
-	priv->rx_usecs = (mdev->profile.rx_moder_time ==
-			  MLX4_EN_AUTO_CONF) ?
-				MLX4_EN_RX_COAL_TIME :
-				mdev->profile.rx_moder_time;
+	priv->rx_frames = MLX4_EN_RX_COAL_TARGET / priv->dev->mtu + 1;
+	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
 	mlx4_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
 			     "rx_frames:%d rx_usecs:%d\n",
 		 priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
@@ -411,7 +404,7 @@
 	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
 	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
 	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
-	priv->adaptive_rx_coal = mdev->profile.auto_moder;
+	priv->adaptive_rx_coal = 1;
 	priv->last_moder_time = MLX4_EN_AUTO_CONF;
 	priv->last_moder_jiffies = 0;
 	priv->last_moder_packets = 0;
@@ -569,7 +562,6 @@
 	struct mlx4_en_rx_ring *rx_ring;
 	int rx_index = 0;
 	int tx_index = 0;
-	u16 stride;
 	int err = 0;
 	int i;
 	int j;
@@ -583,8 +575,6 @@
 	dev->mtu = min(dev->mtu, priv->max_mtu);
 	mlx4_en_calc_rx_buf(dev);
 	mlx4_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
-	stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
-				    DS_SIZE * priv->num_frags);
 	/* Configure rx cq's and rings */
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		cq = &priv->rx_cq[i];
@@ -593,7 +583,7 @@
 		err = mlx4_en_activate_cq(priv, cq);
 		if (err) {
 			mlx4_err(mdev, "Failed activating Rx CQ\n");
-			goto rx_err;
+			goto cq_err;
 		}
 		for (j = 0; j < cq->size; j++)
 			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
@@ -654,6 +644,16 @@
 		++tx_index;
 	}
 
+	for (i = 0; i < MLX4_EN_TX_HASH_SIZE; i++) {
+		memset(&priv->tx_hash[i], 0, sizeof(struct mlx4_en_tx_hash_entry));
+		/*
+		 * Initially, all streams are assigned to the rings that should
+		 * handle the small-packet streams (the lower ring indexes), then
+		 * moved according to the stream characteristics.
+		 */
+		priv->tx_hash[i].ring = i & (MLX4_EN_NUM_HASH_RINGS / 2 - 1);
+	}
+
 	/* Configure port */
 	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
 				    priv->rx_skb_size + ETH_FCS_LEN,
@@ -769,9 +769,14 @@
 	struct net_device *dev = priv->dev;
 
 	mlx4_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
-	mlx4_en_stop_port(dev);
-	if (mlx4_en_start_port(dev))
-	    mlx4_err(mdev, "Failed restarting port %d\n", priv->port);
+
+	mutex_lock(&mdev->state_lock);
+	if (priv->port_up) {
+		mlx4_en_stop_port(dev);
+		if (mlx4_en_start_port(dev))
+		    mlx4_err(mdev, "Failed restarting port %d\n", priv->port);
+	}
+	mutex_unlock(&mdev->state_lock);
 }
 
 
@@ -834,7 +839,7 @@
 	return 0;
 }
 
-static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 {
 	int i;
 
@@ -853,7 +858,7 @@
 	}
 }
 
-static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_port_profile *prof = priv->prof;
@@ -872,12 +877,17 @@
 
 	/* Create rx Rings */
 	for (i = 0; i < priv->rx_ring_num; i++) {
+		if (i > 0)
+			priv->rx_ring[i].use_frags = 1;
+		else
+			priv->rx_ring[i].use_frags = 0;
+
 		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
 				      prof->rx_ring_size, i, RX))
 			goto err;
 
 		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
-					   prof->rx_ring_size, priv->stride))
+					   prof->rx_ring_size))
 			goto err;
 	}
 
@@ -961,7 +971,7 @@
 	int i;
 	int err;
 
-	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
 	if (dev == NULL) {
 		mlx4_err(mdev, "Net device allocation failed\n");
 		return -ENOMEM;
@@ -1005,8 +1015,6 @@
 		goto out;
 	}
 
-	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
-					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
 	err = mlx4_en_alloc_resources(priv);
 	if (err)
 		goto out;
@@ -1024,7 +1032,8 @@
 	priv->allocated = 1;
 
 	/* Populate Tx priority mappings */
-	mlx4_en_set_prio_map(priv, priv->tx_prio_map, prof->tx_ring_num);
+	mlx4_en_set_prio_map(priv, priv->tx_prio_map,
+			     prof->tx_ring_num - MLX4_EN_NUM_HASH_RINGS);
 
 	/*
 	 * Initialize netdev entry points
@@ -1033,6 +1042,7 @@
 	dev->open = &mlx4_en_open;
 	dev->stop = &mlx4_en_close;
 	dev->hard_start_xmit = &mlx4_en_xmit;
+	dev->select_queue = &mlx4_en_select_queue;
 	dev->get_stats = &mlx4_en_get_stats;
 	dev->set_multicast_list = &mlx4_en_set_multicast;
 	dev->set_mac_address = &mlx4_en_set_mac;
@@ -1057,7 +1067,7 @@
 	 * Set driver features
 	 */
 	dev->features |= NETIF_F_SG;
-	dev->features |= NETIF_F_HW_CSUM;
+	dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 	dev->features |= NETIF_F_HIGHDMA;
 	dev->features |= NETIF_F_HW_VLAN_TX |
 			 NETIF_F_HW_VLAN_RX |

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_params.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_params.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_params.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -53,90 +53,41 @@
 MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS");
 
 /* RSS hash type mask - default to <saddr, daddr, sport, dport> */
-MLX4_EN_PARM_INT(rss_mask, 0xf, "RSS hash type bitmask");
+MLX4_EN_PARM_INT(rss_mask, 0x5, "RSS hash type bitmask");
 
 /* Number of LRO sessions per Rx ring (rounded up to a power of two) */
 MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS,
 		 "Number of LRO sessions per ring or disabled (0)");
 
+/* Allow reassembly of fragmented IP packets */
+MLX4_EN_PARM_INT(ip_reasm, 1, "Allow reassembly of fragmented IP packets (!0)");
+
 /* Priority pausing */
-MLX4_EN_PARM_INT(pptx, MLX4_EN_DEF_TX_PAUSE,
-		 "Pause policy on TX: 0 never generate pause frames "
-		 "1 generate pause frames according to RX buffer threshold");
-MLX4_EN_PARM_INT(pprx, MLX4_EN_DEF_RX_PAUSE,
-		 "Pause policy on RX: 0 ignore received pause frames "
-		 "1 respect received pause frames");
 MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
 			   " Per priority bit mask");
 MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
 			   " Per priority bit mask");
 
-/* Interrupt moderation tunning */
-MLX4_EN_PARM_INT(rx_moder_cnt, MLX4_EN_AUTO_CONF,
-	       "Max coalesced descriptors for Rx interrupt moderation");
-MLX4_EN_PARM_INT(rx_moder_time, MLX4_EN_AUTO_CONF,
-	       "Timeout following last packet for Rx interrupt moderation");
-MLX4_EN_PARM_INT(auto_moder, 1, "Enable dynamic interrupt moderation");
-
-MLX4_EN_PARM_INT(rx_ring_num1, 0, "Number or Rx rings for port 1 (0 = #cores)");
-MLX4_EN_PARM_INT(rx_ring_num2, 0, "Number or Rx rings for port 2 (0 = #cores)");
-
-MLX4_EN_PARM_INT(tx_ring_size1, MLX4_EN_AUTO_CONF, "Tx ring size for port 1");
-MLX4_EN_PARM_INT(tx_ring_size2, MLX4_EN_AUTO_CONF, "Tx ring size for port 2");
-MLX4_EN_PARM_INT(rx_ring_size1, MLX4_EN_AUTO_CONF, "Rx ring size for port 1");
-MLX4_EN_PARM_INT(rx_ring_size2, MLX4_EN_AUTO_CONF, "Rx ring size for port 2");
-
-
 int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
 {
 	struct mlx4_en_profile *params = &mdev->profile;
 	int i;
 
-	params->rx_moder_cnt = min_t(int, rx_moder_cnt, MLX4_EN_AUTO_CONF);
-	params->rx_moder_time = min_t(int, rx_moder_time, MLX4_EN_AUTO_CONF);
-	params->auto_moder = auto_moder;
 	params->rss_xor = (rss_xor != 0);
 	params->rss_mask = rss_mask & 0x1f;
 	params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS);
+	params->ip_reasm = ip_reasm;
 	for (i = 1; i <= MLX4_MAX_PORTS; i++) {
-		params->prof[i].rx_pause = pprx;
+		params->prof[i].rx_pause = 1;
 		params->prof[i].rx_ppp = pfcrx;
-		params->prof[i].tx_pause = pptx;
+		params->prof[i].tx_pause = 1;
 		params->prof[i].tx_ppp = pfctx;
+		params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
+		params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
+		params->prof[i].tx_ring_num = MLX4_EN_NUM_HASH_RINGS + 1 +
+			(!!pfcrx) * MLX4_EN_NUM_PPP_RINGS;
 	}
-	if (pfcrx || pfctx) {
-		params->prof[1].tx_ring_num = MLX4_EN_TX_RING_NUM;
-		params->prof[2].tx_ring_num = MLX4_EN_TX_RING_NUM;
-	} else {
-		params->prof[1].tx_ring_num = 1;
-		params->prof[2].tx_ring_num = 1;
-	}
-	params->prof[1].rx_ring_num = min_t(int, rx_ring_num1, MAX_RX_RINGS);
-	params->prof[2].rx_ring_num = min_t(int, rx_ring_num2, MAX_RX_RINGS);
 
-	if (tx_ring_size1 == MLX4_EN_AUTO_CONF)
-		tx_ring_size1 = MLX4_EN_DEF_TX_RING_SIZE;
-	params->prof[1].tx_ring_size =
-		(tx_ring_size1 < MLX4_EN_MIN_TX_SIZE) ?
-		 MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size1);
-
-	if (tx_ring_size2 == MLX4_EN_AUTO_CONF)
-		tx_ring_size2 = MLX4_EN_DEF_TX_RING_SIZE;
-	params->prof[2].tx_ring_size =
-		(tx_ring_size2 < MLX4_EN_MIN_TX_SIZE) ?
-		 MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size2);
-
-	if (rx_ring_size1 == MLX4_EN_AUTO_CONF)
-		rx_ring_size1 = MLX4_EN_DEF_RX_RING_SIZE;
-	params->prof[1].rx_ring_size =
-		(rx_ring_size1 < MLX4_EN_MIN_RX_SIZE) ?
-		 MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size1);
-
-	if (rx_ring_size2 == MLX4_EN_AUTO_CONF)
-		rx_ring_size2 = MLX4_EN_DEF_RX_RING_SIZE;
-	params->prof[2].rx_ring_size =
-		(rx_ring_size2 < MLX4_EN_MIN_RX_SIZE) ?
-		 MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size2);
 	return 0;
 }
 
@@ -145,21 +96,6 @@
  * Ethtool support
  */
 
-static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
-{
-	int i;
-
-	priv->port_stats.lro_aggregated = 0;
-	priv->port_stats.lro_flushed = 0;
-	priv->port_stats.lro_no_desc = 0;
-
-	for (i = 0; i < priv->rx_ring_num; i++) {
-		priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
-		priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
-		priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
-	}
-}
-
 static void
 mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
 {
@@ -269,8 +205,6 @@
 
 	spin_lock_bh(&priv->stats_lock);
 
-	mlx4_en_update_lro_stats(priv);
-
 	for (i = 0; i < NUM_MAIN_STATS; i++)
 		data[index++] = ((unsigned long *) &priv->stats)[i];
 	for (i = 0; i < NUM_PORT_STATS; i++)
@@ -324,9 +258,12 @@
 
 static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	int trans_type = priv->mdev->dev->caps.trans_type[priv->port];
+
 	cmd->autoneg = AUTONEG_DISABLE;
 	cmd->supported = SUPPORTED_10000baseT_Full;
-	cmd->advertising = SUPPORTED_10000baseT_Full;
+	cmd->advertising = ADVERTISED_10000baseT_Full;
 	if (netif_carrier_ok(dev)) {
 		cmd->speed = SPEED_10000;
 		cmd->duplex = DUPLEX_FULL;
@@ -334,6 +271,21 @@
 		cmd->speed = -1;
 		cmd->duplex = -1;
 	}
+
+	if (trans_type > 0 && trans_type <= 0xC) {
+		cmd->port = PORT_FIBRE;
+		cmd->transceiver = XCVR_EXTERNAL;
+		cmd->supported |= SUPPORTED_FIBRE;
+		cmd->advertising |= ADVERTISED_FIBRE;
+	} else if (trans_type == 0x80 || trans_type == 0) {
+		cmd->port = PORT_TP;
+		cmd->transceiver = XCVR_INTERNAL;
+		cmd->supported |= SUPPORTED_TP;
+		cmd->advertising |= ADVERTISED_TP;
+	} else  {
+		cmd->port = -1;
+		cmd->transceiver = -1;
+	}
 	return 0;
 }
 
@@ -433,14 +385,64 @@
 	pause->rx_pause = priv->prof->rx_pause;
 }
 
+static int mlx4_en_set_ringparam(struct net_device *dev,
+				 struct ethtool_ringparam *param)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	u32 rx_size, tx_size;
+	int port_up = 0;
+	int err = 0;
+
+	if (param->rx_jumbo_pending || param->rx_mini_pending)
+		return -EINVAL;
+
+	rx_size = roundup_pow_of_two(param->rx_pending);
+	rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
+	rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
+	tx_size = roundup_pow_of_two(param->tx_pending);
+	tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
+	tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
+
+	if (rx_size == priv->prof->rx_ring_size &&
+	    tx_size == priv->prof->tx_ring_size)
+		return 0;
+
+	mutex_lock(&mdev->state_lock);
+	if (priv->port_up) {
+		port_up = 1;
+		mlx4_en_stop_port(dev);
+	}
+
+	mlx4_en_free_resources(priv);
+
+	priv->prof->tx_ring_size = tx_size;
+	priv->prof->rx_ring_size = rx_size;
+
+	err = mlx4_en_alloc_resources(priv);
+	if (err) {
+		mlx4_err(mdev, "Failed reallocating port resources\n");
+		goto out;
+	}
+	if (port_up) {
+		err = mlx4_en_start_port(dev);
+		if (err)
+			mlx4_err(mdev, "Failed starting port\n");
+	}
+
+out:
+	mutex_unlock(&mdev->state_lock);
+	return err;
+}
+
 void mlx4_en_get_ringparam(struct net_device *dev, struct ethtool_ringparam *param)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 
 	memset(param, 0, sizeof(*param));
-	param->rx_max_pending = mdev->dev->caps.max_rq_sg;
-	param->tx_max_pending = mdev->dev->caps.max_sq_sg;
+	param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
+	param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
 	param->rx_pending = mdev->profile.prof[priv->port].rx_ring_size;
 	param->tx_pending = mdev->profile.prof[priv->port].tx_ring_size;
 }
@@ -459,7 +461,7 @@
 	.get_rx_csum = mlx4_en_get_rx_csum,
 	.set_rx_csum = mlx4_en_set_rx_csum,
 	.get_tx_csum = ethtool_op_get_tx_csum,
-	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
+	.set_tx_csum = ethtool_op_set_tx_csum,
 	.get_strings = mlx4_en_get_strings,
 	.get_sset_count = mlx4_en_get_sset_count,
 	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
@@ -471,6 +473,7 @@
 	.get_pauseparam = mlx4_en_get_pauseparam,
 	.set_pauseparam = mlx4_en_set_pauseparam,
 	.get_ringparam = mlx4_en_get_ringparam,
+	.set_ringparam = mlx4_en_set_ringparam,
 	.get_flags = ethtool_op_get_flags,
 	.set_flags = ethtool_op_set_flags,
 };

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_port.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_port.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_port.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -147,6 +147,7 @@
 	struct mlx4_cmd_mailbox *mailbox;
 	u64 in_mod = reset << 8 | port;
 	int err;
+	int i;
 
 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
 	if (IS_ERR(mailbox))
@@ -161,39 +162,19 @@
 
 	spin_lock_bh(&priv->stats_lock);
 
-	stats->rx_packets = be32_to_cpu(mlx4_en_stats->RTOTFRMS) -
-			    be32_to_cpu(mlx4_en_stats->RDROP);
-	stats->tx_packets = be64_to_cpu(mlx4_en_stats->TTOT_prio_0) +
-			    be64_to_cpu(mlx4_en_stats->TTOT_prio_1) +
-			    be64_to_cpu(mlx4_en_stats->TTOT_prio_2) +
-			    be64_to_cpu(mlx4_en_stats->TTOT_prio_3) +
-			    be64_to_cpu(mlx4_en_stats->TTOT_prio_4) +
-			    be64_to_cpu(mlx4_en_stats->TTOT_prio_5) +
-			    be64_to_cpu(mlx4_en_stats->TTOT_prio_6) +
-			    be64_to_cpu(mlx4_en_stats->TTOT_prio_7) +
-			    be64_to_cpu(mlx4_en_stats->TTOT_novlan) +
-			    be64_to_cpu(mlx4_en_stats->TTOT_loopbk);
-	stats->rx_bytes = be64_to_cpu(mlx4_en_stats->ROCT_prio_0) +
-			  be64_to_cpu(mlx4_en_stats->ROCT_prio_1) +
-			  be64_to_cpu(mlx4_en_stats->ROCT_prio_2) +
-			  be64_to_cpu(mlx4_en_stats->ROCT_prio_3) +
-			  be64_to_cpu(mlx4_en_stats->ROCT_prio_4) +
-			  be64_to_cpu(mlx4_en_stats->ROCT_prio_5) +
-			  be64_to_cpu(mlx4_en_stats->ROCT_prio_6) +
-			  be64_to_cpu(mlx4_en_stats->ROCT_prio_7) +
-			  be64_to_cpu(mlx4_en_stats->ROCT_novlan);
+	stats->rx_packets = 0;
+	stats->rx_bytes = 0;
+	for (i = 0; i < priv->rx_ring_num; i++) {
+		stats->rx_packets += priv->rx_ring[i].packets;
+		stats->rx_bytes += priv->rx_ring[i].bytes;
+	}
+	stats->tx_packets = 0;
+	stats->tx_bytes = 0;
+	for (i = 0; i < priv->tx_ring_num; i++) {
+		stats->tx_packets += priv->tx_ring[i].packets;
+		stats->tx_bytes += priv->tx_ring[i].bytes;
+	}
 
-	stats->tx_bytes = be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_0) +
-			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_1) +
-			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_2) +
-			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_3) +
-			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_4) +
-			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_5) +
-			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_6) +
-			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_7) +
-			  be64_to_cpu(mlx4_en_stats->TTTLOCT_novlan) +
-			  be64_to_cpu(mlx4_en_stats->TTTLOCT_loopbk);
-
 	stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
 			   be32_to_cpu(mlx4_en_stats->RdropLength) +
 			   be32_to_cpu(mlx4_en_stats->RJBBR) +

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_resources.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_resources.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_resources.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -94,3 +94,9 @@
 
 	vunmap(buf->direct.buf);
 }
+
+void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
+{
+    return;
+}
+

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_rx.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_rx.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_rx.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -37,6 +37,7 @@
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/vmalloc.h>
+#include <linux/etherdevice.h>
 
 #include "mlx4_en.h"
 
@@ -51,18 +52,6 @@
 	return;
 }
 
-static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
-				   void **ip_hdr, void **tcpudp_hdr,
-				   u64 *hdr_flags, void *priv)
-{
-	*mac_hdr = page_address(frags->page) + frags->page_offset;
-	*ip_hdr = *mac_hdr + ETH_HLEN;
-	*tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
-	*hdr_flags = LRO_IPV4 | LRO_TCP;
-
-	return 0;
-}
-
 static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
 			      struct mlx4_en_rx_desc *rx_desc,
 			      struct skb_frag_struct *skb_frags,
@@ -144,7 +133,18 @@
 	}
 }
 
+static void
+mlx4_en_init_rx_desc_skb(struct mlx4_en_priv *priv,
+			 struct mlx4_en_rx_ring *ring, int index)
+{
+	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
 
+	/* Pre-link descriptor */
+	rx_desc->next.next_wqe_index = cpu_to_be16((index + 1) & ring->size_mask);
+	rx_desc->data->byte_count = cpu_to_be32(priv->rx_skb_size);
+	rx_desc->data->lkey = cpu_to_be32(priv->mdev->mr.key);
+}
+
 static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
 				 struct mlx4_en_rx_ring *ring, int index)
 {
@@ -176,7 +176,36 @@
 	}
 }
 
+static int
+mlx4_en_alloc_rx_skb(struct mlx4_en_priv *priv,
+		     struct mlx4_en_rx_desc *rx_desc,
+		     struct sk_buff **pskb)
+{
+	dma_addr_t dma;
+	int size = priv->rx_skb_size + NET_IP_ALIGN;
+	struct sk_buff *new_skb = alloc_skb(size, GFP_ATOMIC);
 
+	if (unlikely(new_skb == NULL))
+		return -ENOMEM;
+
+	new_skb->dev = priv->dev;
+	skb_reserve(new_skb, NET_IP_ALIGN);
+	dma = pci_map_single(priv->mdev->pdev, new_skb->data, size, DMA_FROM_DEVICE);
+	*pskb = new_skb;
+	rx_desc->data->addr = cpu_to_be64(dma);
+	return 0;
+}
+
+static int
+mlx4_en_prepare_rx_desc_skb(struct mlx4_en_priv *priv,
+			    struct mlx4_en_rx_ring *ring, int index)
+{
+	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
+	struct sk_buff **pskb = (struct sk_buff **) ring->rx_info + index;
+
+	return mlx4_en_alloc_rx_skb(priv, rx_desc, pskb);
+}
+
 static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
 				   struct mlx4_en_rx_ring *ring, int index)
 {
@@ -208,24 +237,22 @@
 	struct mlx4_en_rx_ring *ring;
 	int ring_ind;
 	int buf_ind;
+	int err;
 
 	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
 		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
 			ring = &priv->rx_ring[ring_ind];
 
-			if (mlx4_en_prepare_rx_desc(priv, ring,
-						    ring->actual_size)) {
-				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
-					mlx4_err(mdev, "Failed to allocate "
-						       "enough rx buffers\n");
-					return -ENOMEM;
-				} else {
-					if (netif_msg_rx_err(priv))
-						mlx4_warn(mdev,
-							  "Only %d buffers allocated\n",
-							  ring->actual_size);
-					goto out;
-				}
+			if (ring->use_frags)
+				err = mlx4_en_prepare_rx_desc(priv, ring,
+							      ring->actual_size);
+			else
+				err = mlx4_en_prepare_rx_desc_skb(priv, ring,
+								  ring->actual_size);
+			if (err) {
+				mlx4_err(mdev, "Failed to allocate "
+					       "enough rx buffers\n");
+				return -ENOMEM;
 			}
 			ring->actual_size++;
 			ring->prod++;
@@ -243,8 +270,12 @@
 	int err;
 
 	while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
-		err = mlx4_en_prepare_rx_desc(priv, ring, ring->prod &
-					      ring->size_mask);
+		if (ring->use_frags)
+			err = mlx4_en_prepare_rx_desc(priv, ring, ring->prod &
+						      ring->size_mask);
+		else
+			err = mlx4_en_prepare_rx_desc_skb(priv, ring, ring->prod &
+							  ring->size_mask);
 		if (err) {
 			if (netif_msg_rx_err(priv))
 				mlx4_warn(priv->mdev,
@@ -266,6 +297,7 @@
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct skb_frag_struct *skb_frags;
+	struct sk_buff *skb;
 	struct mlx4_en_rx_desc *rx_desc;
 	dma_addr_t dma;
 	int index;
@@ -279,17 +311,26 @@
 	while (ring->cons != ring->prod) {
 		index = ring->cons & ring->size_mask;
 		rx_desc = ring->buf + (index << ring->log_stride);
-		skb_frags = ring->rx_info + (index << priv->log_rx_info);
 		mlx4_dbg(DRV, priv, "Processing descriptor:%d\n", index);
 
-		for (nr = 0; nr < priv->num_frags; nr++) {
-			mlx4_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
-			dma = be64_to_cpu(rx_desc->data[nr].addr);
+		if (ring->use_frags) {
+			skb_frags = ring->rx_info + (index << priv->log_rx_info);
+			for (nr = 0; nr < priv->num_frags; nr++) {
+				mlx4_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
+				dma = be64_to_cpu(rx_desc->data[nr].addr);
 
-			mlx4_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma);
-			pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
+				mlx4_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma);
+				pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
+						 PCI_DMA_FROMDEVICE);
+				put_page(skb_frags[nr].page);
+			}
+		} else {
+			skb = *((struct sk_buff **) ring->rx_info + index);
+			dma = be64_to_cpu(rx_desc->data->addr);
+			pci_unmap_single(mdev->pdev, dma,
+					 priv->rx_skb_size + NET_IP_ALIGN,
 					 PCI_DMA_FROMDEVICE);
-			put_page(skb_frags[nr].page);
+			kfree_skb(skb);
 		}
 		++ring->cons;
 	}
@@ -332,7 +373,7 @@
 
 
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
-			   struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
+			   struct mlx4_en_rx_ring *ring, u32 size)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 	int err;
@@ -346,12 +387,18 @@
 	ring->cons = 0;
 	ring->size = size;
 	ring->size_mask = size - 1;
-	ring->stride = stride;
+	ring->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
+					  DS_SIZE * (ring->use_frags ?
+						     MLX4_EN_MAX_RX_FRAGS : 1));
 	ring->log_stride = ffs(ring->stride) - 1;
 	ring->buf_size = ring->size * ring->stride;
 
-	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
-					sizeof(struct skb_frag_struct));
+	if (ring->use_frags)
+		tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
+						sizeof(struct skb_frag_struct));
+	else
+		tmp = size * sizeof(struct sk_buff *);
+
 	ring->rx_info = vmalloc(tmp);
 	if (!ring->rx_info) {
 		mlx4_err(mdev, "Failed allocating rx_info ring\n");
@@ -372,23 +419,11 @@
 	}
 	ring->buf = ring->wqres.buf.direct.buf;
 
-	/* Configure lro mngr */
-	memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
-	ring->lro.dev = priv->dev;
-	ring->lro.features = LRO_F_NAPI;
-	ring->lro.frag_align_pad = NET_IP_ALIGN;
-	ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
-	ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
-	ring->lro.max_desc = mdev->profile.num_lro;
-	ring->lro.max_aggr = MAX_SKB_FRAGS;
-	ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
-				    sizeof(struct net_lro_desc),
-				    GFP_KERNEL);
-	if (!ring->lro.lro_arr) {
-		mlx4_err(mdev, "Failed to allocate lro array\n");
+	/* Allocate LRO sessions */
+	if (mdev->profile.num_lro && mlx4_en_lro_init(ring, mdev->profile.num_lro)) {
+		mlx4_err(mdev, "Failed allocating lro sessions\n");
 		goto err_map;
 	}
-	ring->lro.get_frag_header = mlx4_en_get_frag_header;
 
 	return 0;
 
@@ -421,30 +456,38 @@
 		ring->cons = 0;
 		ring->actual_size = 0;
 		ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
-	
-		ring->stride = stride;
+
+		if (ring->use_frags)
+			ring->stride = stride;
 		ring->log_stride = ffs(ring->stride) - 1;
 		ring->buf_size = ring->size * ring->stride;
 	
 		memset(ring->rx_info, 0, sizeof(*(ring->rx_info)));
 		memset(ring->buf, 0, ring->buf_size);
 		mlx4_en_update_rx_prod_db(ring);
-	
-		/* Initailize all descriptors */
-		for (i = 0; i < ring->size; i++)
-			mlx4_en_init_rx_desc(priv, ring, i);
-	
-		/* Initialize page allocators */
-		err = mlx4_en_init_allocator(priv, ring);
-		if (err) {
-			 mlx4_err(mdev, "Failed initializing ring allocator\n");
-			 goto err_allocator;
+
+		if (ring->use_frags) {
+			/* Initialize all descriptors */
+			for (i = 0; i < ring->size; i++)
+				mlx4_en_init_rx_desc(priv, ring, i);
+
+			/* Initialize page allocators */
+			err = mlx4_en_init_allocator(priv, ring);
+			if (err) {
+				 mlx4_err(mdev, "Failed initializing ring allocator\n");
+				 ring_ind--;
+				 goto err_allocator;
+			}
+		} else {
+			for (i = 0; i < ring->size; i++)
+				mlx4_en_init_rx_desc_skb(priv, ring, i);
 		}
 	
 		/* Fill Rx buffers */
 		ring->full = 0;
 	}
-	if (mlx4_en_fill_rx_buffers(priv))
+	err = mlx4_en_fill_rx_buffers(priv);
+	if (err)
 		goto err_buffers;
 
 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
@@ -467,6 +510,7 @@
 				     &ring->wqres.mtt, ring->wqres.db.dma, &ring->srq);
 		if (err){
 			mlx4_err(mdev, "Failed to allocate srq\n");
+			ring_ind--;
 			goto err_srq;
 		}
 		ring->srq.event = mlx4_en_srq_event;
@@ -490,7 +534,8 @@
 	ring_ind = priv->rx_ring_num - 1;
 err_allocator:
 	while (ring_ind >= 0) {
-		mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
+		if (priv->rx_ring[ring_ind].use_frags)
+			mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
 		ring_ind--;
 	}
 	return err;
@@ -501,7 +546,8 @@
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 
-	kfree(ring->lro.lro_arr);
+	if (mdev->profile.num_lro)
+		mlx4_en_lro_destroy(ring);
 	mlx4_en_unmap_buffer(&ring->wqres.buf);
 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
 	vfree(ring->rx_info);
@@ -517,17 +563,18 @@
 	mlx4_srq_remove(mdev->dev, &ring->srq);
 	mlx4_srq_free(mdev->dev, &ring->srq);
 	mlx4_en_free_rx_buf(priv, ring);
-	mlx4_en_destroy_allocator(priv, ring);
+	if (ring->use_frags)
+		mlx4_en_destroy_allocator(priv, ring);
 }
 
 
 /* Unmap a completed descriptor and free unused pages */
-static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
-				    struct mlx4_en_rx_desc *rx_desc,
-				    struct skb_frag_struct *skb_frags,
-				    struct skb_frag_struct *skb_frags_rx,
-				    struct mlx4_en_rx_alloc *page_alloc,
-				    int length)
+int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
+			     struct mlx4_en_rx_desc *rx_desc,
+			     struct skb_frag_struct *skb_frags,
+			     struct skb_frag_struct *skb_frags_rx,
+			     struct mlx4_en_rx_alloc *page_alloc,
+			     int length)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_frag_info *frag_info;
@@ -570,11 +617,11 @@
 }
 
 
-static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-				      struct mlx4_en_rx_desc *rx_desc,
-				      struct skb_frag_struct *skb_frags,
-				      struct mlx4_en_rx_alloc *page_alloc,
-				      unsigned int length)
+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
+			       struct mlx4_en_rx_desc *rx_desc,
+			       struct skb_frag_struct *skb_frags,
+			       struct mlx4_en_rx_alloc *page_alloc,
+			       unsigned int length)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct sk_buff *skb;
@@ -612,6 +659,10 @@
 		used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
 						      skb_shinfo(skb)->frags,
 						      page_alloc, length);
+		if (unlikely(!used_frags)) {
+			kfree_skb(skb);
+			return NULL;
+		}
 		skb_shinfo(skb)->nr_frags = used_frags;
 
 		/* Copy headers into the skb linear buffer */
@@ -655,7 +706,162 @@
 	}
 }
 
+static inline int invalid_cqe(struct mlx4_en_priv *priv,
+			      struct mlx4_cqe *cqe)
+{
+	/* Drop packet on bad receive or bad checksum */
+	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
+		     MLX4_CQE_OPCODE_ERROR)) {
+		mlx4_err(priv->mdev, "CQE completed in error - vendor "
+			 "syndrom:%d syndrom:%d\n",
+			 ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
+			 ((struct mlx4_err_cqe *) cqe)->syndrome);
+		return 1;
+	}
+	if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
+		mlx4_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
+		return 1;
+	}
 
+	return 0;
+}
+
+static struct sk_buff *
+mlx4_en_get_rx_skb(struct mlx4_en_priv *priv,
+		   struct mlx4_en_rx_desc *rx_desc,
+		   struct sk_buff **pskb,
+		   unsigned int length)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct sk_buff *skb;
+	dma_addr_t dma;
+
+	if (length <= MLX4_EN_SMALL_PKT_SIZE) {
+		skb = dev_alloc_skb(length + NET_IP_ALIGN);
+		if (unlikely(!skb))
+			return NULL;
+
+		skb_reserve(skb, NET_IP_ALIGN);
+		/* We are copying all relevant data to the skb - temporarily
+		 * synch buffers for the copy */
+		dma = be64_to_cpu(rx_desc->data->addr);
+		dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
+					      length, DMA_FROM_DEVICE);
+		skb_copy_to_linear_data(skb, (*pskb)->data, length);
+		dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
+						 length, DMA_FROM_DEVICE);
+
+	} else {
+		skb = *pskb;
+		if (unlikely(mlx4_en_alloc_rx_skb(priv, rx_desc, pskb)))
+			return NULL;
+
+		pci_unmap_single(mdev->pdev, be64_to_cpu(rx_desc->data->addr),
+				 be32_to_cpu(rx_desc->data->byte_count),
+				 PCI_DMA_FROMDEVICE);
+	}
+
+	skb->tail += length;
+	skb->len = length;
+	skb->truesize = length + sizeof(struct sk_buff);
+	return skb;
+}
+
+int mlx4_en_process_rx_cq_skb(struct net_device *dev,
+			      struct mlx4_en_cq *cq, int budget)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_cqe *cqe;
+	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
+	struct mlx4_en_rx_desc *rx_desc;
+	struct sk_buff **pskb;
+	struct sk_buff *skb;
+	int index;
+	unsigned int length;
+	int polled = 0;
+
+	if (!priv->port_up)
+		return 0;
+
+	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
+	 * descriptor offset can be deduced from the CQE index instead of
+	 * reading 'cqe->index' */
+	index = cq->mcq.cons_index & ring->size_mask;
+	cqe = &cq->buf[index];
+
+	/* Process all completed CQEs */
+	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
+		    cq->mcq.cons_index & cq->size)) {
+
+		pskb = (struct sk_buff **) ring->rx_info + index;
+		rx_desc = ring->buf + (index << ring->log_stride);
+
+		/*
+		 * make sure we read the CQE after we read the ownership bit
+		 */
+		rmb();
+
+		if (invalid_cqe(priv, cqe))
+			goto next;
+
+		/*
+		 * Packet is OK - process it.
+		 */
+		length = be32_to_cpu(cqe->byte_cnt);
+		ring->bytes += length;
+		ring->packets++;
+
+		skb = mlx4_en_get_rx_skb(priv, rx_desc, pskb, length);
+		if (unlikely(!skb))
+			goto next;
+		skb->protocol = eth_type_trans(skb, dev);
+
+		if (likely(priv->rx_csum && cqe->checksum == 0xffff)) {
+			priv->port_stats.rx_chksum_good++;
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		} else {
+			priv->port_stats.rx_chksum_none++;
+			skb->ip_summed = CHECKSUM_NONE;
+			if (mdev->profile.ip_reasm &&
+			    cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4) &&
+			    !mlx4_en_rx_frags(priv, ring, skb, cqe))
+				goto next;
+		}
+
+		/* Push it up the stack */
+		if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) &
+				    MLX4_CQE_VLAN_PRESENT_MASK)) {
+			vlan_hwaccel_receive_skb(skb, priv->vlgrp,
+						be16_to_cpu(cqe->sl_vid));
+		} else
+			netif_receive_skb(skb);
+
+		dev->last_rx = jiffies;
+
+next:
+		++cq->mcq.cons_index;
+		index = (cq->mcq.cons_index) & ring->size_mask;
+		cqe = &cq->buf[index];
+		if (++polled == budget)
+			goto out;
+	}
+
+	/* If CQ is empty, flush all pending IP reassembly sessions */
+	mlx4_en_flush_frags(priv, ring);
+
+out:
+	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
+	mlx4_cq_set_ci(&cq->mcq);
+	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
+	ring->cons = cq->mcq.cons_index;
+	ring->prod += polled; /* Polled descriptors were reallocated in place */
+	if (unlikely(!ring->full))
+		mlx4_en_fill_rx_buf(dev, ring);
+	mlx4_en_update_rx_prod_db(ring);
+	return polled;
+}
+
 int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -663,11 +869,9 @@
 	struct mlx4_cqe *cqe;
 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
 	struct skb_frag_struct *skb_frags;
-	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
 	struct mlx4_en_rx_desc *rx_desc;
 	struct sk_buff *skb;
 	int index;
-	int nr;
 	unsigned int length;
 	int polled = 0;
 	int ip_summed;
@@ -693,19 +897,8 @@
 		 */
 		rmb();
 
-		/* Drop packet on bad receive or bad checksum */
-		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
-						MLX4_CQE_OPCODE_ERROR)) {
-			mlx4_err(mdev, "CQE completed in error - vendor "
-				  "syndrom:%d syndrom:%d\n",
-				  ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
-				  ((struct mlx4_err_cqe *) cqe)->syndrome);
+		if (invalid_cqe(priv, cqe))
 			goto next;
-		}
-		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
-			mlx4_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
-			goto next;
-		}
 
 		/*
 		 * Packet is OK - process it.
@@ -715,41 +908,13 @@
 		ring->packets++;
 
 		if (likely(priv->rx_csum)) {
-			if ((cqe->status & MLX4_CQE_STATUS_IPOK) &&
+			if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
 			    (cqe->checksum == 0xffff)) {
 				priv->port_stats.rx_chksum_good++;
-				/* This packet is eligible for LRO if it is:
-				 * - DIX Ethernet (type interpretation)
-				 * - TCP/IP (v4)
-				 * - without IP options
-				 * - not an IP fragment */
-				if (mlx4_en_can_lro(cqe->status) &&
-				    dev->features & NETIF_F_LRO) {
-
-					nr = mlx4_en_complete_rx_desc(
-						priv, rx_desc,
-						skb_frags, lro_frags,
-						ring->page_alloc, length);
-					if (!nr)
-						goto next;
-
-					if (priv->vlgrp && (cqe->vlan_my_qpn &
-						MLX4_CQE_VLAN_PRESENT_MASK)) {
-						lro_vlan_hwaccel_receive_frags(
-						       &ring->lro, lro_frags,
-						       length, length,
-						       priv->vlgrp,
-						       be16_to_cpu(cqe->sl_vid),
-						       NULL, 0);
-					} else
-						lro_receive_frags(&ring->lro,
-								  lro_frags,
-								  length,
-								  length,
-								  NULL, 0);
-
+				if (mdev->profile.num_lro &&
+				    !mlx4_en_lro_rx(priv, ring, rx_desc,
+						    skb_frags, length, cqe))
 					goto next;
-				}
 
 				/* LRO not possible, complete processing here */
 				ip_summed = CHECKSUM_UNNECESSARY;
@@ -790,13 +955,15 @@
 		if (++polled == budget) {
 			/* We are here because we reached the NAPI budget -
 			 * flush only pending LRO sessions */
-			lro_flush_all(&ring->lro);
+			if (mdev->profile.num_lro)
+				mlx4_en_lro_flush(priv, ring, 0);
 			goto out;
 		}
 	}
 
 	/* If CQ is empty flush all LRO sessions unconditionally */
-	lro_flush_all(&ring->lro);
+	if (mdev->profile.num_lro)
+		mlx4_en_lro_flush(priv, ring, 1);
 
 out:
 	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
@@ -833,7 +1000,7 @@
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	int done;
 
-	done = mlx4_en_process_rx_cq(dev, cq, budget);
+	done = cq->process_cq(dev, cq, budget);
 
 	/* If we used up all the quota - we're probably not done yet... */
 	if (done == budget)
@@ -931,12 +1098,6 @@
 	}
 }
 
-static void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
-{
-    return;
-}
-
-
 static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv,
 				 int qpn, int srqn, int cqn,
 				 enum mlx4_qp_state *state,
@@ -1027,8 +1188,8 @@
 
 	ptr = ((void *) &context) + 0x3c;
 	rss_context = (struct mlx4_en_rss_context *) ptr;
-	rss_context->base_qpn = cpu_to_be32(ilog2(rss_map->size) << 24 |
-					    (rss_map->base_qpn));
+	rss_context->base_qpn = cpu_to_be32(ilog2(rss_map->size - 1) << 24 |
+					    (rss_map->base_qpn + 1));
 	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
 	rss_context->hash_fn = rss_xor & 0x3;
 	rss_context->flags = rss_mask << 2;

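Both RX polling loops above (mlx4_en_process_rx_cq_skb and the reworked mlx4_en_process_rx_cq) consume CQEs with the same XNOR ownership test: because the CQ size is a power of two, the bit (cons_index & size) flips on every pass over the ring, and a CQE is ready for software only when its owner bit matches that parity. A minimal sketch of the idea follows, with invented names and sizes (it is not driver code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CQ_SIZE   256u		/* power of two, as in the driver rings */
#define OWNER_BIT 0x80u

struct fake_cqe {
	uint8_t owner_sr_opcode;	/* top bit carries ownership */
};

/* Ready for software when the owner bit equals the wrap parity (XNOR). */
static bool cqe_ready(const struct fake_cqe *cqe, uint32_t cons_index)
{
	bool owner = (cqe->owner_sr_opcode & OWNER_BIT) != 0;
	bool parity = (cons_index & CQ_SIZE) != 0;

	return owner == parity;
}

int main(void)
{
	/* Slot initialized with the owner bit set (as in the CQ setup above):
	 * not ready on the first pass, ready once the index has wrapped. */
	struct fake_cqe cqe = { .owner_sr_opcode = OWNER_BIT };

	printf("first pass: %d\n", cqe_ready(&cqe, 5));
	printf("after wrap: %d\n", cqe_ready(&cqe, 5 + CQ_SIZE));
	return 0;
}
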
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_tx.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_tx.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/en_tx.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -34,9 +34,12 @@
 #include <asm/page.h>
 #include <linux/mlx4/cq.h>
 #include <linux/mlx4/qp.h>
+#include <linux/in.h>
 #include <linux/skbuff.h>
 #include <linux/if_vlan.h>
 #include <linux/vmalloc.h>
+#include <linux/tcp.h>
+#include <linux/ip.h>
 
 #include "mlx4_en.h"
 
@@ -112,6 +115,7 @@
 		mlx4_err(mdev, "Failed allocating qp %d\n", ring->qpn);
 		goto err_reserve;
 	}
+	ring->qp.event = mlx4_en_sqp_event;
 
 	return 0;
 
@@ -203,19 +207,21 @@
 
 	/* Optimize the common case when there are no wraparounds */
 	if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
-		if (tx_info->linear) {
-			pci_unmap_single(mdev->pdev,
-					 (dma_addr_t) be64_to_cpu(data->addr),
+		if (!tx_info->inl) {
+			if (tx_info->linear) {
+				pci_unmap_single(mdev->pdev,
+					(dma_addr_t) be64_to_cpu(data->addr),
 					 be32_to_cpu(data->byte_count),
 					 PCI_DMA_TODEVICE);
-			++data;
-		}
+				++data;
+			}
 
-		for (i = 0; i < frags; i++) {
-			frag = &skb_shinfo(skb)->frags[i];
-			pci_unmap_page(mdev->pdev,
-				       (dma_addr_t) be64_to_cpu(data[i].addr),
-				       frag->size, PCI_DMA_TODEVICE);
+			for (i = 0; i < frags; i++) {
+				frag = &skb_shinfo(skb)->frags[i];
+				pci_unmap_page(mdev->pdev,
+					(dma_addr_t) be64_to_cpu(data[i].addr),
+					frag->size, PCI_DMA_TODEVICE);
+			}
 		}
 		/* Stamp the freed descriptor */
 		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
@@ -224,27 +230,29 @@
 		}
 
 	} else {
-		if ((void *) data >= end) {
-			data = (struct mlx4_wqe_data_seg *)
-					(ring->buf + ((void *) data - end));
-		}
+		if (!tx_info->inl) {
+			if ((void *) data >= end) {
+				data = (struct mlx4_wqe_data_seg *)
+						(ring->buf + ((void *) data - end));
+			}
 
-		if (tx_info->linear) {
-			pci_unmap_single(mdev->pdev,
-					 (dma_addr_t) be64_to_cpu(data->addr),
+			if (tx_info->linear) {
+				pci_unmap_single(mdev->pdev,
+					(dma_addr_t) be64_to_cpu(data->addr),
 					 be32_to_cpu(data->byte_count),
 					 PCI_DMA_TODEVICE);
-			++data;
-		}
+				++data;
+			}
 
-		for (i = 0; i < frags; i++) {
-			/* Check for wraparound before unmapping */
-			if ((void *) data >= end)
-				data = (struct mlx4_wqe_data_seg *) ring->buf;
-			frag = &skb_shinfo(skb)->frags[i];
-			pci_unmap_page(mdev->pdev,
+			for (i = 0; i < frags; i++) {
+				/* Check for wraparound before unmapping */
+				if ((void *) data >= end)
+					data = (struct mlx4_wqe_data_seg *) ring->buf;
+				frag = &skb_shinfo(skb)->frags[i];
+				pci_unmap_page(mdev->pdev,
 					(dma_addr_t) be64_to_cpu(data->addr),
 					 frag->size, PCI_DMA_TODEVICE);
+			}
 		}
 		/* Stamp the freed descriptor */
 		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
@@ -297,7 +305,7 @@
 	int block = 8 / ring_num;
 	int extra = 8 - (block * ring_num);
 	int num = 0;
-	u16 ring = 1;
+	u16 ring = MLX4_EN_NUM_HASH_RINGS + 1;
 	int prio;
 
 	if (ring_num == 1) {
@@ -379,8 +387,8 @@
 
 	/* Wakeup Tx queue if this ring stopped it */
 	if (unlikely(ring->blocked)) {
-		if (((u32) (ring->prod - ring->cons) <=
-		     ring->size - HEADROOM - MAX_DESC_TXBBS) && !cq->armed) {
+		if ((u32) (ring->prod - ring->cons) <=
+		     ring->size - HEADROOM - MAX_DESC_TXBBS) {
 
 			/* TODO: support multiqueue netdevs. Currently, we block
 			 * when *any* ring is full. Note that:
@@ -392,7 +400,7 @@
 			 *   transmission on that ring would stop the queue.
 			 */
 			ring->blocked = 0;
-			netif_wake_queue(dev);
+			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
 			priv->port_stats.wake_queue++;
 		}
 	}
@@ -404,14 +412,11 @@
 	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
 	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
 
-	spin_lock_irq(&ring->comp_lock);
-	cq->armed = 0;
+	if (!spin_trylock(&ring->comp_lock))
+		return;
 	mlx4_en_process_tx_cq(cq->dev, cq);
-	if (ring->blocked)
-		mlx4_en_arm_cq(priv, cq);
-	else
-		mod_timer(&cq->timer, jiffies + 1);
-	spin_unlock_irq(&ring->comp_lock);
+	mod_timer(&cq->timer, jiffies + 1);
+	spin_unlock(&ring->comp_lock);
 }
 
 
@@ -424,8 +429,10 @@
 
 	INC_PERF_COUNTER(priv->pstats.tx_poll);
 
-	netif_tx_lock(priv->dev);
-	spin_lock_irq(&ring->comp_lock);
+	if (!spin_trylock(&ring->comp_lock)){
+		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
+		return;
+	}
 	mlx4_en_process_tx_cq(cq->dev, cq);
 	inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);
 
@@ -435,8 +442,7 @@
 	if (inflight && priv->port_up)
 		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
 
-	spin_unlock_irq(&ring->comp_lock);
-	netif_tx_unlock(priv->dev);
+	spin_unlock(&ring->comp_lock);
 }
 
 static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
@@ -479,7 +485,10 @@
 
 	/* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
 	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
-		mlx4_en_process_tx_cq(priv->dev, cq);
+		if (spin_trylock(&ring->comp_lock)) {
+			mlx4_en_process_tx_cq(priv->dev, cq);
+			spin_unlock(&ring->comp_lock);
+		}
 }
 
 static void *get_frag_ptr(struct sk_buff *skb)
@@ -611,21 +620,55 @@
 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
 }
 
-static int get_vlan_info(struct mlx4_en_priv *priv, struct sk_buff *skb,
-			 u16 *vlan_tag)
+int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
-	int tx_ind;
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	u16 vlan_tag = 0;
+	int tx_ind = 0;
+	struct tcphdr *th = tcp_hdr(skb);
+	struct iphdr *iph = ip_hdr(skb);
+	struct mlx4_en_tx_hash_entry *entry;
+	u32 hash_index;
 
 	/* Obtain VLAN information if present */
 	if (priv->vlgrp && vlan_tx_tag_present(skb)) {
-		*vlan_tag = vlan_tx_tag_get(skb);
+		vlan_tag = vlan_tx_tag_get(skb);
 		/* Set the Tx ring to use according to vlan priority */
-		tx_ind = priv->tx_prio_map[*vlan_tag >> 13];
-	} else {
-		*vlan_tag = 0;
-		tx_ind = 0;
+		tx_ind = priv->tx_prio_map[vlan_tag >> 13];
+		if (tx_ind)
+			return tx_ind;
 	}
-	return tx_ind;
+
+	/* Hashing is only done for TCP/IP or UDP/IP packets */
+	if (be16_to_cpu(skb->protocol) != ETH_P_IP)
+		return MLX4_EN_NUM_HASH_RINGS;
+
+	hash_index = be32_to_cpu(iph->daddr) & MLX4_EN_TX_HASH_MASK;
+	switch(iph->protocol) {
+	case IPPROTO_UDP:
+		break;
+	case IPPROTO_TCP:
+		hash_index = (hash_index ^ be16_to_cpu(th->dest ^ th->source)) &
+				MLX4_EN_TX_HASH_MASK;
+		break;
+	default:
+		return MLX4_EN_NUM_HASH_RINGS;
+	}
+
+	entry = &priv->tx_hash[hash_index];
+	if (skb->len > MLX4_EN_SMALL_PKT_SIZE)
+		entry->big_pkts++;
+	else
+		entry->small_pkts++;
+
+	if (unlikely(!(++entry->cnt))) {
+		tx_ind = hash_index & (MLX4_EN_NUM_HASH_RINGS / 2 - 1);
+		if (2 * entry->big_pkts > entry->small_pkts)
+			tx_ind += MLX4_EN_NUM_HASH_RINGS / 2;
+		entry->small_pkts = entry->big_pkts = 0;
+		entry->ring = tx_ind;
+	}
+	return entry->ring;
 }
 
 int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -645,7 +688,7 @@
 	dma_addr_t dma;
 	u32 index;
 	__be32 op_own;
-	u16 vlan_tag;
+	u16 vlan_tag = 0;
 	int i;
 	int lso_header_size;
 	void *fragptr;
@@ -668,15 +711,17 @@
 		return NETDEV_TX_OK;
 	}
 
-	tx_ind = get_vlan_info(priv, skb, &vlan_tag);
+	tx_ind = skb->queue_mapping;
 	ring = &priv->tx_ring[tx_ind];
+	if (priv->vlgrp && vlan_tx_tag_present(skb))
+		vlan_tag = vlan_tx_tag_get(skb);
 
 	/* Check available TXBBs And 2K spare for prefetch */
         if (unlikely(((int)(ring->prod - ring->cons)) >
 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
 		/* every full Tx ring stops queue.
 		 * TODO: implement multi-queue support (per-queue stop) */
-		netif_stop_queue(dev);
+		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
 		ring->blocked = 1;
 		priv->port_stats.queue_stopped++;
 
@@ -789,8 +834,11 @@
 			wmb();
 			data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
 		}
-	} else
+		tx_info->inl = 0;
+	} else {
 		build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
+		tx_info->inl = 1;
+	}
 
 	ring->prod += nr_txbb;
 

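The new mlx4_en_select_queue() spreads non-priority traffic over the hash rings by folding the destination IPv4 address (and, for TCP, the XOR of the two port numbers) into an 8-bit bucket, and re-picks a ring for that bucket each time its 8-bit packet counter wraps. A standalone sketch of just the bucket computation, assuming the MLX4_EN_TX_HASH_SIZE/MASK values from this patch; the function below is an illustration, not driver code:

#include <stdint.h>
#include <stdio.h>

#define TX_HASH_SIZE	256			/* mirrors MLX4_EN_TX_HASH_SIZE */
#define TX_HASH_MASK	(TX_HASH_SIZE - 1)	/* mirrors MLX4_EN_TX_HASH_MASK */

/* daddr/dport/sport are host-order here; the driver converts the
 * big-endian header fields with be32_to_cpu()/be16_to_cpu() first. */
static unsigned int tx_hash_index(uint32_t daddr, uint16_t dport,
				  uint16_t sport, int is_tcp)
{
	unsigned int hash = daddr & TX_HASH_MASK;

	if (is_tcp)
		hash = (hash ^ (unsigned int)(dport ^ sport)) & TX_HASH_MASK;
	return hash;
}

int main(void)
{
	/* Two TCP flows to the same host land in different buckets
	 * because their source ports differ. */
	printf("%u %u\n",
	       tx_hash_index(0x0a000001, 80, 40000, 1),
	       tx_hash_index(0x0a000001, 80, 40001, 1));
	return 0;
}

When the per-bucket counter wraps, the bucket is steered to the lower or upper half of the hash rings depending on the observed mix of small and large packets since the previous wrap.
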
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/mlx4_en.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/mlx4_en.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/drivers/net/mlx4/mlx4_en.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -38,7 +38,7 @@
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/netdevice.h>
-#include <linux/inet_lro.h>
+#include <net/checksum.h>
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/qp.h>
@@ -49,8 +49,8 @@
 #include "en_port.h"
 
 #define DRV_NAME	"mlx4_en"
-#define DRV_VERSION	"1.4.0"
-#define DRV_RELDATE	"Dec 2008"
+#define DRV_VERSION	"1.4.1"
+#define DRV_RELDATE	"April 2009"
 
 
 #define MLX4_EN_MSG_LEVEL	(NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
@@ -77,10 +77,10 @@
 
 #define MLX4_EN_PAGE_SHIFT	12
 #define MLX4_EN_PAGE_SIZE	(1 << MLX4_EN_PAGE_SHIFT)
-#define MAX_TX_RINGS		16
-#define MAX_RX_RINGS		16
+#define MAX_TX_RINGS		(MLX4_EN_NUM_HASH_RINGS + MLX4_EN_NUM_PPP_RINGS + 1)
+#define MAX_RX_RINGS		17
 #define MAX_RSS_MAP_SIZE	64
-#define RSS_FACTOR		2
+#define RSS_FACTOR		1
 #define TXBB_SIZE		64
 #define HEADROOM		(2048 / TXBB_SIZE + 1)
 #define MAX_LSO_HDR_SIZE	92
@@ -104,6 +104,7 @@
 #define MLX4_EN_ALLOC_SIZE	(PAGE_SIZE << MLX4_EN_ALLOC_ORDER)
 
 #define MLX4_EN_MAX_LRO_DESCRIPTORS	32
+#define MLX4_EN_NUM_IPFRAG_SESSIONS	16
 
 /* Receive fragment sizes; we use at most 4 fragments (for 9600 byte MTU
  * and 4K allocations) */
@@ -115,12 +116,20 @@
 };
 #define MLX4_EN_MAX_RX_FRAGS	4
 
+/* Maximum ring sizes */
+#define MLX4_EN_MAX_TX_SIZE	8192
+#define MLX4_EN_MAX_RX_SIZE	8192
+
 /* Minimum ring size for our page-allocation scheme to work */
 #define MLX4_EN_MIN_RX_SIZE	(MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
 #define MLX4_EN_MIN_TX_SIZE	(4096 / TXBB_SIZE)
 
-#define MLX4_EN_TX_RING_NUM		9
-#define MLX4_EN_DEF_TX_RING_SIZE	1024
+#define MLX4_EN_SMALL_PKT_SIZE		64
+#define MLX4_EN_TX_HASH_SIZE		256
+#define MLX4_EN_TX_HASH_MASK		(MLX4_EN_TX_HASH_SIZE - 1)
+#define MLX4_EN_NUM_HASH_RINGS		8
+#define MLX4_EN_NUM_PPP_RINGS		8
+#define MLX4_EN_DEF_TX_RING_SIZE	512
 #define MLX4_EN_DEF_RX_RING_SIZE  	1024
 
 /* Target number of bytes to coalesce with interrupt moderation */
@@ -202,6 +211,7 @@
 	u32 nr_txbb;
 	u8 linear;
 	u8 data_offset;
+	u8 inl;
 };
 
 
@@ -253,17 +263,62 @@
 	spinlock_t comp_lock;
 };
 
+
+struct mlx4_en_ipfrag {
+	struct sk_buff *fragments;
+	struct sk_buff *last;
+	__be32          saddr;
+	__be32          daddr;
+	__be16          id;
+	u8              protocol;
+	int             total_len;
+	u16             offset;
+	unsigned int	vlan;
+	__be16		sl_vid;
+};
+
 struct mlx4_en_rx_desc {
 	struct mlx4_wqe_srq_next_seg next;
 	/* actual number of entries depends on rx ring stride */
 	struct mlx4_wqe_data_seg data[0];
 };
 
+struct mlx4_en_lro {
+	struct hlist_node node;
+	struct hlist_node flush_node;
+
+	/* Id fields come first: */
+	u32 saddr;
+	u32 daddr;
+	u32 sport_dport;
+	u32 next_seq;
+	u16 tot_len;
+	u8 psh;
+
+	u32 tsval;
+	u32 tsecr;
+	u32 ack_seq;
+	u16 window;
+	__be16 vlan_prio;
+	u16 has_vlan;
+	u16 has_timestamp;
+	u16 mss;
+	__wsum  data_csum;
+
+	unsigned long expires;
+	struct sk_buff *skb;
+	struct sk_buff *skb_last;
+};
+
+
 struct mlx4_en_rx_ring {
 	struct mlx4_srq srq;
 	struct mlx4_hwq_resources wqres;
 	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
-	struct net_lro_mgr lro;
+	struct mlx4_en_lro lro;
+	struct hlist_head *lro_hash;
+	struct hlist_head lro_free;
+	struct hlist_head lro_flush;
 	u32 size ;	/* number of Rx descs*/
 	u32 actual_size;
 	u32 size_mask;
@@ -279,6 +334,8 @@
 	void *rx_info;
 	unsigned long bytes;
 	unsigned long packets;
+	struct mlx4_en_ipfrag ipfrag[MLX4_EN_NUM_IPFRAG_SESSIONS];
+	unsigned int use_frags;
 };
 
 
@@ -311,7 +368,7 @@
 	enum cq_type is_tx;
 	u16 moder_time;
 	u16 moder_cnt;
-	int armed;
+	int (*process_cq)(struct net_device *, struct mlx4_en_cq *, int);
 	struct mlx4_cqe *buf;
 #define MLX4_EN_OPCODE_ERROR	0x1e
 };
@@ -331,12 +388,10 @@
 struct mlx4_en_profile {
 	int rss_xor;
 	int num_lro;
+	int ip_reasm;
 	u8 rss_mask;
 	u32 active_ports;
 	u32 small_pkt_int;
-	int rx_moder_cnt;
-	int rx_moder_time;
-	int auto_moder;
 	u8 no_reset;
 	struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
 };
@@ -420,6 +475,13 @@
 
 };
 
+struct mlx4_en_tx_hash_entry {
+	u8 cnt;
+	unsigned int small_pkts;
+	unsigned int big_pkts;
+	unsigned int ring;
+};
+
 struct mlx4_en_priv {
 	struct mlx4_en_dev *mdev;
 	struct mlx4_en_port_profile *prof;
@@ -453,7 +515,6 @@
 	int port;
 	int registered;
 	int allocated;
-	int stride;
 	int rx_csum;
 	u64 mac;
 	int mac_index;
@@ -475,6 +536,7 @@
 	struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
 	struct mlx4_en_cq tx_cq[MAX_TX_RINGS];
 	struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
+	struct mlx4_en_tx_hash_entry tx_hash[MLX4_EN_TX_HASH_SIZE];
 	struct work_struct mcast_task;
 	struct work_struct mac_task;
 	struct delayed_work refill_task;
@@ -488,7 +550,10 @@
 	struct mlx4_en_stat_out_mbox hw_stats;
 };
 
-
+int mlx4_en_rx_frags(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
+		     struct sk_buff *skb, struct mlx4_cqe *cqe);
+void mlx4_en_flush_frags(struct mlx4_en_priv *priv,
+			 struct mlx4_en_rx_ring *ring);
 void mlx4_en_destroy_netdev(struct net_device *dev);
 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 			struct mlx4_en_port_profile *prof);
@@ -496,6 +561,9 @@
 int mlx4_en_start_port(struct net_device *dev);
 void mlx4_en_stop_port(struct net_device *dev);
 
+void mlx4_en_free_resources(struct mlx4_en_priv *priv);
+int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
+
 int mlx4_en_get_profile(struct mlx4_en_dev *mdev);
 
 int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
@@ -510,6 +578,7 @@
 void mlx4_en_poll_tx_cq(unsigned long data);
 void mlx4_en_tx_irq(struct mlx4_cq *mcq);
 int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
 			   u32 size, u16 stride);
@@ -521,8 +590,7 @@
 				struct mlx4_en_tx_ring *ring);
 
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
-			   struct mlx4_en_rx_ring *ring,
-			   u32 size, u16 stride);
+			   struct mlx4_en_rx_ring *ring, u32 size);
 void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
 			     struct mlx4_en_rx_ring *ring);
 int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
@@ -531,17 +599,42 @@
 int mlx4_en_process_rx_cq(struct net_device *dev,
 			  struct mlx4_en_cq *cq,
 			  int budget);
+int mlx4_en_process_rx_cq_skb(struct net_device *dev,
+			      struct mlx4_en_cq *cq,
+			      int budget);
 int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
 void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
 			     int is_tx, int rss, int qpn, int cqn, int srqn,
 			     struct mlx4_qp_context *context);
+void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
 int mlx4_en_map_buffer(struct mlx4_buf *buf);
 void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
 
 void mlx4_en_calc_rx_buf(struct net_device *dev);
+int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
+			     struct mlx4_en_rx_desc *rx_desc,
+			     struct skb_frag_struct *skb_frags,
+			     struct skb_frag_struct *skb_frags_rx,
+			     struct mlx4_en_rx_alloc *page_alloc,
+			     int length);
+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
+			       struct mlx4_en_rx_desc *rx_desc,
+			       struct skb_frag_struct *skb_frags,
+			       struct mlx4_en_rx_alloc *page_alloc,
+			       unsigned int length);
+
 void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
 				 struct mlx4_en_rss_map *rss_map,
 				 int num_entries, int num_rings);
+
+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all);
+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
+		   struct mlx4_en_rx_desc *rx_desc,
+		   struct skb_frag_struct *skb_frags,
+		   unsigned int length, struct mlx4_cqe *cqe);
+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring);
+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro);
+
 void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
 int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
 void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);

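Reading these defines together with the en_tx.c changes above, the TX ring layout appears to be: rings 0..7 are the hash-selected rings, ring 8 (MLX4_EN_NUM_HASH_RINGS) is the fallback returned for traffic that is not TCP or UDP over IPv4, and rings 9..16 are the per-VLAN-priority rings filled in by mlx4_en_set_prio_map(); that is where the new MAX_TX_RINGS = MLX4_EN_NUM_HASH_RINGS + MLX4_EN_NUM_PPP_RINGS + 1 = 8 + 8 + 1 = 17 comes from.
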
Deleted: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/fs/nfsctl.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/fs/nfsctl.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/fs/nfsctl.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,112 +0,0 @@
-/*
- *	fs/nfsctl.c
- *
- *	This should eventually move to userland.
- *
- */
-#include <linux/types.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/syscall.h>
-#include <linux/linkage.h>
-#include <linux/namei.h>
-#include <linux/mount.h>
-#include <linux/syscalls.h>
-#include <asm/uaccess.h>
-
-/*
- * open a file on nfsd fs
- */
-
-static struct file *do_open(char *name, int flags)
-{
-	struct nameidata nd;
-	struct vfsmount *mnt;
-	int error;
-
-	mnt = do_kern_mount("nfsd", 0, "nfsd", NULL);
-	if (IS_ERR(mnt))
-		return (struct file *)mnt;
-
-	error = vfs_path_lookup(mnt->mnt_root, mnt, name, 0, &nd);
-	mntput(mnt);	/* drop do_kern_mount reference */
-	if (error)
-		return ERR_PTR(error);
-
-	if (flags == O_RDWR)
-		error = may_open(&nd,MAY_READ|MAY_WRITE,FMODE_READ|FMODE_WRITE);
-	else
-		error = may_open(&nd, MAY_WRITE, FMODE_WRITE);
-
-	if (!error)
-		return dentry_open(nd.path.dentry, nd.path.mnt, flags);
-
-	path_put(&nd.path);
-	return ERR_PTR(error);
-}
-
-static struct {
-	char *name; int wsize; int rsize;
-} map[] = {
-	[NFSCTL_SVC] = {
-		.name	= ".svc",
-		.wsize	= sizeof(struct nfsctl_svc)
-	},
-	[NFSCTL_ADDCLIENT] = {
-		.name	= ".add",
-		.wsize	= sizeof(struct nfsctl_client)
-	},
-	[NFSCTL_DELCLIENT] = {
-		.name	= ".del",
-		.wsize	= sizeof(struct nfsctl_client)
-	},
-	[NFSCTL_EXPORT] = {
-		.name	= ".export",
-		.wsize	= sizeof(struct nfsctl_export)
-	},
-	[NFSCTL_UNEXPORT] = {
-		.name	= ".unexport",
-		.wsize	= sizeof(struct nfsctl_export)
-	},
-	[NFSCTL_GETFD] = {
-		.name	= ".getfd",
-		.wsize	= sizeof(struct nfsctl_fdparm),
-		.rsize	= NFS_FHSIZE
-	},
-	[NFSCTL_GETFS] = {
-		.name	= ".getfs",
-		.wsize	= sizeof(struct nfsctl_fsparm),
-		.rsize	= sizeof(struct knfsd_fh)
-	},
-};
-
-long
-asmlinkage sys_nfsservctl(int cmd, struct nfsctl_arg __user *arg, void __user *res)
-{
-	struct file *file;
-	void __user *p = &arg->u;
-	int version;
-	int err;
-
-	if (copy_from_user(&version, &arg->ca_version, sizeof(int)))
-		return -EFAULT;
-
-	if (version != NFSCTL_VERSION)
-		return -EINVAL;
-
-	if (cmd < 0 || cmd >= ARRAY_SIZE(map) || !map[cmd].name)
-		return -EINVAL;
-
-	file = do_open(map[cmd].name, map[cmd].rsize ? O_RDWR : O_WRONLY);	
-	if (IS_ERR(file))
-		return PTR_ERR(file);
-	err = file->f_op->write(file, p, map[cmd].wsize, &file->f_pos);
-	if (err >= 0 && map[cmd].rsize)
-		err = file->f_op->read(file, res, map[cmd].rsize, &file->f_pos);
-	if (err >= 0)
-		err = 0;
-	fput(file);
-	return err;
-}

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16/include/linux/netdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16/include/linux/netdevice.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16/include/linux/netdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -40,6 +40,7 @@
 
 #define NETIF_F_TSO6    0
 #define NETIF_F_LRO		32768   /* large receive offload */
+#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
 
 #define dev_get_by_name(net, name) dev_get_by_name(name)
 #endif

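The value 16 chosen for the backported NETIF_F_IPV6_CSUM matches the bit mainline uses for this flag, so a driver built against these compat headers can keep advertising dev->features |= NETIF_F_IPV6_CSUM without remapping anything on newer kernels. The same define is added to each of the 2.6.16-based backport trees below.
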
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10/include/linux/inetdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10/include/linux/inetdevice.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10/include/linux/inetdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -7,16 +7,24 @@
 static inline struct net_device *xxx_ip_dev_find(u32 addr)
 {
 	struct net_device *dev;
-	u32 ip;
+	struct in_ifaddr **ifap;
+	struct in_ifaddr *ifa;
+	struct in_device *in_dev;
 
 	read_lock(&dev_base_lock);
-	for (dev = dev_base; dev; dev = dev->next) {
-		ip = inet_select_addr(dev, 0, RT_SCOPE_HOST);
-		if (ip == addr) {
-			dev_hold(dev);
-			break;
+	for (dev = dev_base; dev; dev = dev->next)
+		if ((in_dev = in_dev_get(dev))) {
+			for (ifap = &in_dev->ifa_list; (ifa = *ifap);
+			     ifap = &ifa->ifa_next) {
+				if (addr == ifa->ifa_address) {
+					dev_hold(dev);
+					in_dev_put(in_dev);
+					goto found;
+				}
+			}
+			in_dev_put(in_dev);
 		}
-	}
+found:
 	read_unlock(&dev_base_lock);
 
 	return dev;

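The reworked xxx_ip_dev_find() compatibility helper no longer compares against the single address returned by inet_select_addr(); it walks each device's in_device address list, so the lookup now matches any address configured on a device (including secondary addresses), takes a reference on the matching net_device, and drops the in_device reference before returning. The identical change is applied to the sles10_sp1 tree below.
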
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10/include/linux/netdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10/include/linux/netdevice.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10/include/linux/netdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -40,6 +40,7 @@
 
 #define NETIF_F_TSO6    0
 #define NETIF_F_LRO		32768   /* large receive offload */
+#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
 
 #define dev_get_by_name(net, name) dev_get_by_name(name)
 #endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp1/include/linux/inetdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp1/include/linux/inetdevice.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp1/include/linux/inetdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -7,16 +7,24 @@
 static inline struct net_device *xxx_ip_dev_find(u32 addr)
 {
 	struct net_device *dev;
-	u32 ip;
+	struct in_ifaddr **ifap;
+	struct in_ifaddr *ifa;
+	struct in_device *in_dev;
 
 	read_lock(&dev_base_lock);
-	for (dev = dev_base; dev; dev = dev->next) {
-		ip = inet_select_addr(dev, 0, RT_SCOPE_HOST);
-		if (ip == addr) {
-			dev_hold(dev);
-			break;
+	for (dev = dev_base; dev; dev = dev->next)
+		if ((in_dev = in_dev_get(dev))) {
+			for (ifap = &in_dev->ifa_list; (ifa = *ifap);
+			     ifap = &ifa->ifa_next) {
+				if (addr == ifa->ifa_address) {
+					dev_hold(dev);
+					in_dev_put(in_dev);
+					goto found;
+				}
+			}
+			in_dev_put(in_dev);
 		}
-	}
+found:
 	read_unlock(&dev_base_lock);
 
 	return dev;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp1/include/linux/log2.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp1/include/linux/log2.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp1/include/linux/log2.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -64,6 +64,15 @@
 	return 1UL << fls_long(n - 1);
 }
 
+/*
+ * round down to nearest power of two
+ */
+static inline __attribute__((const))
+unsigned long __rounddown_pow_of_two(unsigned long n)
+{
+	return 1UL << (fls_long(n) - 1);
+}
+
 /**
  * ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value
  * @n - parameter
@@ -166,4 +175,20 @@
 	__roundup_pow_of_two(n)			\
  )
 
+/**
+ * rounddown_pow_of_two - round the given value down to nearest power of two
+ * @n - parameter
+ *
+ * round the given value down to the nearest power of two
+ * - the result is undefined when n == 0
+ * - this can be used to initialise global variables from constant data
+ */
+#define rounddown_pow_of_two(n)			\
+(						\
+	__builtin_constant_p(n) ? (		\
+		(n == 1) ? 0 :			\
+		(1UL << ilog2(n))) :		\
+	__rounddown_pow_of_two(n)		\
+ )
+
 #endif /* _LINUX_LOG2_H */

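The new rounddown_pow_of_two() keeps only the highest set bit of its argument (undefined for n == 0, as the comment notes). A standalone sketch of what the runtime helper __rounddown_pow_of_two() evaluates to, using __builtin_clzl in place of the kernel's fls_long(); this is an illustration, not the kernel code:

#include <stdio.h>

static unsigned long rounddown_p2(unsigned long n)
{
	/* 1UL << (fls_long(n) - 1): keep only the most significant set bit */
	return 1UL << (sizeof(long) * 8 - 1 - __builtin_clzl(n));
}

int main(void)
{
	unsigned long v[] = { 1, 3, 1000, 1024, 1500 };
	unsigned int i;

	for (i = 0; i < sizeof(v) / sizeof(v[0]); i++)
		printf("%lu -> %lu\n", v[i], rounddown_p2(v[i]));
	return 0;	/* prints 1, 2, 512, 1024, 1024 */
}
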
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp1/include/linux/netdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp1/include/linux/netdevice.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp1/include/linux/netdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -16,6 +16,7 @@
 
 #define NETIF_F_TSO6    0
 #define NETIF_F_LRO		32768   /* large receive offload */
+#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
 
 #define dev_get_by_name(net, name) dev_get_by_name(name)
 #endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp1/include/linux/spinlock_types.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp1/include/linux/spinlock_types.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp1/include/linux/spinlock_types.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,7 @@
+#ifndef BACKPORT_LINUX_SPINLOCK_TYPES_H
+#define BACKPORT_LINUX_SPINLOCK_TYPES_H
+
+#include_next <linux/spinlock_types.h>
+#define __SPIN_LOCK_UNLOCKED(x) SPIN_LOCK_UNLOCKED
+
+#endif

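Like the other added compat headers, this one wraps the real header with #include_next and defines only what is missing. A hypothetical consumer (demo_lock is a made-up name, not part of the patch) can then keep the newer initializer spelling unchanged:

	/* expands to the old-style SPIN_LOCK_UNLOCKED on this kernel */
	static spinlock_t demo_lock = __SPIN_LOCK_UNLOCKED(demo_lock);
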
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/asm-generic/atomic.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/asm-generic/atomic.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/asm-generic/atomic.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,40 @@
+#ifndef __BACKPORT_ASM_GENERIC_ATOMIC_H
+#define __BACKPORT_ASM_GENERIC_ATOMIC_H
+
+#include_next <asm-generic/atomic.h>
+
+#if BITS_PER_LONG == 64
+
+static inline long atomic_long_inc_return(atomic_long_t *l)
+{
+	atomic64_t *v = (atomic64_t *)l;
+
+	return (long)atomic64_inc_return(v);
+}
+
+static inline long atomic_long_dec_return(atomic_long_t *l)
+{
+	atomic64_t *v = (atomic64_t *)l;
+
+	return (long)atomic64_dec_return(v);
+}
+
+#else
+
+static inline long atomic_long_inc_return(atomic_long_t *l)
+{
+	atomic_t *v = (atomic_t *)l;
+
+	return (long)atomic_inc_return(v);
+}
+
+static inline long atomic_long_dec_return(atomic_long_t *l)
+{
+	atomic_t *v = (atomic_t *)l;
+
+	return (long)atomic_dec_return(v);
+}
+
+#endif  /*  BITS_PER_LONG == 64  */
+
+#endif  /*  __BACKPORT_ASM_GENERIC_ATOMIC_H  */

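These helpers give the 2.6.16_sles10_sp2 tree the atomic_long_{inc,dec}_return() interface by casting atomic_long_t to the native atomic type for the word size, so callers get the post-operation value back. A hypothetical caller (demo_refs and demo_take are made-up names), assuming ATOMIC_LONG_INIT is available on this kernel:

	static atomic_long_t demo_refs = ATOMIC_LONG_INIT(0);

	/* returns the counter value after the increment */
	static long demo_take(void)
	{
		return atomic_long_inc_return(&demo_refs);
	}
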
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/backing-dev.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/backing-dev.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/backing-dev.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,72 @@
+#ifndef BACKPORT_LINUX_BACK_DEV_H
+#define BACKPORT_LINUX_BACK_DEV_H
+
+#include <linux/mm.h>
+#include_next <linux/backing-dev.h>
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+
+struct device;
+
+enum bdi_stat_item {
+	BDI_RECLAIMABLE,
+	BDI_WRITEBACK,
+	NR_BDI_STAT_ITEMS
+};
+
+
+static inline void inc_bdi_stat(struct backing_dev_info *bdi,
+		enum bdi_stat_item item)
+{
+	return;
+}
+
+static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
+		enum bdi_stat_item item)
+{
+	return;
+}
+
+static inline void dec_bdi_stat(struct backing_dev_info *bdi,
+		enum bdi_stat_item item)
+{
+	return;
+}
+
+static inline int bdi_init(struct backing_dev_info *bdi)
+{
+	return 0;
+}
+
+static inline void bdi_destroy(struct backing_dev_info *bdi)
+{
+	return;
+}
+
+static inline int bdi_register(struct backing_dev_info *bdi, struct device *parent,
+				const char *fmt, ...)
+{
+	return 0;
+}
+
+static inline int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
+{
+	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
+}
+
+static inline void bdi_unregister(struct backing_dev_info *bdi)
+{
+	return;
+}
+
+static inline void clear_bdi_congested(struct backing_dev_info *bdi, int rw)
+{
+	return;
+}
+
+static inline void set_bdi_congested(struct backing_dev_info *bdi, int rw)
+{
+	return;
+}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/capability.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/capability.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/capability.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,39 @@
+#ifndef BACKPORT_LINUX_CAPABILITY_H
+#define BACKPORT_LINUX_CAPABILITY_H
+
+#include_next <linux/capability.h>
+
+/* Override MAC access.
+   The base kernel enforces no MAC policy.
+   An LSM may enforce a MAC policy, and if it does and it chooses
+   to implement capability based overrides of that policy, this is
+   the capability it should use to do so. */
+
+#define CAP_MAC_OVERRIDE     32
+
+#define CAP_FS_MASK_B0	(CAP_TO_MASK(CAP_CHOWN)			\
+			 | CAP_TO_MASK(CAP_DAC_OVERRIDE)	\
+			 | CAP_TO_MASK(CAP_DAC_READ_SEARCH)	\
+			 | CAP_TO_MASK(CAP_FOWNER)		\
+			 | CAP_TO_MASK(CAP_FSETID))
+
+#define CAP_FS_MASK_B1	(CAP_TO_MASK(CAP_MAC_OVERRIDE))
+
+#define CAP_NFSD_SET	(CAP_FS_MASK_B0|CAP_TO_MASK(CAP_SYS_RESOURCE))
+#define CAP_FS_SET	(CAP_FS_MASK_B0)
+
+static inline kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a,
+					      const kernel_cap_t permitted)
+{
+	const kernel_cap_t __cap_nfsd_set = CAP_NFSD_SET;
+	return cap_combine(a,
+			   cap_intersect(permitted, __cap_nfsd_set));
+}
+
+static inline kernel_cap_t cap_drop_nfsd_set(const kernel_cap_t a)
+{
+	const kernel_cap_t __cap_fs_set = CAP_NFSD_SET;
+	return cap_drop(a, __cap_fs_set);
+}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/completion.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/completion.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/completion.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,8 @@
+#ifndef BACKPORT_LINUX_COMPLETION_H
+#define BACKPORT_LINUX_COMPLETION_H
+
+#include_next <linux/completion.h>
+
+#define wait_for_completion_killable(_args) wait_for_completion_interruptible(_args)
+
+#endif /* BACKPORT_LINUX_COMPLETION_H */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/dcache.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/dcache.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/dcache.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,8 @@
+#ifndef BACKPORT_LINUX_DCACHE_H
+#define BACKPORT_LINUX_DCACHE_H
+
+#include_next <linux/dcache.h>
+
+#define d_materialise_unique(dentry, inode) d_add_unique(dentry, inode)
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/err.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/err.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/err.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,19 @@
+#ifndef BACKPORT_LINUX_ERR_H
+#define BACKPORT_LINUX_ERR_H
+
+#include_next <linux/err.h>
+
+/**
+ * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
+ * @ptr: The pointer to cast.
+ *
+ * Explicitly cast an error-valued pointer to another pointer type in such a
+ * way as to make it clear that's what's going on.
+ */
+static inline void *ERR_CAST(const void *ptr)
+{
+	/* cast away the const */
+	return (void *) ptr;
+}
+
+#endif /* BACKPORT_LINUX_ERR_H */

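ERR_CAST() only documents a cast, but it keeps error propagation across pointer types readable. A kernel-style sketch; struct demo_a, struct demo_b, demo_lookup() and demo_wrap() are hypothetical names used purely for illustration:

	static struct demo_b *demo_convert(void)
	{
		struct demo_a *a = demo_lookup();	/* may return ERR_PTR(-ENOENT) */

		if (IS_ERR(a))
			return ERR_CAST(a);		/* forward the same error value */
		return demo_wrap(a);
	}
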
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/exportfs.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/exportfs.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/exportfs.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,60 @@
+#ifndef BACKPORT_LINUX_EXPORTFS_H
+#define BACKPORT_LINUX_EXPORTFS_H
+
+#ifndef LINUX_EXPORTFS_H
+#define LINUX_EXPORTFS_H 1
+
+#include <linux/types.h>
+
+struct dentry;
+struct inode;
+struct super_block;
+struct vfsmount;
+
+/*
+ * The fileid_type identifies how the file within the filesystem is encoded.
+ * In theory this is freely set and parsed by the filesystem, but we try to
+ * stick to conventions so we can share some generic code and don't confuse
+ * sniffers like ethereal/wireshark.
+ *
+ * The filesystem must not use the value '0' or '0xff'.
+ */
+enum fid_type {
+	/*
+	 * The root, or export point, of the filesystem.
+	 * (Never actually passed down to the filesystem.
+	 */
+	FILEID_ROOT = 0,
+
+	/*
+	 * 32bit inode number, 32 bit generation number.
+	 */
+	FILEID_INO32_GEN = 1,
+
+	/*
+	 * 32bit inode number, 32 bit generation number,
+	 * 32 bit parent directory inode number.
+	 */
+	FILEID_INO32_GEN_PARENT = 2,
+};
+
+struct fid {
+	union {
+		struct {
+			u32 ino;
+			u32 gen;
+			u32 parent_ino;
+			u32 parent_gen;
+		} i32;
+		__u32 raw[0];
+	};
+};
+
+extern int exportfs_encode_fh(struct dentry *dentry, struct fid *fid,
+	int *max_len, int connectable);
+extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
+	int fh_len, int fileid_type, int (*acceptable)(void *, struct dentry *),
+	void *context);
+
+#endif /* LINUX_EXPORTFS_H */
+#endif /* BACKPORT_LINUX_EXPORTFS_H */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/file.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/file.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/file.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,12 @@
+#ifndef _BACKPORT_LINUX_FILE_H_
+#define _BACKPORT_LINUX_FILE_H_
+
+#include_next <linux/file.h>
+#include <linux/fs.h>
+
+static inline void drop_file_write_access(struct file *filp)
+{
+	put_write_access(filp->f_dentry->d_inode);
+}
+
+#endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/freezer.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/freezer.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/freezer.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,19 +1,6 @@
 #ifndef BACKPORT_LINUX_FREEZER_H
 #define BACKPORT_LINUX_FREEZER_H
-/*
-static inline int frozen(struct task_struct *p) { return 0; }
-static inline int freezing(struct task_struct *p) { return 0; }
-static inline void freeze(struct task_struct *p) { BUG(); }
-static inline int thaw_process(struct task_struct *p) { return 1; }
-static inline void frozen_process(struct task_struct *p) { BUG(); }
 
-static inline void refrigerator(void) {}
-static inline int freeze_processes(void) { BUG(); return 0; }
-static inline void thaw_processes(void) {}
-
-static inline int try_to_freeze(void) { return 0; }
-*/
-
 static inline void set_freezable(void) {}
 
 #endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/fs.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/fs.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/fs.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,1842 +1,85 @@
 #ifndef BACKPORT_LINUX_FS_H
 #define BACKPORT_LINUX_FS_H
 
-#ifndef _LINUX_FS_H
-#define _LINUX_FS_H
+#include_next <linux/fs.h>
+#include <linux/mount.h>
 
-/*
- * This file has definitions for some important file table
- * structures etc.
- */
+#define FILE_LOCK_DEFERRED 1
 
-#include <linux/config.h>
-#include <linux/limits.h>
-#include <linux/ioctl.h>
-#include <linux/spinlock.h>
-#include <linux/idr.h>
+#define ATTR_KILL_PRIV  (1 << 14)
 
-/*
- * It's silly to have NR_OPEN bigger than NR_FILE, but you can change
- * the file limit at runtime and only root can increase the per-process
- * nr_file rlimit, so it's safe to set up a ridiculously high absolute
- * upper limit on files-per-process.
- *
- * Some programs (notably those using select()) may have to be 
- * recompiled to take full advantage of the new limits..  
- */
+#define FMODE_EXEC	16
 
-/* Fixed constants first: */
-#undef NR_OPEN
-#define NR_OPEN (1024*1024)	/* Absolute upper limit on fd num */
-#define INR_OPEN 1024		/* Initial setting for nfile rlimits */
+#define SEEK_SET	0	/* seek relative to beginning of file */
+#define SEEK_CUR	1	/* seek relative to current file position */
+#define SEEK_END	2	/* seek relative to end of file */
 
-#define BLOCK_SIZE_BITS 10
-#define BLOCK_SIZE (1<<BLOCK_SIZE_BITS)
+#define i_private u.generic_ip
 
-/* And dynamically-tunable limits and defaults: */
-struct files_stat_struct {
-	int nr_files;		/* read only */
-	int nr_free_files;	/* read only */
-	int max_files;		/* tunable */
-};
-extern struct files_stat_struct files_stat;
-extern int get_max_files(void);
-
-struct inodes_stat_t {
-	int nr_inodes;
-	int nr_unused;
-	int dummy[5];
-};
-extern struct inodes_stat_t inodes_stat;
-
-extern int leases_enable, lease_break_time;
-
-#ifdef CONFIG_DNOTIFY
-extern int dir_notify_enable;
-#endif
-
-#define NR_FILE  8192	/* this can well be larger on a larger system */
-
-#define MAY_EXEC 1
-#define MAY_WRITE 2
-#define MAY_READ 4
-#define MAY_APPEND 8
-
-#define FMODE_READ 1
-#define FMODE_WRITE 2
-
-/* Internal kernel extensions */
-#define FMODE_LSEEK	4
-#define FMODE_PREAD	8
-#define FMODE_PWRITE	FMODE_PREAD	/* These go hand in hand */
-
-#define RW_MASK		1
-#define RWA_MASK	2
-#define READ 0
-#define WRITE 1
-#define READA 2		/* read-ahead  - don't block if no resources */
-#define SWRITE 3	/* for ll_rw_block() - wait for buffer lock */
-#define SPECIAL 4	/* For non-blockdevice requests in request queue */
-#define READ_SYNC	(READ | (1 << BIO_RW_SYNC))
-#define WRITE_SYNC	(WRITE | (1 << BIO_RW_SYNC))
-#define WRITE_BARRIER	((1 << BIO_RW) | (1 << BIO_RW_BARRIER))
-
-#define SEL_IN		1
-#define SEL_OUT		2
-#define SEL_EX		4
-
-/* public flags for file_system_type */
-#define FS_REQUIRES_DEV 1 
-#define FS_BINARY_MOUNTDATA 2
-#define FS_REVAL_DOT	16384	/* Check the paths ".", ".." for staleness */
-#define FS_RENAME_DOES_D_MOVE	32768	/* FS will handle d_move()
-					 * during rename() internally.
-					 */
-
-/*
- * These are the fs-independent mount-flags: up to 32 flags are supported
- */
-#define MS_RDONLY	 1	/* Mount read-only */
-#define MS_NOSUID	 2	/* Ignore suid and sgid bits */
-#define MS_NODEV	 4	/* Disallow access to device special files */
-#define MS_NOEXEC	 8	/* Disallow program execution */
-#define MS_SYNCHRONOUS	16	/* Writes are synced at once */
-#define MS_REMOUNT	32	/* Alter flags of a mounted FS */
-#define MS_MANDLOCK	64	/* Allow mandatory locks on an FS */
-#define MS_DIRSYNC	128	/* Directory modifications are synchronous */
-#define MS_NOATIME	1024	/* Do not update access times. */
-#define MS_NODIRATIME	2048	/* Do not update directory access times */
-#define MS_BIND		4096
-#define MS_MOVE		8192
-#define MS_REC		16384
-#define MS_VERBOSE	32768
-#define MS_POSIXACL	(1<<16)	/* VFS does not apply the umask */
-#define MS_UNBINDABLE	(1<<17)	/* change to unbindable */
-#define MS_PRIVATE	(1<<18)	/* change to private */
-#define MS_SLAVE	(1<<19)	/* change to slave */
-#define MS_SHARED	(1<<20)	/* change to shared */
-#define MS_WITHAPPEND	(1<<21) /* iop->permission() understands MAY_APPEND */
-#define MS_LOOP_NO_AOPS	(1<<29)	/* Hack to allow OCFS2 1.2 to work with loop */
-#define MS_ACTIVE	(1<<30)
-#define MS_NOUSER	(1<<31)
-
-/*
- * Superblock flags that can be altered by MS_REMOUNT
- */
-#define MS_RMT_MASK	(MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK)
-
-/*
- * Old magic mount flag and mask
- */
-#define MS_MGC_VAL 0xC0ED0000
-#define MS_MGC_MSK 0xffff0000
-
-/* Inode flags - they have nothing to superblock flags now */
-
-#define S_SYNC		1	/* Writes are synced at once */
-#define S_NOATIME	2	/* Do not update access times */
-#define S_APPEND	4	/* Append-only file */
-#define S_IMMUTABLE	8	/* Immutable file */
-#define S_DEAD		16	/* removed, but still open directory */
-#define S_NOQUOTA	32	/* Inode is not counted to quota */
-#define S_DIRSYNC	64	/* Directory modifications are synchronous */
-#define S_NOCMTIME	128	/* Do not update file c/mtime */
-#define S_SWAPFILE	256	/* Do not truncate: swapon got its bmaps */
-#define S_PRIVATE	512	/* Inode is fs-internal */
-
-/*
- * Note that nosuid etc flags are inode-specific: setting some file-system
- * flags just means all the inodes inherit those flags by default. It might be
- * possible to override it selectively if you really wanted to with some
- * ioctl() that is not currently implemented.
- *
- * Exception: MS_RDONLY is always applied to the entire file system.
- *
- * Unfortunately, it is possible to change a filesystems flags with it mounted
- * with files in use.  This means that all of the inodes will not have their
- * i_flags updated.  Hence, i_flags no longer inherit the superblock mount
- * flags, so these have to be checked separately. -- rmk at arm.uk.linux.org
- */
-#define __IS_FLG(inode,flg) ((inode)->i_sb->s_flags & (flg))
-
-#define IS_RDONLY(inode) ((inode)->i_sb->s_flags & MS_RDONLY)
-#define IS_SYNC(inode)		(__IS_FLG(inode, MS_SYNCHRONOUS) || \
-					((inode)->i_flags & S_SYNC))
-#define IS_DIRSYNC(inode)	(__IS_FLG(inode, MS_SYNCHRONOUS|MS_DIRSYNC) || \
-					((inode)->i_flags & (S_SYNC|S_DIRSYNC)))
-#define IS_MANDLOCK(inode)	__IS_FLG(inode, MS_MANDLOCK)
-#define IS_WITHAPPEND(inode)	__IS_FLG(inode, MS_WITHAPPEND)
-
-#define IS_NOQUOTA(inode)	((inode)->i_flags & S_NOQUOTA)
-#define IS_APPEND(inode)	((inode)->i_flags & S_APPEND)
-#define IS_IMMUTABLE(inode)	((inode)->i_flags & S_IMMUTABLE)
-#define IS_POSIXACL(inode)	__IS_FLG(inode, MS_POSIXACL)
-
-#define IS_DEADDIR(inode)	((inode)->i_flags & S_DEAD)
-#define IS_NOCMTIME(inode)	((inode)->i_flags & S_NOCMTIME)
-#define IS_SWAPFILE(inode)	((inode)->i_flags & S_SWAPFILE)
-#define IS_PRIVATE(inode)	((inode)->i_flags & S_PRIVATE)
-
-/* the read-only stuff doesn't really belong here, but any other place is
-   probably as bad and I don't want to create yet another include file. */
-
-#define BLKROSET   _IO(0x12,93)	/* set device read-only (0 = read-write) */
-#define BLKROGET   _IO(0x12,94)	/* get read-only status (0 = read_write) */
-#define BLKRRPART  _IO(0x12,95)	/* re-read partition table */
-#define BLKGETSIZE _IO(0x12,96)	/* return device size /512 (long *arg) */
-#define BLKFLSBUF  _IO(0x12,97)	/* flush buffer cache */
-#define BLKRASET   _IO(0x12,98)	/* set read ahead for block device */
-#define BLKRAGET   _IO(0x12,99)	/* get current read ahead setting */
-#define BLKFRASET  _IO(0x12,100)/* set filesystem (mm/filemap.c) read-ahead */
-#define BLKFRAGET  _IO(0x12,101)/* get filesystem (mm/filemap.c) read-ahead */
-#define BLKSECTSET _IO(0x12,102)/* set max sectors per request (ll_rw_blk.c) */
-#define BLKSECTGET _IO(0x12,103)/* get max sectors per request (ll_rw_blk.c) */
-#define BLKSSZGET  _IO(0x12,104)/* get block device sector size */
-#if 0
-#define BLKPG      _IO(0x12,105)/* See blkpg.h */
-
-/* Some people are morons.  Do not use sizeof! */
-
-#define BLKELVGET  _IOR(0x12,106,size_t)/* elevator get */
-#define BLKELVSET  _IOW(0x12,107,size_t)/* elevator set */
-/* This was here just to show that the number is taken -
-   probably all these _IO(0x12,*) ioctls should be moved to blkpg.h. */
-#endif
-/* A jump here: 108-111 have been used for various private purposes. */
-#define BLKBSZGET  _IOR(0x12,112,size_t)
-#define BLKBSZSET  _IOW(0x12,113,size_t)
-#define BLKGETSIZE64 _IOR(0x12,114,size_t)	/* return device size in bytes (u64 *arg) */
-#define BLKTRACESETUP _IOWR(0x12,115,struct blk_user_trace_setup)
-#define BLKTRACESTART _IO(0x12,116)
-#define BLKTRACESTOP _IO(0x12,117)
-#define BLKTRACETEARDOWN _IO(0x12,118)
-
-#define BMAP_IOCTL 1		/* obsolete - kept for compatibility */
-#define FIBMAP	   _IO(0x00,1)	/* bmap access */
-#define FIGETBSZ   _IO(0x00,2)	/* get the block size used for bmap */
-
-#ifdef __KERNEL__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/types.h>
-#include <linux/kdev_t.h>
-#include <linux/dcache.h>
-#include <linux/stat.h>
-#include <linux/cache.h>
-#include <linux/kobject.h>
-#include <linux/list.h>
-#include <linux/radix-tree.h>
-#include <linux/prio_tree.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/mutex.h>
-
-#include <asm/atomic.h>
-#include <asm/semaphore.h>
-#include <asm/byteorder.h>
-
-struct export_operations;
-struct hd_geometry;
-struct iovec;
-struct nameidata;
-struct kiocb;
-struct pipe_inode_info;
-struct poll_table_struct;
-struct kstatfs;
-struct vm_area_struct;
-struct vfsmount;
-
-extern void __init inode_init(unsigned long);
-extern void __init inode_init_early(void);
-extern void __init mnt_init(unsigned long);
-extern void __init files_init(unsigned long);
-
-struct buffer_head;
-typedef int (get_block_t)(struct inode *inode, sector_t iblock,
-			struct buffer_head *bh_result, int create);
-typedef int (get_blocks_t)(struct inode *inode, sector_t iblock,
-			unsigned long max_blocks,
-			struct buffer_head *bh_result, int create);
-typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
-			ssize_t bytes, void *private);
-
-/*
- * Attribute flags.  These should be or-ed together to figure out what
- * has been changed!
- */
-#define ATTR_MODE	1
-#define ATTR_UID	2
-#define ATTR_GID	4
-#define ATTR_SIZE	8
-#define ATTR_ATIME	16
-#define ATTR_MTIME	32
-#define ATTR_CTIME	64
-#define ATTR_ATIME_SET	128
-#define ATTR_MTIME_SET	256
-#define ATTR_FORCE	512	/* Not a change, but a change it */
-#define ATTR_ATTR_FLAG	1024
-#define ATTR_KILL_SUID	2048
-#define ATTR_KILL_SGID	4096
-#define ATTR_FILE	8192
-#define ATTR_NO_BLOCK	32768	/* Return EAGAIN and don't block on long truncates */
-
-/*
- * This is the Inode Attributes structure, used for notify_change().  It
- * uses the above definitions as flags, to know which values have changed.
- * Also, in this manner, a Filesystem can look at only the values it cares
- * about.  Basically, these are the attributes that the VFS layer can
- * request to change from the FS layer.
- *
- * Derek Atkins <warlord at MIT.EDU> 94-10-20
- */
-struct iattr {
-	unsigned int	ia_valid;
-	umode_t		ia_mode;
-	uid_t		ia_uid;
-	gid_t		ia_gid;
-	loff_t		ia_size;
-	struct timespec	ia_atime;
-	struct timespec	ia_mtime;
-	struct timespec	ia_ctime;
-
-	/*
-	 * Not an attribute, but an auxilary info for filesystems wanting to
-	 * implement an ftruncate() like method.  NOTE: filesystem should
-	 * check for (ia_valid & ATTR_FILE), and not for (ia_file != NULL).
-	 *
-	 * The LSM hooks also use this to distinguish operations on a file
-	 * descriptors from operations on pathnames.
-	 */
-	struct file	*ia_file;
-};
-
-/*
- * Includes for diskquotas.
- */
-#include <linux/quota.h>
-
-/** 
- * enum positive_aop_returns - aop return codes with specific semantics
- *
- * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has
- * 			    completed, that the page is still locked, and
- * 			    should be considered active.  The VM uses this hint
- * 			    to return the page to the active list -- it won't
- * 			    be a candidate for writeback again in the near
- * 			    future.  Other callers must be careful to unlock
- * 			    the page if they get this return.  Returned by
- * 			    writepage(); 
- *
- * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has
- *  			unlocked it and the page might have been truncated.
- *  			The caller should back up to acquiring a new page and
- *  			trying again.  The aop will be taking reasonable
- *  			precautions not to livelock.  If the caller held a page
- *  			reference, it should drop it before retrying.  Returned
- *  			by readpage(), prepare_write(), and commit_write().
- *
- * address_space_operation functions return these large constants to indicate
- * special semantics to the caller.  These are much larger than the bytes in a
- * page to allow for functions that return the number of bytes operated on in a
- * given page.
- */
-
-enum positive_aop_returns {
-	AOP_WRITEPAGE_ACTIVATE	= 0x80000,
-	AOP_TRUNCATED_PAGE	= 0x80001,
-};
-
-/*
- * oh the beauties of C type declarations.
- */
-struct page;
-struct address_space;
-struct writeback_control;
-
-struct address_space_operations {
-	int (*writepage)(struct page *page, struct writeback_control *wbc);
-	int (*readpage)(struct file *, struct page *);
-	int (*sync_page)(struct page *);
-
-	/* Write back some dirty pages from this mapping. */
-	int (*writepages)(struct address_space *, struct writeback_control *);
-
-	/* Set a page dirty */
-	int (*set_page_dirty)(struct page *page);
-
-	int (*readpages)(struct file *filp, struct address_space *mapping,
-			struct list_head *pages, unsigned nr_pages);
-
-	/*
-	 * ext3 requires that a successful prepare_write() call be followed
-	 * by a commit_write() call - they must be balanced
-	 */
-	int (*prepare_write)(struct file *, struct page *, unsigned, unsigned);
-	int (*commit_write)(struct file *, struct page *, unsigned, unsigned);
-	/* Unfortunately this kludge is needed for FIBMAP. Don't use it */
-	sector_t (*bmap)(struct address_space *, sector_t);
-	int (*invalidatepage) (struct page *, unsigned long);
-	int (*releasepage) (struct page *, gfp_t);
-	ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
-			loff_t offset, unsigned long nr_segs);
-	struct page* (*get_xip_page)(struct address_space *, sector_t,
-			int);
-	/* migrate the contents of a page to the specified target */
-	int (*migratepage) (struct page *, struct page *);
-};
-
-struct backing_dev_info;
-struct address_space {
-	struct inode		*host;		/* owner: inode, block_device */
-	struct radix_tree_root	page_tree;	/* radix tree of all pages */
-	rwlock_t		tree_lock;	/* and rwlock protecting it */
-	unsigned int		i_mmap_writable;/* count VM_SHARED mappings */
-	struct prio_tree_root	i_mmap;		/* tree of private and shared mappings */
-	struct list_head	i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
-	spinlock_t		i_mmap_lock;	/* protect tree, count, list */
-	unsigned int		truncate_count;	/* Cover race condition with truncate */
-	unsigned long		nrpages;	/* number of total pages */
-	pgoff_t			writeback_index;/* writeback starts here */
-	struct address_space_operations *a_ops;	/* methods */
-	unsigned long		flags;		/* error bits/gfp mask */
-	struct backing_dev_info *backing_dev_info; /* device readahead, etc */
-	spinlock_t		private_lock;	/* for use by the address_space */
-	struct list_head	private_list;	/* ditto */
-	struct address_space	*assoc_mapping;	/* ditto */
-#ifdef CONFIG_PAGE_STATES
-	unsigned int		mlocked;	/* set if VM_LOCKED vmas present */
-#endif
-} __attribute__((aligned(sizeof(long))));
-	/*
-	 * On most architectures that alignment is already the case; but
-	 * must be enforced here for CRIS, to let the least signficant bit
-	 * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON.
-	 */
-
-static inline void mapping_set_mlocked(struct address_space *mapping)
+enum inode_i_mutex_lock_class
 {
-#if defined(CONFIG_PAGE_STATES)
-	mapping->mlocked = 1;
-#endif
-}
-
-struct block_device {
-	dev_t			bd_dev;  /* not a kdev_t - it's a search key */
-	struct inode *		bd_inode;	/* will die */
-	int			bd_openers;
-	struct semaphore	bd_sem;	/* open/close mutex */
-	struct semaphore	bd_mount_sem;	/* mount mutex */
-	struct list_head	bd_inodes;
-	void *			bd_holder;
-	int			bd_holders;
-	struct block_device *	bd_contains;
-	unsigned		bd_block_size;
-	struct hd_struct *	bd_part;
-	/* number of times partitions within this device have been opened. */
-	unsigned		bd_part_count;
-	int			bd_invalidated;
-	struct gendisk *	bd_disk;
-	struct list_head	bd_list;
-	struct backing_dev_info *bd_inode_backing_dev_info;
-	/*
-	 * Private data.  You must have bd_claim'ed the block_device
-	 * to use this.  NOTE:  bd_claim allows an owner to claim
-	 * the same device multiple times, the owner must take special
-	 * care to not mess up bd_private for that case.
-	 */
-	unsigned long		bd_private;
+	I_MUTEX_NORMAL,
+	I_MUTEX_PARENT,
+	I_MUTEX_CHILD,
+	I_MUTEX_XATTR,
+	I_MUTEX_QUOTA
 };
 
-/*
- * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
- * radix trees
- */
-#define PAGECACHE_TAG_DIRTY	0
-#define PAGECACHE_TAG_WRITEBACK	1
-
-int mapping_tagged(struct address_space *mapping, int tag);
-
-/*
- * Might pages of this file be mapped into userspace?
- */
-static inline int mapping_mapped(struct address_space *mapping)
+static inline void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
 {
-	return	!prio_tree_empty(&mapping->i_mmap) ||
-		!list_empty(&mapping->i_mmap_nonlinear);
+	new->fl_owner = fl->fl_owner;
+	new->fl_pid = fl->fl_pid;
+	new->fl_file = NULL;
+	new->fl_flags = fl->fl_flags;
+	new->fl_type = fl->fl_type;
+	new->fl_start = fl->fl_start;
+	new->fl_end = fl->fl_end;
+	new->fl_ops = NULL;
+	new->fl_lmops = NULL;
 }
 
-/*
- * Might pages of this file have been modified in userspace?
- * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff
- * marks vma as VM_SHARED if it is shared, and the file was opened for
- * writing i.e. vma may be mprotected writable even if now readonly.
- */
-static inline int mapping_writably_mapped(struct address_space *mapping)
+static inline int __mandatory_lock(struct inode *ino)
 {
-	return mapping->i_mmap_writable != 0;
+	return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID;
 }
 
-/*
- * Use sequence counter to get consistent i_size on 32-bit processors.
- */
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-#include <linux/seqlock.h>
-#define __NEED_I_SIZE_ORDERED
-#define i_size_ordered_init(inode) seqcount_init(&inode->i_size_seqcount)
-#else
-#define i_size_ordered_init(inode) do { } while (0)
-#endif
+#define mandatory_lock(_args) MANDATORY_LOCK(_args)
 
-struct inode {
-	struct hlist_node	i_hash;
-	struct list_head	i_list;
-	struct list_head	i_sb_list;
-	struct list_head	i_dentry;
-	unsigned long		i_ino;
-	atomic_t		i_count;
-	umode_t			i_mode;
-	unsigned int		i_nlink;
-	uid_t			i_uid;
-	gid_t			i_gid;
-	dev_t			i_rdev;
-	loff_t			i_size;
-	struct timespec		i_atime;
-	struct timespec		i_mtime;
-	struct timespec		i_ctime;
-	unsigned int		i_blkbits;
-	unsigned long		i_blksize;
-	unsigned long		i_version;
-	unsigned long		i_blocks;
-	unsigned short          i_bytes;
-	spinlock_t		i_lock;	/* i_blocks, i_bytes, maybe i_size */
-	struct mutex		i_mutex;
-	struct rw_semaphore	i_alloc_sem;
-	struct inode_operations	*i_op;
-	struct file_operations	*i_fop;	/* former ->i_op->default_file_ops */
-	struct super_block	*i_sb;
-	struct file_lock	*i_flock;
-	struct address_space	*i_mapping;
-	struct address_space	i_data;
-#ifdef CONFIG_QUOTA
-	struct dquot		*i_dquot[MAXQUOTAS];
-#endif
-	/* These three should probably be a union */
-	struct list_head	i_devices;
-	struct pipe_inode_info	*i_pipe;
-	struct block_device	*i_bdev;
-	struct cdev		*i_cdev;
-	int			i_cindex;
-
-	__u32			i_generation;
-
-#ifdef CONFIG_DNOTIFY
-	unsigned long		i_dnotify_mask; /* Directory notify events */
-	struct dnotify_struct	*i_dnotify; /* for directory notifications */
-#endif
-
-#ifdef CONFIG_INOTIFY
-	struct list_head	inotify_watches; /* watches on this inode */
-	struct semaphore	inotify_sem;	/* protects the watches list */
-#endif
-
-	unsigned long		i_state;
-	unsigned long		dirtied_when;	/* jiffies of first dirtying */
-
-	unsigned int		i_flags;
-
-	atomic_t		i_writecount;
-	void			*i_security;
-	union {
-		void		*generic_ip;
-	} u;
-#ifdef __NEED_I_SIZE_ORDERED
-	seqcount_t		i_size_seqcount;
-#endif
-};
-
-/*
- * NOTE: in a 32bit arch with a preemptable kernel and
- * an UP compile the i_size_read/write must be atomic
- * with respect to the local cpu (unlike with preempt disabled),
- * but they don't need to be atomic with respect to other cpus like in
- * true SMP (so they need either to either locally disable irq around
- * the read or for example on x86 they can be still implemented as a
- * cmpxchg8b without the need of the lock prefix). For SMP compiles
- * and 64bit archs it makes no difference if preempt is enabled or not.
- */
-static inline loff_t i_size_read(struct inode *inode)
+#ifdef CONFIG_DEBUG_WRITECOUNT
+static inline void file_take_write(struct file *f)
 {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-	loff_t i_size;
-	unsigned int seq;
-
-	do {
-		seq = read_seqcount_begin(&inode->i_size_seqcount);
-		i_size = inode->i_size;
-	} while (read_seqcount_retry(&inode->i_size_seqcount, seq));
-	return i_size;
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
-	loff_t i_size;
-
-	preempt_disable();
-	i_size = inode->i_size;
-	preempt_enable();
-	return i_size;
-#else
-	return inode->i_size;
-#endif
+	WARN_ON(f->f_mnt_write_state != 0);
+	f->f_mnt_write_state = FILE_MNT_WRITE_TAKEN;
 }
-
-
-static inline void i_size_write(struct inode *inode, loff_t i_size)
-{
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-	write_seqcount_begin(&inode->i_size_seqcount);
-	inode->i_size = i_size;
-	write_seqcount_end(&inode->i_size_seqcount);
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
-	preempt_disable();
-	inode->i_size = i_size;
-	preempt_enable();
 #else
-	inode->i_size = i_size;
+static inline void file_take_write(struct file *filp) {}
 #endif
-}
 
-static inline unsigned iminor(struct inode *inode)
+static inline int inode_permission(struct inode *inode, int flags)
 {
-	return MINOR(inode->i_rdev);
+	return permission(inode, flags, NULL);
 }
 
-static inline unsigned imajor(struct inode *inode)
+static inline int __mnt_is_readonly(struct vfsmount *mnt)
 {
-	return MAJOR(inode->i_rdev);
-}
-
-extern struct block_device *I_BDEV(struct inode *inode);
-
-struct fown_struct {
-	rwlock_t lock;          /* protects pid, uid, euid fields */
-	int pid;		/* pid or -pgrp where SIGIO should be sent */
-	uid_t uid, euid;	/* uid/euid of process setting the owner */
-	void *security;
-	int signum;		/* posix.1b rt signal to be delivered on IO */
-};
-
-/*
- * Track a single file's readahead state
- */
-struct file_ra_state {
-	unsigned long start;		/* Current window */
-	unsigned long size;
-	unsigned long flags;		/* ra flags RA_FLAG_xxx*/
-	unsigned long cache_hit;	/* cache hit count*/
-	unsigned long prev_page;	/* Cache last read() position */
-	unsigned long ahead_start;	/* Ahead window */
-	unsigned long ahead_size;
-	unsigned long ra_pages;		/* Maximum readahead window */
-	unsigned long mmap_hit;		/* Cache hit stat for mmap accesses */
-	unsigned long mmap_miss;	/* Cache miss stat for mmap accesses */
-};
-#define RA_FLAG_MISS 0x01	/* a cache miss occured against this file */
-#define RA_FLAG_INCACHE 0x02	/* file is already in cache */
-
-struct file {
-	/*
-	 * fu_list becomes invalid after file_free is called and queued via
-	 * fu_rcuhead for RCU freeing
-	 */
-	union {
-		struct list_head	fu_list;
-		struct rcu_head 	fu_rcuhead;
-	} f_u;
-	struct dentry		*f_dentry;
-	struct vfsmount         *f_vfsmnt;
-	struct file_operations	*f_op;
-	atomic_t		f_count;
-	unsigned int 		f_flags;
-	mode_t			f_mode;
-	loff_t			f_pos;
-	struct fown_struct	f_owner;
-	unsigned int		f_uid, f_gid;
-	struct file_ra_state	f_ra;
-
-	unsigned long		f_version;
-	void			*f_security;
-
-	/* needed for tty driver, and maybe others */
-	void			*private_data;
-
-#ifdef CONFIG_EPOLL
-	/* Used by fs/eventpoll.c to link all the hooks to this file */
-	struct list_head	f_ep_links;
-	spinlock_t		f_ep_lock;
-#endif /* #ifdef CONFIG_EPOLL */
-	struct address_space	*f_mapping;
-};
-extern spinlock_t files_lock;
-#define file_list_lock() spin_lock(&files_lock);
-#define file_list_unlock() spin_unlock(&files_lock);
-
-#define get_file(x)	atomic_inc(&(x)->f_count)
-#define file_count(x)	atomic_read(&(x)->f_count)
-
-#define	MAX_NON_LFS	((1UL<<31) - 1)
-
-/* Page cache limit. The filesystems should put that into their s_maxbytes 
-   limits, otherwise bad things can happen in VM. */ 
-#if BITS_PER_LONG==32
-#define MAX_LFS_FILESIZE	(((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) 
-#elif BITS_PER_LONG==64
-#define MAX_LFS_FILESIZE 	0x7fffffffffffffffUL
-#endif
-
-#define FL_POSIX	1
-#define FL_FLOCK	2
-#define FL_ACCESS	8	/* not trying to lock, just looking */
-#define FL_EXISTS	16	/* when unlocking, test for existence */
-#define FL_LEASE	32	/* lease held on this file */
-#define FL_CLOSE	64	/* unlock on close */
-#define FL_SLEEP	128	/* A blocking lock */
-
-/*
- * The POSIX file lock owner is determined by
- * the "struct files_struct" in the thread group
- * (or NULL for no owner - BSD locks).
- *
- * Lockd stuffs a "host" pointer into this.
- */
-typedef struct files_struct *fl_owner_t;
-
-struct file_lock_operations {
-	void (*fl_insert)(struct file_lock *);	/* lock insertion callback */
-	void (*fl_remove)(struct file_lock *);	/* lock removal callback */
-	void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
-	void (*fl_release_private)(struct file_lock *);
-};
-
-struct lock_manager_operations {
-	int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
-	void (*fl_notify)(struct file_lock *);	/* unblock callback */
-	int (*fl_grant)(struct file_lock *, struct file_lock *, int);
-	void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
-	void (*fl_release_private)(struct file_lock *);
-	void (*fl_break)(struct file_lock *);
-	int (*fl_mylease)(struct file_lock *, struct file_lock *);
-	int (*fl_change)(struct file_lock **, int);
-};
-
-/* that will die - we need it for nfs_lock_info */
-#include <linux/nfs_fs_i.h>
-
-struct file_lock {
-	struct file_lock *fl_next;	/* singly linked list for this inode  */
-	struct list_head fl_link;	/* doubly linked list of all locks */
-	struct list_head fl_block;	/* circular list of blocked processes */
-	fl_owner_t fl_owner;
-	unsigned int fl_pid;
-	wait_queue_head_t fl_wait;
-	struct file *fl_file;
-	unsigned char fl_flags;
-	unsigned char fl_type;
-	loff_t fl_start;
-	loff_t fl_end;
-
-	struct fasync_struct *	fl_fasync; /* for lease break notifications */
-	unsigned long fl_break_time;	/* for nonblocking lease breaks */
-
-	struct file_lock_operations *fl_ops;	/* Callbacks for filesystems */
-	struct lock_manager_operations *fl_lmops;	/* Callbacks for lockmanagers */
-	union {
-		struct nfs_lock_info	nfs_fl;
-		struct nfs4_lock_info	nfs4_fl;
-	} fl_u;
-};
-
-/* The following constant reflects the upper bound of the file/locking space */
-#ifndef OFFSET_MAX
-#define INT_LIMIT(x)	(~((x)1 << (sizeof(x)*8 - 1)))
-#define OFFSET_MAX	INT_LIMIT(loff_t)
-#define OFFT_OFFSET_MAX	INT_LIMIT(off_t)
-#endif
-
-#include <linux/fcntl.h>
-
-extern int fcntl_getlk(struct file *, struct flock __user *);
-extern int fcntl_setlk(unsigned int, struct file *, unsigned int,
-			struct flock __user *);
-
-#if BITS_PER_LONG == 32
-extern int fcntl_getlk64(struct file *, struct flock64 __user *);
-extern int fcntl_setlk64(unsigned int, struct file *, unsigned int,
-			struct flock64 __user *);
-#endif
-
-extern void send_sigio(struct fown_struct *fown, int fd, int band);
-extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
-extern int fcntl_getlease(struct file *filp);
-
-/* fs/locks.c */
-extern void locks_init_lock(struct file_lock *);
-extern void locks_copy_lock(struct file_lock *, struct file_lock *);
-extern void locks_remove_posix(struct file *, fl_owner_t);
-extern void locks_remove_flock(struct file *);
-extern void posix_test_lock(struct file *, struct file_lock *);
-extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
-extern int posix_lock_file_wait(struct file *, struct file_lock *);
-extern int posix_unblock_lock(struct file *, struct file_lock *);
-extern int posix_locks_deadlock(struct file_lock *, struct file_lock *);
-extern int vfs_test_lock(struct file *, struct file_lock *);
-extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
-extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
-extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
-extern int __break_lease(struct inode *inode, unsigned int flags);
-extern void lease_get_mtime(struct inode *, struct timespec *time);
-extern int generic_setlease(struct file *, long, struct file_lock **);
-extern int vfs_setlease(struct file *, long, struct file_lock **);
-extern int lease_modify(struct file_lock **, int);
-extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
-extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
-extern void steal_locks(fl_owner_t from);
-
-struct fasync_struct {
-	int	magic;
-	int	fa_fd;
-	struct	fasync_struct	*fa_next; /* singly linked list */
-	struct	file 		*fa_file;
-};
-
-#define FASYNC_MAGIC 0x4601
-
-/* SMP safe fasync helpers: */
-extern int fasync_helper(int, struct file *, int, struct fasync_struct **);
-/* can be called from interrupts */
-extern void kill_fasync(struct fasync_struct **, int, int);
-/* only for net: no internal synchronization */
-extern void __kill_fasync(struct fasync_struct *, int, int);
-
-extern int f_setown(struct file *filp, unsigned long arg, int force);
-extern void f_delown(struct file *filp);
-extern int send_sigurg(struct fown_struct *fown);
-
-/*
- *	Umount options
- */
-
-#define MNT_FORCE	0x00000001	/* Attempt to forcibily umount */
-#define MNT_DETACH	0x00000002	/* Just detach from the tree */
-#define MNT_EXPIRE	0x00000004	/* Mark for expiry */
-
-extern struct list_head super_blocks;
-extern spinlock_t sb_lock;
-
-#define sb_entry(list)	list_entry((list), struct super_block, s_list)
-#define S_BIAS (1<<30)
-struct super_block {
-	struct list_head	s_list;		/* Keep this first */
-	dev_t			s_dev;		/* search index; _not_ kdev_t */
-	unsigned long		s_blocksize;
-	unsigned char		s_blocksize_bits;
-	unsigned char		s_dirt;
-	unsigned long long	s_maxbytes;	/* Max file size */
-	struct file_system_type	*s_type;
-	struct super_operations	*s_op;
-	struct dquot_operations	*dq_op;
- 	struct quotactl_ops	*s_qcop;
-	struct export_operations *s_export_op;
-	unsigned long		s_flags;
-	unsigned long		s_magic;
-	struct dentry		*s_root;
-	struct rw_semaphore	s_umount;
-	struct mutex		s_lock;
-	int			s_count;
-	int			s_syncing;
-	int			s_need_sync_fs;
-	atomic_t		s_active;
-	void                    *s_security;
-	struct xattr_handler	**s_xattr;
-
-	struct list_head	s_inodes;	/* all inodes */
-	struct list_head	s_dirty;	/* dirty inodes */
-	struct list_head	s_io;		/* parked for writeback */
-	struct hlist_head	s_anon;		/* anonymous dentries for (nfs) exporting */
-	struct list_head	s_files;
-
-	struct block_device	*s_bdev;
-	struct list_head	s_instances;
-	struct quota_info	s_dquot;	/* Diskquota specific options */
-
-	unsigned int		s_prunes;	/* protected by dcache_lock */
-	wait_queue_head_t	s_wait_prunes;
-
-	int			s_frozen;
-	wait_queue_head_t	s_wait_unfrozen;
-
-	char s_id[32];				/* Informational name */
-
-	void 			*s_fs_info;	/* Filesystem private info */
-
-	/*
-	 * The next field is for VFS *only*. No filesystems have any business
-	 * even looking at it. You had been warned.
-	 */
-	struct semaphore s_vfs_rename_sem;	/* Kludge */
-
-	/* Granuality of c/m/atime in ns.
-	   Cannot be worse than a second */
-	u32		   s_time_gran;
-
-	/* for fs's with dynamic i_ino values, track them with idr, and
-	 * increment the generation every time we register a new inode */
-	u32			s_generation;
-	struct idr		s_inode_ids;
-	spinlock_t		s_inode_ids_lock;
-};
-
-extern struct timespec current_fs_time(struct super_block *sb);
-
-/*
- * Snapshotting support.
- */
-enum {
-	SB_UNFROZEN = 0,
-	SB_FREEZE_WRITE	= 1,
-	SB_FREEZE_TRANS = 2,
-};
-
-#define vfs_check_frozen(sb, level) \
-	wait_event((sb)->s_wait_unfrozen, ((sb)->s_frozen < (level)))
-
-static inline void get_fs_excl(void)
-{
-	atomic_inc(&current->fs_excl);
-}
-
-static inline void put_fs_excl(void)
-{
-	atomic_dec(&current->fs_excl);
-}
-
-static inline int has_fs_excl(void)
-{
-	return atomic_read(&current->fs_excl);
-}
-
-
-/*
- * Superblock locking.
- */
-static inline void lock_super(struct super_block * sb)
-{
-	get_fs_excl();
-	mutex_lock(&sb->s_lock);
-}
-
-static inline void unlock_super(struct super_block * sb)
-{
-	put_fs_excl();
-	mutex_unlock(&sb->s_lock);
-}
-
-/*
- * VFS helper functions..
- */
-extern int vfs_permission(struct nameidata *, int);
-extern int vfs_create(struct inode *, struct dentry *, int, struct nameidata *);
-extern int vfs_mkdir(struct inode *, struct dentry *, struct vfsmount *, int);
-extern int vfs_mknod(struct inode *, struct dentry *, struct vfsmount *, int, dev_t);
-extern int vfs_symlink(struct inode *, struct dentry *, struct vfsmount *, const char *, int);
-extern int vfs_link(struct dentry *, struct vfsmount *, struct inode *, struct dentry *, struct vfsmount *);
-extern int vfs_rmdir(struct inode *, struct dentry *, struct vfsmount *);
-extern int vfs_unlink(struct inode *, struct dentry *, struct vfsmount *);
-extern int vfs_rename(struct inode *, struct dentry *, struct vfsmount *, struct inode *, struct dentry *, struct vfsmount *);
-
-/*
- * VFS dentry helper functions.
- */
-extern void dentry_unhash(struct dentry *dentry);
-
-/*
- * VFS file helper functions.
- */
-extern int file_permission(struct file *, int);
-
-/*
- * File types
- *
- * NOTE! These match bits 12..15 of stat.st_mode
- * (ie "(i_mode >> 12) & 15").
- */
-#define DT_UNKNOWN	0
-#define DT_FIFO		1
-#define DT_CHR		2
-#define DT_DIR		4
-#define DT_BLK		6
-#define DT_REG		8
-#define DT_LNK		10
-#define DT_SOCK		12
-#define DT_WHT		14
-
-#define OSYNC_METADATA	(1<<0)
-#define OSYNC_DATA	(1<<1)
-#define OSYNC_INODE	(1<<2)
-int generic_osync_inode(struct inode *, struct address_space *, int);
-
-/*
- * This is the "filldir" function type, used by readdir() to let
- * the kernel specify what kind of dirent layout it wants to have.
- * This allows the kernel to read directories into kernel space or
- * to have different dirent layouts depending on the binary type.
- */
-typedef int (*filldir_t)(void *, const char *, int, loff_t, ino_t, unsigned);
-
-struct block_device_operations {
-	int (*open) (struct inode *, struct file *);
-	int (*release) (struct inode *, struct file *);
-	int (*ioctl) (struct inode *, struct file *, unsigned, unsigned long);
-	long (*unlocked_ioctl) (struct file *, unsigned, unsigned long);
-	long (*compat_ioctl) (struct file *, unsigned, unsigned long);
-	int (*direct_access) (struct block_device *, sector_t, unsigned long *);
-	int (*media_changed) (struct gendisk *);
-	int (*revalidate_disk) (struct gendisk *);
-	int (*getgeo)(struct block_device *, struct hd_geometry *);
-	struct module *owner;
-};
-
-/*
- * "descriptor" for what we're up to with a read for sendfile().
- * This allows us to use the same read code yet
- * have multiple different users of the data that
- * we read from a file.
- *
- * The simplest case just copies the data to user
- * mode.
- */
-typedef struct {
-	size_t written;
-	size_t count;
-	union {
-		char __user * buf;
-		void *data;
-	} arg;
-	int error;
-} read_descriptor_t;
-
-typedef int (*read_actor_t)(read_descriptor_t *, struct page *, unsigned long, unsigned long);
-
-/* These macros are for out of kernel modules to test that
- * the kernel supports the unlocked_ioctl and compat_ioctl
- * fields in struct file_operations. */
-#define HAVE_COMPAT_IOCTL 1
-#define HAVE_UNLOCKED_IOCTL 1
-
-/*
- * NOTE:
- * read, write, poll, fsync, readv, writev, unlocked_ioctl and compat_ioctl
- * can be called without the big kernel lock held in all filesystems.
- */
-struct file_operations {
-	struct module *owner;
-	loff_t (*llseek) (struct file *, loff_t, int);
-	ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
-	ssize_t (*aio_read) (struct kiocb *, char __user *, size_t, loff_t);
-	ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
-	ssize_t (*aio_write) (struct kiocb *, const char __user *, size_t, loff_t);
-	int (*readdir) (struct file *, void *, filldir_t);
-	unsigned int (*poll) (struct file *, struct poll_table_struct *);
-	int (*ioctl) (struct inode *, struct file *, unsigned int, unsigned long);
-	long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
-	long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
-	int (*mmap) (struct file *, struct vm_area_struct *);
-	int (*open) (struct inode *, struct file *);
-	int (*flush) (struct file *, fl_owner_t id);
-	int (*release) (struct inode *, struct file *);
-	int (*fsync) (struct file *, struct dentry *, int datasync);
-	int (*aio_fsync) (struct kiocb *, int datasync);
-	int (*fasync) (int, struct file *, int);
-	int (*lock) (struct file *, int, struct file_lock *);
-	ssize_t (*readv) (struct file *, const struct iovec *, unsigned long, loff_t *);
-	ssize_t (*writev) (struct file *, const struct iovec *, unsigned long, loff_t *);
-	ssize_t (*sendfile) (struct file *, loff_t *, size_t, read_actor_t, void *);
-	ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
-	int (*setlease)(struct file *, long, struct file_lock **);
-	unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
-	int (*check_flags)(int);
-	int (*dir_notify)(struct file *filp, unsigned long arg);
-	int (*flock) (struct file *, int, struct file_lock *);
-#define HAVE_FOP_OPEN_EXEC
-	int (*open_exec) (struct inode *);
-};
-
-struct inode_operations {
-	int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
-	struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
-	int (*link) (struct dentry *,struct inode *,struct dentry *);
-	int (*unlink) (struct inode *,struct dentry *);
-	int (*symlink) (struct inode *,struct dentry *,const char *);
-	int (*mkdir) (struct inode *,struct dentry *,int);
-	int (*rmdir) (struct inode *,struct dentry *);
-	int (*mknod) (struct inode *,struct dentry *,int,dev_t);
-	int (*rename) (struct inode *, struct dentry *,
-			struct inode *, struct dentry *);
-	int (*readlink) (struct dentry *, char __user *,int);
-	void * (*follow_link) (struct dentry *, struct nameidata *);
-	void (*put_link) (struct dentry *, struct nameidata *, void *);
-	void (*truncate) (struct inode *);
-	int (*permission) (struct inode *, int, struct nameidata *);
-	int (*may_create) (struct inode *, int);
-	int (*may_delete) (struct inode *, struct inode *);
-	int (*setattr) (struct dentry *, struct iattr *);
-	int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
-	int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
-	ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
-	ssize_t (*listxattr) (struct dentry *, char *, size_t);
-	int (*removexattr) (struct dentry *, const char *);
-	void (*truncate_range)(struct inode *, loff_t, loff_t);
-};
-
-struct seq_file;
-
-extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
-extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
-extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
-		unsigned long, loff_t *);
-extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
-		unsigned long, loff_t *);
-
-/*
- * NOTE: write_inode, delete_inode, clear_inode, put_inode can be called
- * without the big kernel lock held in all filesystems.
- */
-struct super_operations {
-   	struct inode *(*alloc_inode)(struct super_block *sb);
-	void (*destroy_inode)(struct inode *);
-
-	void (*read_inode) (struct inode *);
-  
-   	void (*dirty_inode) (struct inode *);
-	int (*write_inode) (struct inode *, int);
-	void (*put_inode) (struct inode *);
-	void (*drop_inode) (struct inode *);
-	void (*delete_inode) (struct inode *);
-	void (*put_super) (struct super_block *);
-	void (*write_super) (struct super_block *);
-	int (*sync_fs)(struct super_block *sb, int wait);
-	void (*write_super_lockfs) (struct super_block *);
-	void (*unlockfs) (struct super_block *);
-	int (*statfs) (struct dentry *, struct kstatfs *);
-	int (*remount_fs) (struct super_block *, int *, char *);
-	void (*clear_inode) (struct inode *);
-	void (*umount_begin) (struct super_block *);
-
-	int (*show_options)(struct seq_file *, struct vfsmount *);
-
-	ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
-	ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
-};
-
-/* Inode state bits.  Protected by inode_lock. */
-#define I_DIRTY_SYNC		1 /* Not dirty enough for O_DATASYNC */
-#define I_DIRTY_DATASYNC	2 /* Data-related inode changes pending */
-#define I_DIRTY_PAGES		4 /* Data-related inode changes pending */
-#define __I_LOCK		3
-#define I_LOCK			(1 << __I_LOCK)
-#define I_FREEING		16
-#define I_CLEAR			32
-#define I_NEW			64
-#define I_WILL_FREE		128
-#define I_DIRTY_DELAYED		256
-
-#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
-
-extern void __mark_inode_dirty(struct inode *, int);
-static inline void mark_inode_dirty(struct inode *inode)
-{
-	__mark_inode_dirty(inode, I_DIRTY);
-}
-
-static inline void mark_inode_dirty_sync(struct inode *inode)
-{
-	__mark_inode_dirty(inode, I_DIRTY_SYNC);
-}
-
-extern void touch_atime(struct vfsmount *mnt, struct dentry *dentry);
-static inline void file_accessed(struct file *file)
-{
-	if (!(file->f_flags & O_NOATIME))
-		touch_atime(file->f_vfsmnt, file->f_dentry);
-}
-
-extern void touch_atime_delayed(struct vfsmount *mnt, struct dentry *dentry);
-static inline void file_accessed_delayed(struct file *file)
-{
-	if (!(file->f_flags & O_NOATIME))
-		touch_atime_delayed(file->f_vfsmnt, file->f_dentry);
-}
-int sync_inode(struct inode *inode, struct writeback_control *wbc);
-
-struct file_system_type {
-	const char *name;
-	int fs_flags;
-	struct super_block *(*get_sb) (struct file_system_type *, int,
-				       const char *, void *);
-	void (*kill_sb) (struct super_block *);
-	struct module *owner;
-	struct file_system_type * next;
-	struct list_head fs_supers;
-};
-
-struct super_block *get_sb_bdev(struct file_system_type *fs_type,
-	int flags, const char *dev_name, void *data,
-	int (*fill_super)(struct super_block *, void *, int));
-struct super_block *get_sb_single(struct file_system_type *fs_type,
-	int flags, void *data,
-	int (*fill_super)(struct super_block *, void *, int));
-struct super_block *get_sb_nodev(struct file_system_type *fs_type,
-	int flags, void *data,
-	int (*fill_super)(struct super_block *, void *, int));
-void generic_shutdown_super(struct super_block *sb);
-void kill_block_super(struct super_block *sb);
-void kill_anon_super(struct super_block *sb);
-void kill_litter_super(struct super_block *sb);
-void deactivate_super(struct super_block *sb);
-int set_anon_super(struct super_block *s, void *data);
-struct super_block *sget(struct file_system_type *type,
-			int (*test)(struct super_block *,void *),
-			int (*set)(struct super_block *,void *),
-			void *data);
-struct super_block *get_sb_pseudo(struct file_system_type *, char *,
-			struct super_operations *ops, unsigned long);
-int __put_super(struct super_block *sb);
-int __put_super_and_need_restart(struct super_block *sb);
-void unnamed_dev_init(void);
-
-/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
-#define fops_get(fops) \
-	(((fops) && try_module_get((fops)->owner) ? (fops) : NULL))
-#define fops_put(fops) \
-	do { if (fops) module_put((fops)->owner); } while(0)
-
-extern int register_filesystem(struct file_system_type *);
-extern int unregister_filesystem(struct file_system_type *);
-extern struct vfsmount *kern_mount(struct file_system_type *);
-extern int may_umount_tree(struct vfsmount *);
-extern int may_umount(struct vfsmount *);
-extern void umount_tree(struct vfsmount *, int, struct list_head *);
-extern void release_mounts(struct list_head *);
-extern long do_mount(char *, char *, char *, unsigned long, void *);
-extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int);
-extern void mnt_set_mountpoint(struct vfsmount *, struct dentry *,
-				  struct vfsmount *);
-
-extern int vfs_statfs(struct dentry *, struct kstatfs *);
-
-/* /sys/fs */
-extern struct subsystem fs_subsys;
-
-#define FLOCK_VERIFY_READ  1
-#define FLOCK_VERIFY_WRITE 2
-
-extern int locks_mandatory_locked(struct inode *);
-extern int locks_mandatory_area(int, struct inode *, struct file *, loff_t, size_t);
-
-/*
- * Candidates for mandatory locking have the setgid bit set
- * but no group execute bit -  an otherwise meaningless combination.
- */
-#define MANDATORY_LOCK(inode) \
-	(IS_MANDLOCK(inode) && ((inode)->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
-
-static inline int locks_verify_locked(struct inode *inode)
-{
-	if (MANDATORY_LOCK(inode))
-		return locks_mandatory_locked(inode);
+	if (mnt->mnt_sb->s_flags & MS_RDONLY)
+		return 1;
 	return 0;
 }
 
-extern int rw_verify_area(int, struct file *, loff_t *, size_t);
-
-static inline int locks_verify_truncate(struct inode *inode,
-				    struct file *filp,
-				    loff_t size)
-{
-	if (inode->i_flock && MANDATORY_LOCK(inode))
-		return locks_mandatory_area(
-			FLOCK_VERIFY_WRITE, inode, filp,
-			size < inode->i_size ? size : inode->i_size,
-			(size < inode->i_size ? inode->i_size - size
-			 : size - inode->i_size)
-		);
-	return 0;
-}
-
-static inline int break_lease(struct inode *inode, unsigned int mode)
-{
-	if (inode->i_flock)
-		return __break_lease(inode, mode);
-	return 0;
-}
-
-/* fs/open.c */
-
-extern int do_truncate(struct dentry *, struct vfsmount *mnt, loff_t start,
-		       unsigned int time_attrs, struct file *filp);
-extern long do_sys_open(int fdf, const char __user *filename, int flags,
-			int mode);
-extern struct file *filp_open(const char *, int, int);
-extern struct file * dentry_open(struct dentry *, struct vfsmount *, int);
-extern int filp_close(struct file *, fl_owner_t id);
-extern char * getname(const char __user *);
-
-/* fs/dcache.c */
-extern void __init vfs_caches_init_early(void);
-extern void __init vfs_caches_init(unsigned long);
-
-#define __getname()	kmem_cache_alloc(names_cachep, SLAB_KERNEL)
-#define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
-#ifndef CONFIG_AUDITSYSCALL
-#define putname(name)   __putname(name)
-#else
-extern void putname(const char *name);
-#endif
-
-extern int register_blkdev(unsigned int, const char *);
-extern int unregister_blkdev(unsigned int, const char *);
-extern struct block_device *bdget(dev_t);
-extern void bd_set_size(struct block_device *, loff_t size);
-extern void bd_forget(struct inode *inode);
-extern void bdput(struct block_device *);
-extern struct block_device *open_by_devnum(dev_t, unsigned);
-extern struct file_operations def_blk_fops;
-extern struct address_space_operations def_blk_aops;
-extern struct file_operations def_chr_fops;
-extern struct file_operations bad_sock_fops;
-extern struct file_operations def_fifo_fops;
-extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
-extern int blkdev_driver_ioctl(struct inode *, struct file *,
-			       struct gendisk *, unsigned, unsigned long);
-extern int blkdev_ioctl(struct inode *, struct file *, unsigned, unsigned long);
-extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
-extern int blkdev_get(struct block_device *, mode_t, unsigned);
-extern int blkdev_put(struct block_device *);
-extern int bd_claim(struct block_device *, void *);
-extern void bd_release(struct block_device *);
-
-/* fs/char_dev.c */
-#define CHRDEV_MAJOR_HASH_SIZE	255
-extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
-extern int register_chrdev_region(dev_t, unsigned, const char *);
-extern int register_chrdev(unsigned int, const char *,
-			   struct file_operations *);
-extern int unregister_chrdev(unsigned int, const char *);
-extern void unregister_chrdev_region(dev_t, unsigned);
-extern int chrdev_open(struct inode *, struct file *);
-extern void chrdev_show(struct seq_file *,off_t);
-
-/* fs/block_dev.c */
-#define BLKDEV_MAJOR_HASH_SIZE	255
-#define BDEVNAME_SIZE	32	/* Largest string for a blockdev identifier */
-extern const char *__bdevname(dev_t, char *buffer);
-extern const char *bdevname(struct block_device *bdev, char *buffer);
-extern struct block_device *lookup_bdev(const char *);
-extern struct block_device *open_bdev_excl(const char *, int, void *);
-extern void close_bdev_excl(struct block_device *);
-extern void blkdev_show(struct seq_file *,off_t);
-
-extern void init_special_inode(struct inode *, umode_t, dev_t);
-
-/* Invalid inode operations -- fs/bad_inode.c */
-extern void make_bad_inode(struct inode *);
-extern int is_bad_inode(struct inode *);
-
-extern struct file_operations read_fifo_fops;
-extern struct file_operations write_fifo_fops;
-extern struct file_operations rdwr_fifo_fops;
-
-extern int fs_may_remount_ro(struct super_block *);
-
-/*
- * return READ, READA, or WRITE
- */
-#define bio_rw(bio)		((bio)->bi_rw & (RW_MASK | RWA_MASK))
-
-/*
- * return data direction, READ or WRITE
- */
-#define bio_data_dir(bio)	((bio)->bi_rw & 1)
-
-extern int check_disk_change(struct block_device *);
-extern int invalidate_inodes(struct super_block *);
-extern int __invalidate_device(struct block_device *);
-extern int invalidate_partition(struct gendisk *, int);
-unsigned long invalidate_mapping_pages(struct address_space *mapping,
-					pgoff_t start, pgoff_t end);
-unsigned long invalidate_inode_pages(struct address_space *mapping);
-static inline void invalidate_remote_inode(struct inode *inode)
-{
-	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
-	    S_ISLNK(inode->i_mode))
-		invalidate_inode_pages(inode->i_mapping);
-}
-extern int invalidate_inode_pages2(struct address_space *mapping);
-extern int invalidate_inode_pages2_range(struct address_space *mapping,
-					 pgoff_t start, pgoff_t end);
-extern int write_inode_now(struct inode *, int);
-extern int filemap_fdatawrite(struct address_space *);
-extern int filemap_flush(struct address_space *);
-extern int filemap_fdatawait(struct address_space *);
-extern int filemap_write_and_wait(struct address_space *mapping);
-extern int filemap_write_and_wait_range(struct address_space *mapping,
-				        loff_t lstart, loff_t lend);
-extern void sync_supers(void);
-extern void sync_filesystems(int wait);
-extern void emergency_sync(void);
-extern void emergency_remount(void);
-extern int do_remount_sb(struct super_block *sb, int flags,
-			 void *data, int force);
-extern sector_t bmap(struct inode *, sector_t);
-extern int notify_change(struct dentry *, struct vfsmount *, struct iattr *);
-extern int permission(struct inode *, int, struct nameidata *);
-extern int generic_permission(struct inode *, int,
-		int (*check_acl)(struct inode *, int));
-
-extern int get_write_access(struct inode *);
-extern int deny_write_access(struct file *);
-static inline void put_write_access(struct inode * inode)
-{
-	atomic_dec(&inode->i_writecount);
-}
-static inline void allow_write_access(struct file *file)
-{
-	if (file)
-		atomic_inc(&file->f_dentry->d_inode->i_writecount);
-}
-extern int do_pipe(int *);
-
-extern int open_namei(int dfd, const char *, int, int, struct nameidata *);
-extern int may_open(struct nameidata *, int, int);
-
-extern int kernel_read(struct file *, unsigned long, char *, unsigned long);
-extern struct file * open_exec(const char *);
- 
-/* fs/dcache.c -- generic fs support functions */
-extern int is_subdir(struct dentry *, struct dentry *);
-extern ino_t find_inode_number(struct dentry *, struct qstr *);
-
-#include <linux/err.h>
-
-/* needed for stackable file system support */
-extern loff_t default_llseek(struct file *file, loff_t offset, int origin);
-
-extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
-
-extern void inode_init_once(struct inode *);
-extern void iput(struct inode *);
-extern struct inode * igrab(struct inode *);
-extern ino_t iunique(struct super_block *, ino_t);
-extern int iunique_register(struct inode *inode, int max_reserved);
-extern void iunique_unregister(struct inode *inode);
-extern int inode_needs_sync(struct inode *inode);
-extern void generic_delete_inode(struct inode *inode);
-extern void generic_drop_inode(struct inode *inode);
-
-extern struct inode *ilookup5_nowait(struct super_block *sb,
-		unsigned long hashval, int (*test)(struct inode *, void *),
-		void *data);
-extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
-		int (*test)(struct inode *, void *), void *data);
-extern struct inode *ilookup(struct super_block *sb, unsigned long ino);
-
-extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *);
-extern struct inode * iget_locked(struct super_block *, unsigned long);
-extern void unlock_new_inode(struct inode *);
-
-static inline struct inode *iget(struct super_block *sb, unsigned long ino)
-{
-	struct inode *inode = iget_locked(sb, ino);
-	
-	if (inode && (inode->i_state & I_NEW)) {
-		sb->s_op->read_inode(inode);
-		unlock_new_inode(inode);
-	}
-
-	return inode;
-}
-
-extern void __iget(struct inode * inode);
-extern void clear_inode(struct inode *);
-extern void destroy_inode(struct inode *);
-extern struct inode *new_inode(struct super_block *);
-extern struct inode *new_inode_registered(struct super_block *sb,
-					  int max_reserved);
-extern int remove_suid(struct dentry *, struct vfsmount *);
-extern void remove_dquot_ref(struct super_block *, int, struct list_head *);
-extern struct semaphore iprune_sem;
-
-extern void __insert_inode_hash(struct inode *, unsigned long hashval);
-extern void remove_inode_hash(struct inode *);
-static inline void insert_inode_hash(struct inode *inode) {
-	__insert_inode_hash(inode, inode->i_ino);
-}
-
-extern struct file * get_empty_filp(void);
-extern void file_move(struct file *f, struct list_head *list);
-extern void file_kill(struct file *f);
-struct bio;
-extern void submit_bio(int, struct bio *);
-extern int bdev_read_only(struct block_device *);
-extern int set_blocksize(struct block_device *, int);
-extern int sb_set_blocksize(struct super_block *, int);
-extern int sb_min_blocksize(struct super_block *, int);
-
-extern int generic_file_mmap(struct file *, struct vm_area_struct *);
-extern int generic_file_mmap_delayed(struct file *, struct vm_area_struct *);
-extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
-extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
-extern int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
-extern ssize_t generic_file_read(struct file *, char __user *, size_t, loff_t *);
-int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
-extern ssize_t generic_file_write(struct file *, const char __user *, size_t, loff_t *);
-extern ssize_t generic_file_aio_read(struct kiocb *, char __user *, size_t, loff_t);
-extern ssize_t __generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t *);
-extern ssize_t generic_file_aio_write(struct kiocb *, const char __user *, size_t, loff_t);
-extern ssize_t generic_file_aio_write_nolock(struct kiocb *, const struct iovec *,
-		unsigned long, loff_t *);
-extern ssize_t generic_file_direct_write(struct kiocb *, const struct iovec *,
-		unsigned long *, loff_t, loff_t *, size_t, size_t);
-extern ssize_t generic_file_buffered_write(struct kiocb *, const struct iovec *,
-		unsigned long, loff_t, loff_t *, size_t, ssize_t);
-extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
-extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
-ssize_t generic_file_write_nolock(struct file *file, const struct iovec *iov,
-				unsigned long nr_segs, loff_t *ppos);
-extern ssize_t generic_file_sendfile(struct file *, loff_t *, size_t, read_actor_t, void *);
-extern void do_generic_mapping_read(struct address_space *mapping,
-				    struct file_ra_state *, struct file *,
-				    loff_t *, read_descriptor_t *, read_actor_t);
-extern void
-file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
-extern ssize_t generic_file_readv(struct file *filp, const struct iovec *iov, 
-	unsigned long nr_segs, loff_t *ppos);
-ssize_t generic_file_writev(struct file *filp, const struct iovec *iov, 
-			unsigned long nr_segs, loff_t *ppos);
-extern loff_t no_llseek(struct file *file, loff_t offset, int origin);
-extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin);
-extern loff_t remote_llseek(struct file *file, loff_t offset, int origin);
-extern int generic_file_open(struct inode * inode, struct file * filp);
-extern int nonseekable_open(struct inode * inode, struct file * filp);
-
-#ifdef CONFIG_FS_XIP
-extern ssize_t xip_file_read(struct file *filp, char __user *buf, size_t len,
-			     loff_t *ppos);
-extern ssize_t xip_file_sendfile(struct file *in_file, loff_t *ppos,
-				 size_t count, read_actor_t actor,
-				 void *target);
-extern int xip_file_mmap(struct file * file, struct vm_area_struct * vma);
-extern ssize_t xip_file_write(struct file *filp, const char __user *buf,
-			      size_t len, loff_t *ppos);
-extern int xip_truncate_page(struct address_space *mapping, loff_t from);
-#else
-static inline int xip_truncate_page(struct address_space *mapping, loff_t from)
-{
-	return 0;
-}
-#endif
-
-static inline void do_generic_file_read(struct file * filp, loff_t *ppos,
-					read_descriptor_t * desc,
-					read_actor_t actor)
-{
-	do_generic_mapping_read(filp->f_mapping,
-				&filp->f_ra,
-				filp,
-				ppos,
-				desc,
-				actor);
-}
-
-ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-	struct block_device *bdev, const struct iovec *iov, loff_t offset,
-	unsigned long nr_segs, get_blocks_t get_blocks, dio_iodone_t end_io,
-	int lock_type);
-
-enum {
-	DIO_LOCKING = 1, /* need locking between buffered and direct access */
-	DIO_NO_LOCKING,  /* bdev; no locking at all between buffered/direct */
-	DIO_OWN_LOCKING, /* filesystem locks buffered and direct internally */
-};
-
-static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
-	struct inode *inode, struct block_device *bdev, const struct iovec *iov,
-	loff_t offset, unsigned long nr_segs, get_blocks_t get_blocks,
-	dio_iodone_t end_io)
-{
-	return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
-				nr_segs, get_blocks, end_io, DIO_LOCKING);
-}
-
-static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb,
-	struct inode *inode, struct block_device *bdev, const struct iovec *iov,
-	loff_t offset, unsigned long nr_segs, get_blocks_t get_blocks,
-	dio_iodone_t end_io)
-{
-	return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
-				nr_segs, get_blocks, end_io, DIO_NO_LOCKING);
-}
-
-static inline ssize_t blockdev_direct_IO_own_locking(int rw, struct kiocb *iocb,
-	struct inode *inode, struct block_device *bdev, const struct iovec *iov,
-	loff_t offset, unsigned long nr_segs, get_blocks_t get_blocks,
-	dio_iodone_t end_io)
-{
-	return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
-				nr_segs, get_blocks, end_io, DIO_OWN_LOCKING);
-}
-
-extern struct file_operations generic_ro_fops;
-
-#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
-
-extern int vfs_readlink(struct dentry *, char __user *, int, const char *);
-extern int vfs_follow_link(struct nameidata *, const char *);
-extern int page_readlink(struct dentry *, char __user *, int);
-extern void *page_follow_link_light(struct dentry *, struct nameidata *);
-extern void page_put_link(struct dentry *, struct nameidata *, void *);
-extern int __page_symlink(struct inode *inode, const char *symname, int len,
-		gfp_t gfp_mask);
-extern int page_symlink(struct inode *inode, const char *symname, int len);
-extern struct inode_operations page_symlink_inode_operations;
-extern int generic_readlink(struct dentry *, char __user *, int);
-extern void generic_fillattr(struct inode *, struct kstat *);
-extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
-void inode_add_bytes(struct inode *inode, loff_t bytes);
-void inode_sub_bytes(struct inode *inode, loff_t bytes);
-loff_t inode_get_bytes(struct inode *inode);
-void inode_set_bytes(struct inode *inode, loff_t bytes);
-
-extern int vfs_readdir(struct file *, filldir_t, void *);
-
-extern int vfs_stat(char __user *, struct kstat *);
-extern int vfs_lstat(char __user *, struct kstat *);
-extern int vfs_stat_fd(int dfd, char __user *, struct kstat *);
-extern int vfs_lstat_fd(int dfd, char __user *, struct kstat *);
-extern int vfs_fstat(unsigned int, struct kstat *);
-
-extern int vfs_ioctl(struct file *, unsigned int, unsigned int, unsigned long);
-
-extern struct file_system_type *get_fs_type(const char *name);
-extern struct super_block *get_super(struct block_device *);
-extern struct super_block *user_get_super(dev_t);
-extern void drop_super(struct super_block *sb);
-
-extern int dcache_dir_open(struct inode *, struct file *);
-extern int dcache_dir_close(struct inode *, struct file *);
-extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
-extern int dcache_readdir(struct file *, void *, filldir_t);
-extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *);
-extern int simple_statfs(struct dentry *, struct kstatfs *);
-extern int simple_link(struct dentry *, struct inode *, struct dentry *);
-extern int simple_unlink(struct inode *, struct dentry *);
-extern int simple_rmdir(struct inode *, struct dentry *);
-extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
-extern int simple_sync_file(struct file *, struct dentry *, int);
-extern int simple_empty(struct dentry *);
-extern int simple_readpage(struct file *file, struct page *page);
-extern int simple_prepare_write(struct file *file, struct page *page,
-			unsigned offset, unsigned to);
-extern int simple_commit_write(struct file *file, struct page *page,
-				unsigned offset, unsigned to);
-
-extern struct dentry *simple_lookup(struct inode *, struct dentry *, struct nameidata *);
-extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
-extern struct file_operations simple_dir_operations;
-extern struct inode_operations simple_dir_inode_operations;
-struct tree_descr { char *name; struct file_operations *ops; int mode; };
-struct dentry *d_alloc_name(struct dentry *, const char *);
-extern int simple_fill_super(struct super_block *, int, struct tree_descr *);
-extern int simple_pin_fs(char *name, struct vfsmount **mount, int *count);
-extern void simple_release_fs(struct vfsmount **mount, int *count);
-
-extern ssize_t simple_read_from_buffer(void __user *, size_t, loff_t *, const void *, size_t);
-
-#ifdef CONFIG_MIGRATION
-extern int buffer_migrate_page(struct page *, struct page *);
-#else
-#define buffer_migrate_page NULL
-#endif
-
-extern int inode_change_ok(struct inode *, struct iattr *);
-extern int __must_check inode_setattr(struct inode *, struct iattr *);
-
-extern void file_update_time(struct file *file);
-extern void mapping_update_time(struct file *file);
-
-static inline ino_t parent_ino(struct dentry *dentry)
-{
-	ino_t res;
-
-	spin_lock(&dentry->d_lock);
-	res = dentry->d_parent->d_inode->i_ino;
-	spin_unlock(&dentry->d_lock);
-	return res;
-}
-
-/* kernel/fork.c */
-extern int unshare_files(void);
-
-/* Transaction based IO helpers */
-
-/*
- * An argresp is stored in an allocated page and holds the
- * size of the argument or response, along with its content
- */
-struct simple_transaction_argresp {
-	ssize_t size;
-	char data[0];
-};
-
-#define SIMPLE_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct simple_transaction_argresp))
-
-char *simple_transaction_get(struct file *file, const char __user *buf,
-				size_t size);
-ssize_t simple_transaction_read(struct file *file, char __user *buf,
-				size_t size, loff_t *pos);
-int simple_transaction_release(struct inode *inode, struct file *file);
-
-static inline void simple_transaction_set(struct file *file, size_t n)
-{
-	struct simple_transaction_argresp *ar = file->private_data;
-
-	BUG_ON(n > SIMPLE_TRANSACTION_LIMIT);
-
-	/*
-	 * The barrier ensures that ar->size will really remain zero until
-	 * ar->data is ready for reading.
-	 */
-	smp_mb();
-	ar->size = n;
-}
-
-/*
- * simple attribute files
- *
- * These attributes behave similar to those in sysfs:
- *
- * Writing to an attribute immediately sets a value, an open file can be
- * written to multiple times.
- *
- * Reading from an attribute creates a buffer from the value that might get
- * read with multiple read calls. When the attribute has been read
- * completely, no further read calls are possible until the file is opened
- * again.
- *
- * All attributes contain a text representation of a numeric value
- * that are accessed with the get() and set() functions.
- */
-#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)		\
-static int __fops ## _open(struct inode *inode, struct file *file)	\
-{									\
-	__simple_attr_check_format(__fmt, 0ull);			\
-	return simple_attr_open(inode, file, __get, __set, __fmt);	\
-}									\
-static struct file_operations __fops = {				\
-	.owner	 = THIS_MODULE,						\
-	.open	 = __fops ## _open,					\
-	.release = simple_attr_close,					\
-	.read	 = simple_attr_read,					\
-	.write	 = simple_attr_write,					\
-};
-
-static inline void __attribute__((format(printf, 1, 2)))
-__simple_attr_check_format(const char *fmt, ...)
-{
-	/* don't do anything, just let the compiler check the arguments; */
-}
-
-int simple_attr_open(struct inode *inode, struct file *file,
-		     u64 (*get)(void *), void (*set)(void *, u64),
-		     const char *fmt);
-int simple_attr_close(struct inode *inode, struct file *file);
-ssize_t simple_attr_read(struct file *file, char __user *buf,
-			 size_t len, loff_t *ppos);
-ssize_t simple_attr_write(struct file *file, const char __user *buf,
-			  size_t len, loff_t *ppos);
-
-
-#ifdef CONFIG_SECURITY
-static inline char *alloc_secdata(void)
-{
-	return (char *)get_zeroed_page(GFP_KERNEL);
-}
-
-static inline void free_secdata(void *secdata)
-{
-	free_page((unsigned long)secdata);
-}
-#else
-static inline char *alloc_secdata(void)
-{
-	return (char *)1;
-}
-
-static inline void free_secdata(void *secdata)
-{ }
-#endif	/* CONFIG_SECURITY */
-
-#endif /* __KERNEL__ */
-#endif /* _LINUX_FS_H */
-
-/* File is being opened for execution. Primary users of this flag are
-   distributed filesystems that can use it to achieve correct ETXTBUSY
-   behavior for cross-node execution/opening_for_writing of files */
-#define FMODE_EXEC	16
-
-#define SEEK_SET	0	/* seek relative to beginning of file */
-#define SEEK_CUR	1	/* seek relative to current file position */
-#define SEEK_END	2	/* seek relative to end of file */
-
-#define i_private u.generic_ip
-
-/**
- * inc_nlink - directly increment an inode's link count
- * @inode: inode
- *
- * This is a low-level filesystem helper to replace any
- * direct filesystem manipulation of i_nlink.  Currently,
- * it is only here for parity with dec_nlink().
- */
 static inline void inc_nlink(struct inode *inode)
 {
-        inode->i_nlink++;
+	inode->i_nlink++;
 }
 
-/**
- * drop_nlink - directly drop an inode's link count
- * @inode: inode
- *
- * This is a low-level filesystem helper to replace any
- * direct filesystem manipulation of i_nlink.  In cases
- * where we are attempting to track writes to the
- * filesystem, a decrement to zero means an imminent
- * write when the file is truncated and actually unlinked
- * on the filesystem.
- */
 static inline void drop_nlink(struct inode *inode)
 {
-        inode->i_nlink--;
+	inode->i_nlink--;
 }
 
-/**
- * clear_nlink - directly zero an inode's link count
- * @inode: inode
- * 
- * This is a low-level filesystem helper to replace any
- * direct filesystem manipulation of i_nlink.  See
- * drop_nlink() for why we care about i_nlink hitting zero.
- **/
 static inline void clear_nlink(struct inode *inode)
 {
-        inode->i_nlink = 0;
+	inode->i_nlink = 0;
 }
+
 #endif
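
As a rough usage sketch of the fs.h backport helpers added above (inode_permission() mapped onto the old permission() call, plus the nlink helpers), the caller and variable names below are hypothetical and only illustrate the intended call pattern:

/*
 * Hypothetical caller of the backported fs.h helpers above; the
 * function and "victim" names are illustrative only.
 */
#include <linux/fs.h>

static int example_unlink_check(struct inode *dir, struct inode *victim)
{
	int err;

	/* inode_permission() here is a wrapper around permission(inode, mask, NULL) */
	err = inode_permission(dir, MAY_WRITE | MAY_EXEC);
	if (err)
		return err;

	/* drop_nlink() replaces direct manipulation of victim->i_nlink */
	drop_nlink(victim);
	return 0;
}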

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/gfp.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/gfp.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/gfp.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,9 @@
+#ifndef BACKPORT_LINUX_GFP_H
+#define BACKPORT_LINUX_GFP_H
+
+#include_next <linux/gfp.h>
+
+/* This equals 0, but use constants in case they ever change */
+#define GFP_NOWAIT     (GFP_ATOMIC & ~__GFP_HIGH)
+
+#endif /* BACKPORT_LINUX_GFP_H */
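
A minimal sketch of a consumer of the backported GFP_NOWAIT definition above; the wrapper function is hypothetical:

/*
 * Allocation attempted from a context that must not sleep and should
 * not dip into the __GFP_HIGH emergency reserve; fails fast instead.
 */
#include <linux/gfp.h>
#include <linux/slab.h>

static void *try_alloc_buffer(size_t len)
{
	return kmalloc(len, GFP_NOWAIT);
}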

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/highmem.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/highmem.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/highmem.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,36 @@
+#ifndef LINUX_HIGHMEM_H
+#define LINUX_HIGHMEM_H
+
+#include_next <linux/highmem.h>
+
+static inline void zero_user_segments(struct page *page,
+	unsigned start1, unsigned end1,
+	unsigned start2, unsigned end2)
+{
+	void *kaddr = kmap_atomic(page, KM_USER0);
+
+	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);
+
+	if (end1 > start1)
+		memset(kaddr + start1, 0, end1 - start1);
+
+	if (end2 > start2)
+		memset(kaddr + start2, 0, end2 - start2);
+
+	kunmap_atomic(kaddr, KM_USER0);
+	flush_dcache_page(page);
+}
+
+static inline void zero_user_segment(struct page *page,
+	unsigned start, unsigned end)
+{
+	zero_user_segments(page, start, end, 0, 0);
+}
+
+static inline void zero_user(struct page *page,
+	unsigned start, unsigned size)
+{
+	zero_user_segments(page, start, start + size, 0, 0);
+}
+
+#endif
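
A short, hypothetical caller of the zero_user_segment() backport defined above, zeroing the unwritten tail of a page after a short write:

/* Sketch only; the "written" bookkeeping is assumed, not part of the commit. */
#include <linux/highmem.h>
#include <linux/mm.h>

static void zero_page_tail(struct page *page, unsigned int written)
{
	if (written < PAGE_SIZE)
		zero_user_segment(page, written, PAGE_SIZE);
}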

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/inet.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/inet.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/inet.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -6,4 +6,203 @@
 #define INET_ADDRSTRLEN		(16)
 #define INET6_ADDRSTRLEN	(48)
 
+#define IN6PTON_XDIGIT	  	0x00010000
+#define IN6PTON_DIGIT	   	0x00020000
+#define IN6PTON_COLON_MASK      0x00700000
+#define IN6PTON_COLON_1		0x00100000      /* single : requested */
+#define IN6PTON_COLON_2	 	0x00200000      /* second : requested */
+#define IN6PTON_COLON_1_2       0x00400000      /* :: requested */
+#define IN6PTON_DOT	     	0x00800000      /* . */
+#define IN6PTON_DELIM	   	0x10000000
+#define IN6PTON_NULL	    	0x20000000      /* first/tail */
+#define IN6PTON_UNKNOWN	 	0x40000000
+
+static inline int xdigit2bin(char c, int delim)
+{
+	if (c == delim || c == '\0')
+		return IN6PTON_DELIM;
+	if (c == ':')
+		return IN6PTON_COLON_MASK;
+	if (c == '.')
+		return IN6PTON_DOT;
+	if (c >= '0' && c <= '9')
+		return (IN6PTON_XDIGIT | IN6PTON_DIGIT| (c - '0'));
+	if (c >= 'a' && c <= 'f')
+		return (IN6PTON_XDIGIT | (c - 'a' + 10));
+	if (c >= 'A' && c <= 'F')
+		return (IN6PTON_XDIGIT | (c - 'A' + 10));
+	if (delim == -1)
+		return IN6PTON_DELIM;
+	return IN6PTON_UNKNOWN;
+}
+
+static inline int in4_pton(const char *src, int srclen,
+	     u8 *dst,
+	     int delim, const char **end)
+{
+	const char *s;
+	u8 *d;
+	u8 dbuf[4];
+	int ret = 0;
+	int i;
+	int w = 0;
+
+	if (srclen < 0)
+		srclen = strlen(src);
+	s = src;
+	d = dbuf;
+	i = 0;
+	while(1) {
+		int c;
+		c = xdigit2bin(srclen > 0 ? *s : '\0', delim);
+		if (!(c & (IN6PTON_DIGIT | IN6PTON_DOT | IN6PTON_DELIM | IN6PTON_COLON_MASK))) {
+			goto out;
+		}
+		if (c & (IN6PTON_DOT | IN6PTON_DELIM | IN6PTON_COLON_MASK)) {
+			if (w == 0)
+				goto out;
+			*d++ = w & 0xff;
+			w = 0;
+			i++;
+			if (c & (IN6PTON_DELIM | IN6PTON_COLON_MASK)) {
+				if (i != 4)
+					goto out;
+				break;
+			}
+			goto cont;
+		}
+		w = (w * 10) + c;
+		if ((w & 0xffff) > 255) {
+			goto out;
+		}
+cont:
+		if (i >= 4)
+			goto out;
+		s++;
+		srclen--;
+	}
+	ret = 1;
+	memcpy(dst, dbuf, sizeof(dbuf));
+out:
+	if (end)
+		*end = s;
+	return ret;
+}
+
+static inline int in6_pton(const char *src, int srclen,
+	     u8 *dst,
+	     int delim, const char **end)
+{
+	const char *s, *tok = NULL;
+	u8 *d, *dc = NULL;
+	u8 dbuf[16];
+	int ret = 0;
+	int i;
+	int state = IN6PTON_COLON_1_2 | IN6PTON_XDIGIT | IN6PTON_NULL;
+	int w = 0;
+
+	memset(dbuf, 0, sizeof(dbuf));
+
+	s = src;
+	d = dbuf;
+	if (srclen < 0)
+		srclen = strlen(src);
+
+	while (1) {
+		int c;
+
+		c = xdigit2bin(srclen > 0 ? *s : '\0', delim);
+		if (!(c & state))
+			goto out;
+		if (c & (IN6PTON_DELIM | IN6PTON_COLON_MASK)) {
+			/* process one 16-bit word */
+			if (!(state & IN6PTON_NULL)) {
+				*d++ = (w >> 8) & 0xff;
+				*d++ = w & 0xff;
+			}
+			w = 0;
+			if (c & IN6PTON_DELIM) {
+				/* We've processed last word */
+				break;
+			}
+			/*
+			 * COLON_1 => XDIGIT
+			 * COLON_2 => XDIGIT|DELIM
+			 * COLON_1_2 => COLON_2
+			 */
+			switch (state & IN6PTON_COLON_MASK) {
+			case IN6PTON_COLON_2:
+				dc = d;
+				state = IN6PTON_XDIGIT | IN6PTON_DELIM;
+				if (dc - dbuf >= sizeof(dbuf))
+					state |= IN6PTON_NULL;
+				break;
+			case IN6PTON_COLON_1|IN6PTON_COLON_1_2:
+				state = IN6PTON_XDIGIT | IN6PTON_COLON_2;
+				break;
+			case IN6PTON_COLON_1:
+				state = IN6PTON_XDIGIT;
+				break;
+			case IN6PTON_COLON_1_2:
+				state = IN6PTON_COLON_2;
+				break;
+			default:
+				state = 0;
+			}
+			tok = s + 1;
+			goto cont;
+		}
+
+		if (c & IN6PTON_DOT) {
+			ret = in4_pton(tok ? tok : s, srclen + (int)(s - tok), d, delim, &s);
+			if (ret > 0) {
+				d += 4;
+				break;
+			}
+			goto out;
+		}
+
+		w = (w << 4) | (0xff & c);
+		state = IN6PTON_COLON_1 | IN6PTON_DELIM;
+		if (!(w & 0xf000)) {
+			state |= IN6PTON_XDIGIT;
+		}
+		if (!dc && d + 2 < dbuf + sizeof(dbuf)) {
+			state |= IN6PTON_COLON_1_2;
+			state &= ~IN6PTON_DELIM;
+		}
+		if (d + 2 >= dbuf + sizeof(dbuf)) {
+			state &= ~(IN6PTON_COLON_1|IN6PTON_COLON_1_2);
+		}
+cont:
+		if ((dc && d + 4 < dbuf + sizeof(dbuf)) ||
+		    d + 4 == dbuf + sizeof(dbuf)) {
+			state |= IN6PTON_DOT;
+		}
+		if (d >= dbuf + sizeof(dbuf)) {
+			state &= ~(IN6PTON_XDIGIT|IN6PTON_COLON_MASK);
+		}
+		s++;
+		srclen--;
+	}
+
+	i = 15; d--;
+
+	if (dc) {
+		while(d >= dc)
+			dst[i--] = *d--;
+		while(i >= dc - dbuf)
+			dst[i--] = 0;
+		while(i >= 0)
+			dst[i--] = *d--;
+	} else
+		memcpy(dst, dbuf, sizeof(dbuf));
+
+	ret = 1;
+out:
+	if (end)
+		*end = s;
+	return ret;
+}
+
 #endif /* __BACKPORT_LINUX_INET_H_TO_2_6_26__ */
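
A minimal sketch of calling the in4_pton() backport above to parse a dotted-quad string; "addr_str" and the wrapper are hypothetical:

/*
 * srclen == -1 means "use strlen(src)"; delim == -1 means the string
 * ends at the terminating NUL.  in4_pton() returns 1 on success.
 */
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/errno.h>

static int parse_ipv4(const char *addr_str, struct in_addr *out)
{
	const char *end;

	if (!in4_pton(addr_str, -1, (u8 *)&out->s_addr, -1, &end))
		return -EINVAL;
	return 0;
}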

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/inetdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/inetdevice.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/inetdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -7,16 +7,24 @@
 static inline struct net_device *xxx_ip_dev_find(u32 addr)
 {
 	struct net_device *dev;
-	u32 ip;
+	struct in_ifaddr **ifap;
+	struct in_ifaddr *ifa;
+	struct in_device *in_dev;
 
 	read_lock(&dev_base_lock);
-	for (dev = dev_base; dev; dev = dev->next) {
-		ip = inet_select_addr(dev, 0, RT_SCOPE_HOST);
-		if (ip == addr) {
-			dev_hold(dev);
-			break;
+	for (dev = dev_base; dev; dev = dev->next)
+		if ((in_dev = in_dev_get(dev))) {
+			for (ifap = &in_dev->ifa_list; (ifa = *ifap);
+			     ifap = &ifa->ifa_next) {
+				if (addr == ifa->ifa_address) {
+					dev_hold(dev);
+					in_dev_put(in_dev);
+					goto found;
+				}
+			}
+			in_dev_put(in_dev);
 		}
-	}
+found:
 	read_unlock(&dev_base_lock);
 
 	return dev;
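
A hypothetical caller of the reworked xxx_ip_dev_find() helper above; note the helper returns the device with a reference held, which the caller must drop:

#include <linux/inetdevice.h>
#include <linux/netdevice.h>

static void example_lookup(u32 addr)
{
	struct net_device *dev = xxx_ip_dev_find(addr);

	if (dev) {
		/* ... use dev ... */
		dev_put(dev);	/* balance the dev_hold() taken in the helper */
	}
}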

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/kallsyms.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/kallsyms.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/kallsyms.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -6,5 +6,4 @@
 #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
 			 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
 
-
 #endif  /* _BACKPORT_LINUX_KALLSYMSPATH_H */

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/kernel.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/kernel.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/kernel.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -2,8 +2,11 @@
 #define BACKPORT_KERNEL_H_2_6_22
 
 #include_next <linux/kernel.h>
+#include <asm/errno.h>
+#include <asm/string.h>
 
 #define USHORT_MAX     ((u16)(~0U))
+#define LLONG_MAX      ((long long)(~0ULL>>1))
 
 #define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
 
@@ -13,6 +16,27 @@
 
 #include <linux/log2.h>
 
+static inline int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
+{
+       char *tail;
+       unsigned long val;
+       size_t len;
+
+       *res = 0;
+       len = strlen(cp);
+       if (len == 0)
+               return -EINVAL;
+
+       val = simple_strtoul(cp, &tail, base);
+       if ((*tail == '\0') ||
+               ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
+               *res = val;
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
 
 #endif
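
A small sketch of the strict_strtoul() backport above in use, e.g. from a sysfs store handler; the handler itself is hypothetical:

#include <linux/kernel.h>
#include <linux/errno.h>

static ssize_t example_store(const char *buf, size_t count)
{
	unsigned long val;

	if (strict_strtoul(buf, 10, &val))
		return -EINVAL;
	/* ... apply val ... */
	return count;
}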

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/log2.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/log2.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/log2.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -64,6 +64,15 @@
 	return 1UL << fls_long(n - 1);
 }
 
+/*
+ * round down to nearest power of two
+ */
+static inline __attribute__((const))
+unsigned long __rounddown_pow_of_two(unsigned long n)
+{
+	return 1UL << (fls_long(n) - 1);
+}
+
 /**
  * ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value
  * @n - parameter
@@ -166,4 +175,20 @@
 	__roundup_pow_of_two(n)			\
  )
 
+/**
+ * rounddown_pow_of_two - round the given value down to nearest power of two
+ * @n - parameter
+ *
+ * round the given value down to the nearest power of two
+ * - the result is undefined when n == 0
+ * - this can be used to initialise global variables from constant data
+ */
+#define rounddown_pow_of_two(n)			\
+(						\
+	__builtin_constant_p(n) ? (		\
+		(n == 1) ? 0 :			\
+		(1UL << ilog2(n))) :		\
+	__rounddown_pow_of_two(n)		\
+ )
+
 #endif /* _LINUX_LOG2_H */
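
A brief, hypothetical use of the rounddown_pow_of_two() backport above, clamping a runtime value (so the non-constant __rounddown_pow_of_two() path is taken):

#include <linux/log2.h>

static unsigned long clamp_queue_depth(unsigned long requested)
{
	if (requested < 1)
		requested = 1;
	return rounddown_pow_of_two(requested);
}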

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/magic.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/magic.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/magic.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,6 @@
+#ifndef BACKPORT_LINUX_MAGIC_H
+#define BACKPORT_LINUX_MAGIC_H
+
+#define NFS_SUPER_MAGIC		0x6969
+
+#endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/mm.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/mm.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/mm.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -7,21 +7,28 @@
 #include <asm/highmem.h>
 #endif
 
-/*
- * Determine if an address is within the vmalloc range
- *
- * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
- * is no special casing required.
- */
-static inline int is_vmalloc_addr(const void *x)
+#define VM_CAN_NONLINEAR 0x08000000     /* Has ->fault & does nonlinear pages */
+
+#define is_vmalloc_addr(x) ((unsigned long)(x) >= VMALLOC_START && (unsigned long)(x) < VMALLOC_END)
+
+struct shrinker {
+	shrinker_t		shrink;
+	struct list_head	list;
+	int			seeks;  /* seeks to recreate an obj */
+	long			nr;     /* objs pending delete */
+};
+
+static inline void task_io_account_cancelled_write(size_t bytes)
 {
-#ifdef CONFIG_MMU
-	unsigned long addr = (unsigned long)x;
+}
 
-	return addr >= VMALLOC_START && addr < VMALLOC_END;
-#else
-	return 0;
-#endif
+static inline void cancel_dirty_page(struct page *page, unsigned int account_size)
+{
+	if (TestClearPageDirty(page)) {
+		struct address_space *mapping = page->mapping;
+		if (mapping && account_size)
+			task_io_account_cancelled_write(account_size);
+	}
 }
 
 #endif
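
With this change is_vmalloc_addr() becomes a simple address-range macro and cancel_dirty_page() is provided for truncate paths. A hypothetical free helper built on the macro (names are illustrative only):

	/* Hypothetical: free a buffer that may be kmalloc'ed or vmalloc'ed. */
	static void foo_free_buf(void *buf)
	{
		if (is_vmalloc_addr(buf))
			vfree(buf);
		else
			kfree(buf);
	}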

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/mount.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/mount.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/mount.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -2,13 +2,10 @@
 #define BACKPORT_LINUX_MOUNT_H
 
 #include_next <linux/mount.h>
+#include <linux/fs.h>
 
-struct file_system_type;
-static inline struct vfsmount *vfs_kern_mount(struct file_system_type *type,
-					      int flags, const char *name, void *data)
-{
-	/* NB: First field of file_system_type is the name */
-	return do_kern_mount((const char *)type, flags, name, data);
-}
+extern int mnt_want_write(struct vfsmount *mnt);
+extern void mnt_drop_write(struct vfsmount *mnt);
+extern int init_mnt_writers(void);
 
 #endif
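
The mount-writer declarations replace the old vfs_kern_mount() wrapper; the definitions live in src/namespace.c later in this revision. A hypothetical metadata-update path bracketed by them:

	/* Hypothetical update path taking the per-mount write reference. */
	static int foo_update_inode(struct vfsmount *mnt, struct inode *inode)
	{
		int err = mnt_want_write(mnt);

		if (err)
			return err;		/* -EROFS on a read-only mount */

		/* ... modify inode attributes here ... */

		mnt_drop_write(mnt);
		return 0;
	}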

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/mpage.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/mpage.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/mpage.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,13 @@
+#ifndef BACKPORT_LINUX_MPAGE_H
+#define BACKPORT_LINUX_MPAGE_H
+
+#include_next <linux/mpage.h>
+#include <linux/pagevec.h>
+
+typedef int (*backport_writepage_t)(struct page *page, struct writeback_control *wbc,
+                                void *data);
+
+extern int write_cache_pages(struct address_space *mapping,
+                      struct writeback_control *wbc, backport_writepage_t writepage,
+                      void *data);
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/namei.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/namei.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/namei.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,27 @@
+#ifndef BACKPORT_LINUX_NAMEI_H
+#define BACKPORT_LINUX_NAMEI_H
+
+#include_next <linux/namei.h>
+#include <linux/mount.h>
+
+static inline int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
+		    const char *name, unsigned int flags,
+		    struct nameidata *nd)
+{
+	int retval;
+
+	/* same as do_path_lookup */
+	nd->last_type = LAST_ROOT;
+	nd->flags = flags;
+	nd->depth = 0;
+
+	nd->dentry = dentry;
+	nd->mnt = mnt;
+	mntget(nd->mnt);
+	dget(nd->dentry);
+
+	retval = path_walk(name, nd);
+
+	return retval;
+}
+#endif /* BACKPORT_LINUX_NAMEI_H */
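
vfs_path_lookup() is emulated by seeding a nameidata and calling path_walk(). A hypothetical relative lookup, releasing the result with backport_path_put() (added to linux/path.h in this same revision); the surrounding names are assumptions:

	/* Hypothetical lookup of "name" relative to a known mount/dentry pair. */
	static int foo_lookup_child(struct dentry *root, struct vfsmount *mnt,
				    const char *name)
	{
		struct nameidata nd;
		int err;

		err = vfs_path_lookup(root, mnt, name, LOOKUP_FOLLOW, &nd);
		if (err)
			return err;

		/* ... use nd.dentry / nd.mnt ... */
		backport_path_put(&nd);
		return 0;
	}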

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/net.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/net.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/net.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -4,6 +4,8 @@
 #include_next <linux/net.h>
 #include <linux/random.h>
 
+extern ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags);
+
 static inline
 int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
 			 int *addrlen)
@@ -18,19 +20,7 @@
 	return sock->ops->getname(sock, addr, addrlen, 1);
 }
 
-enum sock_shutdown_cmd {
-       SHUT_RD         = 0,
-       SHUT_WR         = 1,
-       SHUT_RDWR       = 2,
-};
-
 static inline
-int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
-{
-	return sock->ops->shutdown(sock, how);
-}
-
-static inline
 int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
 {
 	return sock->ops->bind(sock, addr, addrlen);
@@ -42,17 +32,36 @@
 	return sock->ops->listen(sock, backlog);
 }
 
+extern int kernel_accept(struct socket *sock, struct socket **newsock, int flags);
+
 static inline
-int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, 
-		int flags)
+int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, int flags)
 {
-	return sock->ops->connect(sock, addr, addrlen, 0);
+	return sock->ops->connect(sock, addr, addrlen, flags);
 }
 
+enum sock_shutdown_cmd {
+	SHUT_RD		= 0,
+	SHUT_WR		= 1,
+	SHUT_RDWR	= 2,
+};
+
+
+static inline int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd flags)
+{
+	return sock->ops->shutdown(sock, flags);
+}
+
 static inline
 int kernel_sendpage(struct socket *sock, struct page *page, int offset,
 		size_t size, int flags)
 {
-	return sock->ops->sendpage(sock, page, offset, size, flags);
+	if (sock->ops->sendpage)
+		return sock->ops->sendpage(sock, page, offset, size, flags);
+
+	return sock_no_sendpage(sock, page, offset, size, flags);
 }
+
+extern int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
+
 #endif
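
kernel_connect() now honours its flags argument and kernel_sendpage() falls back to sock_no_sendpage() when the protocol has no ->sendpage. A hypothetical send wrapper that works either way (helper name and policy are illustrative):

	/* Hypothetical send helper: uses ->sendpage when present, copies otherwise. */
	static int foo_send_page(struct socket *sock, struct page *page,
				 int offset, size_t len)
	{
		int sent = kernel_sendpage(sock, page, offset, len, MSG_DONTWAIT);

		if (sent < 0)
			return sent;
		return sent == (int)len ? 0 : -EAGAIN;
	}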

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/netdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/netdevice.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/netdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -16,6 +16,7 @@
 
 #define NETIF_F_TSO6    0
 #define NETIF_F_LRO		32768   /* large receive offload */
+#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
 
 #define dev_get_by_name(net, name) dev_get_by_name(name)
 #endif

Deleted: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/nfs_fs.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/nfs_fs.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/nfs_fs.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,617 +0,0 @@
-/*
- *  linux/include/linux/nfs_fs.h
- *
- *  Copyright (C) 1992  Rick Sladkey
- *
- *  OS-specific nfs filesystem definitions and declarations
- */
-
-#ifndef _LINUX_NFS_FS_H
-#define _LINUX_NFS_FS_H
-
-#define NFS_SUPER_MAGIC         0x6969
-
-/* Default timeout values */
-#define NFS_DEF_UDP_TIMEO	(11)
-#define NFS_DEF_UDP_RETRANS	(3)
-#define NFS_DEF_TCP_TIMEO	(600)
-#define NFS_DEF_TCP_RETRANS	(2)
-
-#define NFS_MAX_UDP_TIMEOUT	(60*HZ)
-#define NFS_MAX_TCP_TIMEOUT	(600*HZ)
-
-#define NFS_DEF_ACREGMIN        (3)
-#define NFS_DEF_ACREGMAX        (60)
-#define NFS_DEF_ACDIRMIN        (30)
-#define NFS_DEF_ACDIRMAX        (60)
-
-
-/*
- * When flushing a cluster of dirty pages, there can be different
- * strategies:
- */
-#define FLUSH_SYNC		1	/* file being synced, or contention */
-#define FLUSH_STABLE		4	/* commit to stable storage */
-#define FLUSH_LOWPRI		8	/* low priority background flush */
-#define FLUSH_HIGHPRI		16	/* high priority memory reclaim flush */
-#define FLUSH_NOCOMMIT		32	/* Don't send the NFSv3/v4 COMMIT */
-#define FLUSH_INVALIDATE	64	/* Invalidate the page cache */
-#define FLUSH_NOWRITEPAGE	128	/* Don't call writepage() */
-
-#ifdef __KERNEL__
-
-#include <linux/in.h>
-#include <linux/kref.h>
-#include <linux/mm.h>
-#include <linux/namei.h>
-#include <linux/pagemap.h>
-#include <linux/rbtree.h>
-#include <linux/rwsem.h>
-#include <linux/wait.h>
-
-#include <linux/sunrpc/debug.h>
-#include <linux/sunrpc/auth.h>
-#include <linux/sunrpc/clnt.h>
-
-#include <linux/nfs.h>
-#include <linux/nfs2.h>
-#include <linux/nfs3.h>
-#include <linux/nfs4.h>
-#include <linux/nfs_xdr.h>
-#include <linux/nfs_fs_sb.h>
-
-#include <linux/mempool.h>
-#include <linux/path.h>
-
-/*
- * These are the default flags for swap requests
- */
-#define NFS_RPC_SWAPFLAGS		(RPC_TASK_SWAPPER|RPC_TASK_ROOTCREDS)
-
-/*
- * NFSv3/v4 Access mode cache entry
- */
-struct nfs_access_entry {
-	struct rb_node		rb_node;
-	struct list_head	lru;
-	unsigned long		jiffies;
-	struct rpc_cred *	cred;
-	int			mask;
-};
-
-struct nfs4_state;
-struct nfs_open_context {
-	atomic_t count;
-	struct path path;
-	struct rpc_cred *cred;
-	struct nfs4_state *state;
-	fl_owner_t lockowner;
-	int mode;
-
-	unsigned long flags;
-#define NFS_CONTEXT_ERROR_WRITE		(0)
-	int error;
-
-	struct list_head list;
-
-	__u64 dir_cookie;
-};
-
-/*
- * NFSv4 delegation
- */
-struct nfs_delegation;
-
-struct posix_acl;
-
-/*
- * nfs fs inode data in memory
- */
-struct nfs_inode {
-	/*
-	 * The 64bit 'inode number'
-	 */
-	__u64 fileid;
-
-	/*
-	 * NFS file handle
-	 */
-	struct nfs_fh		fh;
-
-	/*
-	 * Various flags
-	 */
-	unsigned long		flags;			/* atomic bit ops */
-	unsigned long		cache_validity;		/* bit mask */
-
-	/*
-	 * read_cache_jiffies is when we started read-caching this inode.
-	 * attrtimeo is for how long the cached information is assumed
-	 * to be valid. A successful attribute revalidation doubles
-	 * attrtimeo (up to acregmax/acdirmax), a failure resets it to
-	 * acregmin/acdirmin.
-	 *
-	 * We need to revalidate the cached attrs for this inode if
-	 *
-	 *	jiffies - read_cache_jiffies > attrtimeo
-	 */
-	unsigned long		read_cache_jiffies;
-	unsigned long		attrtimeo;
-	unsigned long		attrtimeo_timestamp;
-	__u64			change_attr;		/* v4 only */
-
-	unsigned long		last_updated;
-	/* "Generation counter" for the attribute cache. This is
-	 * bumped whenever we update the metadata on the
-	 * server.
-	 */
-	unsigned long		cache_change_attribute;
-
-	struct rb_root		access_cache;
-	struct list_head	access_cache_entry_lru;
-	struct list_head	access_cache_inode_lru;
-#ifdef CONFIG_NFS_V3_ACL
-	struct posix_acl	*acl_access;
-	struct posix_acl	*acl_default;
-#endif
-
-	/*
-	 * This is the cookie verifier used for NFSv3 readdir
-	 * operations
-	 */
-	__be32			cookieverf[2];
-
-	/*
-	 * This is the list of dirty unwritten pages.
-	 */
-	struct radix_tree_root	nfs_page_tree;
-
-	unsigned long		ncommit,
-				npages;
-
-	/* Open contexts for shared mmap writes */
-	struct list_head	open_files;
-
-	/* Number of in-flight sillydelete RPC calls */
-	atomic_t		silly_count;
-	/* List of deferred sillydelete requests */
-	struct hlist_head	silly_list;
-	wait_queue_head_t	waitqueue;
-
-#ifdef CONFIG_NFS_V4
-	struct nfs4_cached_acl	*nfs4_acl;
-        /* NFSv4 state */
-	struct list_head	open_states;
-	struct nfs_delegation	*delegation;
-	int			 delegation_state;
-	struct rw_semaphore	rwsem;
-#endif /* CONFIG_NFS_V4*/
-	struct inode		vfs_inode;
-};
-
-/*
- * Cache validity bit flags
- */
-#define NFS_INO_INVALID_ATTR	0x0001		/* cached attrs are invalid */
-#define NFS_INO_INVALID_DATA	0x0002		/* cached data is invalid */
-#define NFS_INO_INVALID_ATIME	0x0004		/* cached atime is invalid */
-#define NFS_INO_INVALID_ACCESS	0x0008		/* cached access cred invalid */
-#define NFS_INO_INVALID_ACL	0x0010		/* cached acls are invalid */
-#define NFS_INO_REVAL_PAGECACHE	0x0020		/* must revalidate pagecache */
-#define NFS_INO_REVAL_FORCED	0x0040		/* force revalidation ignoring a delegation */
-
-/*
- * Bit offsets in flags field
- */
-#define NFS_INO_REVALIDATING	(0)		/* revalidating attrs */
-#define NFS_INO_ADVISE_RDPLUS	(1)		/* advise readdirplus */
-#define NFS_INO_STALE		(2)		/* possible stale inode */
-#define NFS_INO_ACL_LRU_SET	(3)		/* Inode is on the LRU list */
-#define NFS_INO_MOUNTPOINT	(4)		/* inode is remote mountpoint */
-
-static inline struct nfs_inode *NFS_I(const struct inode *inode)
-{
-	return container_of(inode, struct nfs_inode, vfs_inode);
-}
-
-static inline struct nfs_server *NFS_SB(const struct super_block *s)
-{
-	return (struct nfs_server *)(s->s_fs_info);
-}
-
-static inline struct nfs_fh *NFS_FH(const struct inode *inode)
-{
-	return &NFS_I(inode)->fh;
-}
-
-static inline struct nfs_server *NFS_SERVER(const struct inode *inode)
-{
-	return NFS_SB(inode->i_sb);
-}
-
-static inline struct rpc_clnt *NFS_CLIENT(const struct inode *inode)
-{
-	return NFS_SERVER(inode)->client;
-}
-
-static inline const struct nfs_rpc_ops *NFS_PROTO(const struct inode *inode)
-{
-	return NFS_SERVER(inode)->nfs_client->rpc_ops;
-}
-
-static inline __be32 *NFS_COOKIEVERF(const struct inode *inode)
-{
-	return NFS_I(inode)->cookieverf;
-}
-
-static inline unsigned NFS_MINATTRTIMEO(const struct inode *inode)
-{
-	struct nfs_server *nfss = NFS_SERVER(inode);
-	return S_ISDIR(inode->i_mode) ? nfss->acdirmin : nfss->acregmin;
-}
-
-static inline unsigned NFS_MAXATTRTIMEO(const struct inode *inode)
-{
-	struct nfs_server *nfss = NFS_SERVER(inode);
-	return S_ISDIR(inode->i_mode) ? nfss->acdirmax : nfss->acregmax;
-}
-
-static inline int NFS_STALE(const struct inode *inode)
-{
-	return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
-}
-
-static inline __u64 NFS_FILEID(const struct inode *inode)
-{
-	return NFS_I(inode)->fileid;
-}
-
-static inline void set_nfs_fileid(struct inode *inode, __u64 fileid)
-{
-	NFS_I(inode)->fileid = fileid;
-}
-
-static inline void nfs_mark_for_revalidate(struct inode *inode)
-{
-	struct nfs_inode *nfsi = NFS_I(inode);
-
-	spin_lock(&inode->i_lock);
-	nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS;
-	if (S_ISDIR(inode->i_mode))
-		nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
-	spin_unlock(&inode->i_lock);
-}
-
-static inline int nfs_server_capable(struct inode *inode, int cap)
-{
-	return NFS_SERVER(inode)->caps & cap;
-}
-
-static inline int NFS_USE_READDIRPLUS(struct inode *inode)
-{
-	return test_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
-}
-
-static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf)
-{
-	dentry->d_time = verf;
-}
-
-/**
- * nfs_save_change_attribute - Returns the inode attribute change cookie
- * @dir - pointer to parent directory inode
- * The "change attribute" is updated every time we finish an operation
- * that will result in a metadata change on the server.
- */
-static inline unsigned long nfs_save_change_attribute(struct inode *dir)
-{
-	return NFS_I(dir)->cache_change_attribute;
-}
-
-/**
- * nfs_verify_change_attribute - Detects NFS remote directory changes
- * @dir - pointer to parent directory inode
- * @chattr - previously saved change attribute
- * Return "false" if the verifiers doesn't match the change attribute.
- * This would usually indicate that the directory contents have changed on
- * the server, and that any dentries need revalidating.
- */
-static inline int nfs_verify_change_attribute(struct inode *dir, unsigned long chattr)
-{
-	return chattr == NFS_I(dir)->cache_change_attribute;
-}
-
-/*
- * linux/fs/nfs/inode.c
- */
-extern int nfs_sync_mapping(struct address_space *mapping);
-extern void nfs_zap_mapping(struct inode *inode, struct address_space *mapping);
-extern void nfs_zap_caches(struct inode *);
-extern void nfs_invalidate_atime(struct inode *);
-extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *,
-				struct nfs_fattr *);
-extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *);
-extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr);
-extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr);
-extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
-extern int nfs_permission(struct inode *, int, struct nameidata *);
-extern int nfs_open(struct inode *, struct file *);
-extern int nfs_release(struct inode *, struct file *);
-extern int nfs_attribute_timeout(struct inode *inode);
-extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
-extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
-extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
-extern int nfs_revalidate_mapping_nolock(struct inode *inode, struct address_space *mapping);
-extern int nfs_setattr(struct dentry *, struct iattr *);
-extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr);
-extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
-extern void put_nfs_open_context(struct nfs_open_context *ctx);
-extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, int mode);
-extern u64 nfs_compat_user_ino64(u64 fileid);
-
-/* linux/net/ipv4/ipconfig.c: trims ip addr off front of name, too. */
-extern __be32 root_nfs_parse_addr(char *name); /*__init*/
-
-static inline void nfs_fattr_init(struct nfs_fattr *fattr)
-{
-	fattr->valid = 0;
-	fattr->time_start = jiffies;
-}
-
-/*
- * linux/fs/nfs/file.c
- */
-extern const struct inode_operations nfs_file_inode_operations;
-#ifdef CONFIG_NFS_V3
-extern const struct inode_operations nfs3_file_inode_operations;
-#endif /* CONFIG_NFS_V3 */
-extern const struct file_operations nfs_file_operations;
-extern const struct address_space_operations nfs_file_aops;
-
-static inline struct nfs_open_context *nfs_file_open_context(struct file *filp)
-{
-	return filp->private_data;
-}
-
-static inline struct rpc_cred *nfs_file_cred(struct file *file)
-{
-	if (file != NULL)
-		return nfs_file_open_context(file)->cred;
-	return NULL;
-}
-
-/*
- * linux/fs/nfs/xattr.c
- */
-#ifdef CONFIG_NFS_V3_ACL
-extern ssize_t nfs3_listxattr(struct dentry *, char *, size_t);
-extern ssize_t nfs3_getxattr(struct dentry *, const char *, void *, size_t);
-extern int nfs3_setxattr(struct dentry *, const char *,
-			const void *, size_t, int);
-extern int nfs3_removexattr (struct dentry *, const char *name);
-#else
-# define nfs3_listxattr NULL
-# define nfs3_getxattr NULL
-# define nfs3_setxattr NULL
-# define nfs3_removexattr NULL
-#endif
-
-/*
- * linux/fs/nfs/direct.c
- */
-extern ssize_t nfs_direct_IO(int, struct kiocb *, const struct iovec *, loff_t,
-			unsigned long);
-extern ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf,
-			size_t count, loff_t pos);
-extern ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf,
-			size_t count, loff_t pos);
-
-/*
- * linux/fs/nfs/dir.c
- */
-extern const struct inode_operations nfs_dir_inode_operations;
-#ifdef CONFIG_NFS_V3
-extern const struct inode_operations nfs3_dir_inode_operations;
-#endif /* CONFIG_NFS_V3 */
-extern const struct file_operations nfs_dir_operations;
-extern struct dentry_operations nfs_dentry_operations;
-
-extern void nfs_force_lookup_revalidate(struct inode *dir);
-extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr);
-extern int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags);
-extern void nfs_access_zap_cache(struct inode *inode);
-
-/*
- * linux/fs/nfs/symlink.c
- */
-extern const struct inode_operations nfs_symlink_inode_operations;
-
-/*
- * linux/fs/nfs/sysctl.c
- */
-#ifdef CONFIG_SYSCTL
-extern int nfs_register_sysctl(void);
-extern void nfs_unregister_sysctl(void);
-#else
-#define nfs_register_sysctl() 0
-#define nfs_unregister_sysctl() do { } while(0)
-#endif
-
-/*
- * linux/fs/nfs/namespace.c
- */
-extern const struct inode_operations nfs_mountpoint_inode_operations;
-extern const struct inode_operations nfs_referral_inode_operations;
-extern int nfs_mountpoint_expiry_timeout;
-extern void nfs_release_automount_timer(void);
-
-/*
- * linux/fs/nfs/unlink.c
- */
-extern int  nfs_async_unlink(struct inode *dir, struct dentry *dentry);
-extern void nfs_complete_unlink(struct dentry *dentry, struct inode *);
-extern void nfs_block_sillyrename(struct dentry *dentry);
-extern void nfs_unblock_sillyrename(struct dentry *dentry);
-
-/*
- * linux/fs/nfs/write.c
- */
-extern int  nfs_writepage(struct page *page, struct writeback_control *wbc);
-extern int  nfs_writepages(struct address_space *, struct writeback_control *);
-extern int  nfs_flush_incompatible(struct file *file, struct page *page);
-extern int  nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int);
-extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *);
-extern void nfs_writedata_release(void *);
-
-/*
- * Try to write back everything synchronously (but check the
- * return value!)
- */
-extern long nfs_sync_mapping_wait(struct address_space *, struct writeback_control *, int);
-extern int nfs_wb_all(struct inode *inode);
-extern int nfs_wb_nocommit(struct inode *inode);
-extern int nfs_wb_page(struct inode *inode, struct page* page);
-extern int nfs_wb_page_priority(struct inode *inode, struct page* page, int how);
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-extern int  nfs_commit_inode(struct inode *, int);
-extern struct nfs_write_data *nfs_commitdata_alloc(void);
-extern void nfs_commit_free(struct nfs_write_data *wdata);
-extern void nfs_commitdata_release(void *wdata);
-#else
-static inline int
-nfs_commit_inode(struct inode *inode, int how)
-{
-	return 0;
-}
-#endif
-
-static inline int
-nfs_have_writebacks(struct inode *inode)
-{
-	return NFS_I(inode)->npages != 0;
-}
-
-/*
- * Allocate nfs_write_data structures
- */
-extern struct nfs_write_data *nfs_writedata_alloc(unsigned int npages);
-
-/*
- * linux/fs/nfs/read.c
- */
-extern int  nfs_readpage(struct file *, struct page *);
-extern int  nfs_readpages(struct file *, struct address_space *,
-		struct list_head *, unsigned);
-extern int  nfs_readpage_result(struct rpc_task *, struct nfs_read_data *);
-extern void nfs_readdata_release(void *data);
-
-/*
- * Allocate nfs_read_data structures
- */
-extern struct nfs_read_data *nfs_readdata_alloc(unsigned int npages);
-
-/*
- * linux/fs/nfs3proc.c
- */
-#ifdef CONFIG_NFS_V3_ACL
-extern struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type);
-extern int nfs3_proc_setacl(struct inode *inode, int type,
-			    struct posix_acl *acl);
-extern int nfs3_proc_set_default_acl(struct inode *dir, struct inode *inode,
-		mode_t mode);
-extern void nfs3_forget_cached_acls(struct inode *inode);
-#else
-static inline int nfs3_proc_set_default_acl(struct inode *dir,
-					    struct inode *inode,
-					    mode_t mode)
-{
-	return 0;
-}
-
-static inline void nfs3_forget_cached_acls(struct inode *inode)
-{
-}
-#endif /* CONFIG_NFS_V3_ACL */
-
-/*
- * linux/fs/mount_clnt.c
- */
-extern int  nfs_mount(struct sockaddr *, size_t, char *, char *,
-		      int, int, struct nfs_fh *);
-
-/*
- * inline functions
- */
-
-static inline loff_t nfs_size_to_loff_t(__u64 size)
-{
-	if (size > (__u64) OFFSET_MAX - 1)
-		return OFFSET_MAX - 1;
-	return (loff_t) size;
-}
-
-static inline ino_t
-nfs_fileid_to_ino_t(u64 fileid)
-{
-	ino_t ino = (ino_t) fileid;
-	if (sizeof(ino_t) < sizeof(u64))
-		ino ^= fileid >> (sizeof(u64)-sizeof(ino_t)) * 8;
-	return ino;
-}
-
-/* NFS root */
-
-extern void * nfs_root_data(void);
-
-#define nfs_wait_event(clnt, wq, condition)				\
-({									\
-	int __retval = 0;						\
-	if (clnt->cl_intr) {						\
-		sigset_t oldmask;					\
-		rpc_clnt_sigmask(clnt, &oldmask);			\
-		__retval = wait_event_interruptible(wq, condition);	\
-		rpc_clnt_sigunmask(clnt, &oldmask);			\
-	} else								\
-		wait_event(wq, condition);				\
-	__retval;							\
-})
-
-#define NFS_JUKEBOX_RETRY_TIME (5 * HZ)
-
-#endif /* __KERNEL__ */
-
-/*
- * NFS debug flags
- */
-#define NFSDBG_VFS		0x0001
-#define NFSDBG_DIRCACHE		0x0002
-#define NFSDBG_LOOKUPCACHE	0x0004
-#define NFSDBG_PAGECACHE	0x0008
-#define NFSDBG_PROC		0x0010
-#define NFSDBG_XDR		0x0020
-#define NFSDBG_FILE		0x0040
-#define NFSDBG_ROOT		0x0080
-#define NFSDBG_CALLBACK		0x0100
-#define NFSDBG_CLIENT		0x0200
-#define NFSDBG_MOUNT		0x0400
-#define NFSDBG_ALL		0xFFFF
-
-#ifdef __KERNEL__
-
-/*
- * Enable debugging support for nfs client.
- * Requires RPC_DEBUG.
- */
-#ifdef RPC_DEBUG
-# define NFS_DEBUG
-#endif
-
-# undef ifdebug
-# ifdef NFS_DEBUG
-#  define ifdebug(fac)		if (unlikely(nfs_debug & NFSDBG_##fac))
-# else
-#  define ifdebug(fac)		if (0)
-# endif
-#endif /* __KERNEL */
-
-#endif

Deleted: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/nfs_mount.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/nfs_mount.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/nfs_mount.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,68 +0,0 @@
-#ifndef _LINUX_NFS_MOUNT_H
-#define _LINUX_NFS_MOUNT_H
-
-/*
- *  linux/include/linux/nfs_mount.h
- *
- *  Copyright (C) 1992  Rick Sladkey
- *
- *  structure passed from user-space to kernel-space during an nfs mount
- */
-#include <linux/in.h>
-#include <linux/nfs.h>
-#include <linux/nfs2.h>
-#include <linux/nfs3.h>
-
-/*
- * WARNING!  Do not delete or change the order of these fields.  If
- * a new field is required then add it to the end.  The version field
- * tracks which fields are present.  This will ensure some measure of
- * mount-to-kernel version compatibility.  Some of these aren't used yet
- * but here they are anyway.
- */
-#define NFS_MOUNT_VERSION	6
-#define NFS_MAX_CONTEXT_LEN	256
-
-struct nfs_mount_data {
-	int		version;		/* 1 */
-	int		fd;			/* 1 */
-	struct nfs2_fh	old_root;		/* 1 */
-	int		flags;			/* 1 */
-	int		rsize;			/* 1 */
-	int		wsize;			/* 1 */
-	int		timeo;			/* 1 */
-	int		retrans;		/* 1 */
-	int		acregmin;		/* 1 */
-	int		acregmax;		/* 1 */
-	int		acdirmin;		/* 1 */
-	int		acdirmax;		/* 1 */
-	struct sockaddr_in addr;		/* 1 */
-	char		hostname[NFS_MAXNAMLEN + 1];		/* 1 */
-	int		namlen;			/* 2 */
-	unsigned int	bsize;			/* 3 */
-	struct nfs3_fh	root;			/* 4 */
-	int		pseudoflavor;		/* 5 */
-	char		context[NFS_MAX_CONTEXT_LEN + 1];	/* 6 */
-};
-
-/* bits in the flags field */
-
-#define NFS_MOUNT_SOFT		0x0001	/* 1 */
-#define NFS_MOUNT_INTR		0x0002	/* 1 */
-#define NFS_MOUNT_SECURE	0x0004	/* 1 */
-#define NFS_MOUNT_POSIX		0x0008	/* 1 */
-#define NFS_MOUNT_NOCTO		0x0010	/* 1 */
-#define NFS_MOUNT_NOAC		0x0020	/* 1 */
-#define NFS_MOUNT_TCP		0x0040	/* 2 */
-#define NFS_MOUNT_VER3		0x0080	/* 3 */
-#define NFS_MOUNT_KERBEROS	0x0100	/* 3 */
-#define NFS_MOUNT_NONLM		0x0200	/* 3 */
-#define NFS_MOUNT_BROKEN_SUID	0x0400	/* 4 */
-#define NFS_MOUNT_NOACL		0x0800	/* 4 */
-#define NFS_MOUNT_STRICTLOCK	0x1000	/* reserved for NFSv4 */
-#define NFS_MOUNT_SECFLAVOUR	0x2000	/* 5 */
-#define NFS_MOUNT_NORDIRPLUS	0x4000	/* 5 */
-#define NFS_MOUNT_UNSHARED	0x8000	/* 5 */
-#define NFS_MOUNT_FLAGMASK	0xFFFF
-
-#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/pagemap.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/pagemap.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/pagemap.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,8 @@
+#ifndef BACKPORT_LINUX_PAGEMAP_H
+#define BACKPORT_LINUX_PAGEMAP_H
+
+#include_next <linux/pagemap.h>
+
+#define __grab_cache_page	grab_cache_page
+
+#endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/path.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/path.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/path.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,12 +1,36 @@
 #ifndef _BACKPORT_LINUX_PATH_H
 #define _BACKPORT_LINUX_PATH_H
 
-struct dentry;
-struct vfsmount;
+#include <linux/mount.h>
+#include <linux/namei.h>
 
 struct path {
 	struct vfsmount *mnt;
 	struct dentry *dentry;
 };
 
+static inline void path_put(struct path *path)
+{
+	dput(path->dentry);
+	mntput(path->mnt);
+}
+
+static inline void path_get(struct path *path)
+{
+	mntget(path->mnt);
+	dget(path->dentry);
+}
+
+static inline void backport_path_put(struct nameidata *nd)
+{
+	dput(nd->dentry);
+	mntput(nd->mnt);
+}
+
+static inline void backport_path_get(struct nameidata *nd)
+{
+	mntget(nd->mnt);
+	dget(nd->dentry);
+}
+
 #endif  /* _BACKPORT_LINUX_PATH_H */
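
struct path now carries real get/put helpers plus nameidata-based backport_path_get()/backport_path_put() for callers still using nd->dentry/nd->mnt directly. A hypothetical reference stash (names are illustrative):

	/* Hypothetical: pin a mount/dentry pair in a struct path, drop it later. */
	static void foo_pin_path(struct path *dst, struct vfsmount *mnt,
				 struct dentry *dentry)
	{
		dst->mnt = mnt;
		dst->dentry = dentry;
		path_get(dst);		/* mntget() + dget() */
	}

	static void foo_unpin_path(struct path *p)
	{
		path_put(p);		/* dput() + mntput() */
	}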

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/proc_fs.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/proc_fs.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/proc_fs.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,30 @@
+#ifndef BACKPORT_LINUX_PROC_FS_H
+#define BACKPORT_LINUX_PROC_FS_H
+
+#include_next <linux/proc_fs.h>
+
+static inline struct proc_dir_entry *proc_create(const char *name,
+	mode_t mode, struct proc_dir_entry *parent,
+	const struct file_operations *fops)
+{
+	struct proc_dir_entry *res = create_proc_entry(name, mode, parent);
+	if (res)
+		res->proc_fops = (struct file_operations *)fops;
+	return res;
+}
+
+static inline struct proc_dir_entry *proc_create_data(const char *name, mode_t mode,
+				struct proc_dir_entry *parent,
+				const struct file_operations *proc_fops,
+				void *data)
+{
+		struct proc_dir_entry *pde;
+
+		pde = proc_create(name, mode, parent, proc_fops);
+		if (pde)
+			pde->data = data;
+
+		return pde;
+}
+
+#endif
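
proc_create() and proc_create_data() are emulated on top of create_proc_entry(). A hypothetical registration built on the shims (the fops and private data are assumed to exist elsewhere):

	/* Hypothetical /proc registration using the proc_create_data() shim. */
	static int foo_proc_init(const struct file_operations *fops, void *priv)
	{
		struct proc_dir_entry *pde;

		pde = proc_create_data("foo_stats", S_IRUGO, NULL, fops, priv);
		return pde ? 0 : -ENOMEM;
	}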

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/radix-tree.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/radix-tree.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/radix-tree.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,16 @@
+#ifndef BACKPORT_LINUX_RADIX_TREE_H
+#define BACKPORT_LINUX_RADIX_TREE_H
+
+#include_next <linux/radix-tree.h>
+#if 0
+static inline int radix_tree_preload(gfp_t gfp_mask)
+{
+	return 0;
+}
+
+static inline void radix_tree_preload_end(void)
+{
+}
+
+#endif
+#endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/scatterlist.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/scatterlist.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/scatterlist.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -2,6 +2,11 @@
 #define __BACKPORT_LINUX_SCATTERLIST_H_TO_2_6_23__
 #include_next<linux/scatterlist.h>
 
+static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
+{
+	sg->page = page;
+}
+
 static inline void sg_set_page(struct scatterlist *sg, struct page *page,
                                unsigned int len, unsigned int offset)
 {
@@ -10,6 +15,10 @@
 	sg->length = len;
 }
 
+static inline void sg_mark_end(struct scatterlist *sg)
+{
+}
+
 #define sg_page(a) (a)->page
 #define sg_init_table(a, b)
 

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/sched.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/sched.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/sched.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,26 @@
+#ifndef LINUX_SCHED_BACKPORT_H
+#define LINUX_SCHED_BACKPORT_H
+
+#include_next <linux/sched.h>
+
+#define TASK_WAKEKILL	   128
+
+#define TASK_KILLABLE	   (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
+
+#define schedule_timeout_killable(_arg) schedule_timeout_interruptible(_arg)
+
+static inline int __fatal_signal_pending(struct task_struct *tsk)
+{
+	return sigismember(&tsk->pending.signal, SIGKILL);
+}
+
+static inline int fatal_signal_pending(struct task_struct *p)
+{
+	return signal_pending(p) && __fatal_signal_pending(p);
+}
+
+static inline pid_t task_pid_nr(struct task_struct *tsk)
+{
+	return tsk->pid;
+}
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/security.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/security.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/security.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,54 @@
+#ifndef BACKPORT_LINUX_SECURITY_H
+#define BACKPORT_LINUX_SECURITY_H
+
+#include_next <linux/security.h>
+
+struct security_mnt_opts {
+	char **mnt_opts;
+	int *mnt_opts_flags;
+	int num_mnt_opts;
+};
+
+static inline void security_init_mnt_opts(struct security_mnt_opts *opts)
+{
+	opts->mnt_opts = NULL;
+	opts->mnt_opts_flags = NULL;
+	opts->num_mnt_opts = 0;
+}
+
+static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
+{
+	int i;
+	if (opts->mnt_opts)
+		for (i = 0; i < opts->num_mnt_opts; i++)
+			kfree(opts->mnt_opts[i]);
+	kfree(opts->mnt_opts);
+	opts->mnt_opts = NULL;
+	kfree(opts->mnt_opts_flags);
+	opts->mnt_opts_flags = NULL;
+	opts->num_mnt_opts = 0;
+}
+
+static inline int security_sb_set_mnt_opts(struct super_block *sb,
+					   struct security_mnt_opts *opts)
+{
+	return 0;
+}
+
+static inline void security_sb_clone_mnt_opts(const struct super_block *oldsb,
+					      struct super_block *newsb)
+{ }
+
+static inline int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts)
+{
+	return 0;
+}
+
+static inline int backport_security_sb_copy_data(void *orig, void *copy)
+{
+	return 0;
+}
+
+#define security_sb_copy_data(a,b) backport_security_sb_copy_data(a,b)
+
+#endif /* BACKPORT_LINUX_SECURITY_H */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/seq_file.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/seq_file.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/seq_file.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,61 @@
+#ifndef BACKPORT_LINUX_SEQ_FILE_H
+#define BACKPORT_LINUX_SEQ_FILE_H
+
+#include_next <linux/seq_file.h>
+#include <linux/fs.h>
+
+static inline struct list_head *seq_list_start(struct list_head *head, loff_t pos)
+{
+	struct list_head *lh;
+
+	list_for_each(lh, head)
+		if (pos-- == 0)
+			return lh;
+
+	return NULL;
+}
+
+static inline struct list_head *seq_list_start_head(struct list_head *head, loff_t pos)
+{
+	if (!pos)
+		return head;
+
+	return seq_list_start(head, pos - 1);
+}
+
+static inline struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos)
+{
+	struct list_head *lh;
+
+	lh = ((struct list_head *)v)->next;
+	++*ppos;
+	return lh == head ? NULL : lh;
+}
+
+static inline void *__seq_open_private(struct file *f, struct seq_operations *ops,
+		int psize)
+{
+	int rc;
+	void *private;
+	struct seq_file *seq;
+
+	private = kzalloc(psize, GFP_KERNEL);
+	if (private == NULL)
+		goto out;
+
+	rc = seq_open(f, ops);
+	if (rc < 0)
+		goto out_free;
+
+	seq = f->private_data;
+	seq->private = private;
+	return private;
+
+out_free:
+	kfree(private);
+out:
+	return NULL;
+}
+
+
+#endif
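
The seq_list_*() helpers and __seq_open_private() mirror the later seq_file API. A hypothetical iterator over a driver-private list (the list head and callbacks are assumptions, not part of the commit):

	/* Hypothetical seq_file iteration over a static list using the shims. */
	static LIST_HEAD(foo_list);

	static void *foo_seq_start(struct seq_file *s, loff_t *pos)
	{
		return seq_list_start(&foo_list, *pos);
	}

	static void *foo_seq_next(struct seq_file *s, void *v, loff_t *pos)
	{
		return seq_list_next(v, &foo_list, pos);
	}

	static void foo_seq_stop(struct seq_file *s, void *v)
	{
	}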

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/skbuff.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/skbuff.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/skbuff.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -106,4 +106,9 @@
         memcpy(to, skb->data + offset, len);
 }
 
+static inline int skb_csum_unnecessary(const struct sk_buff *skb)
+{
+	return skb->ip_summed & CHECKSUM_UNNECESSARY;
+}
+
 #endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/string.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/string.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/string.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,25 @@
+#ifndef BACKPORT_LINUX_STRING_H
+#define BACKPORT_LINUX_STRING_H
+
+#include_next <linux/string.h>
+
+extern void *__kmalloc(size_t, gfp_t);
+extern char *strndup_user(const char __user *, long);
+
+static inline char *kstrndup(const char *s, size_t max, gfp_t gfp)
+{
+	size_t len;
+	char *buf;
+
+	if (!s)
+		return NULL;
+
+	len = strnlen(s, max);
+	buf = __kmalloc(len+1, gfp);
+	if (buf) {
+		memcpy(buf, s, len);
+		buf[len] = '\0';
+	}
+	return buf;
+}
+#endif /* BACKPORT_LINUX_STRING_H */
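
kstrndup() is provided locally, while strndup_user() comes from src/strndup.c later in this revision. A hypothetical bounded copy of an option string (the 64-byte limit is an arbitrary example):

	/* Hypothetical: duplicate at most 64 bytes of an option string. */
	static char *foo_dup_option(const char *opt)
	{
		return kstrndup(opt, 64, GFP_KERNEL);	/* NULL on allocation failure */
	}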

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/swap.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/swap.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/swap.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,28 @@
+#ifndef LINUX_SWAP_BACKPORT_H
+#define LINUX_SWAP_BACKPORT_H
+
+#include_next <linux/swap.h>
+
+static inline unsigned int backport_nr_free_buffer_pages(void)
+{
+	/* Just pick one node, since fallback list is circular */
+	pg_data_t *pgdat = NODE_DATA(numa_node_id());
+	unsigned int sum = 0;
+
+	struct zonelist *zonelist = pgdat->node_zonelists + gfp_zone(GFP_USER);
+	struct zone **zonep = zonelist->zones;
+	struct zone *zone;
+
+	for (zone = *zonep++; zone; zone = *zonep++) {
+		unsigned long size = zone->present_pages;
+		unsigned long high = zone->pages_high;
+		if (size > high)
+			sum += size - high;
+	}
+
+	return sum;
+}
+
+#define nr_free_buffer_pages backport_nr_free_buffer_pages
+
+#endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/sysctl.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/sysctl.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/sysctl.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -66,4 +66,10 @@
 
 #define unregister_sysctl_table(hdr)	fake_unregister_sysctl_table(hdr)
 
+static inline struct ctl_table_header *
+backport_register_sysctl_table(ctl_table *table) {
+	return register_sysctl_table(table, 0);
+}
+
+#define register_sysctl_table backport_register_sysctl_table
 #endif /* __BACKPORT_SYSCTL_H_TO_2_6_18__ */
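
The wrapper hides the extra insert_at_head argument that this kernel's register_sysctl_table() still takes, so newer callers compile unchanged. A hypothetical registration (table contents and names assumed):

	/* Hypothetical sysctl registration against the one-argument API. */
	static struct ctl_table_header *foo_sysctl_hdr;

	static int foo_register_sysctl(ctl_table *table)
	{
		foo_sysctl_hdr = register_sysctl_table(table);
		return foo_sysctl_hdr ? 0 : -ENOMEM;
	}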

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/types.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/types.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/types.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -5,4 +5,10 @@
 
 typedef _Bool bool;
 
+#ifdef CONFIG_LSF
+typedef u64 blkcnt_t;
+#else
+typedef unsigned long blkcnt_t;
 #endif
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/wait.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/wait.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/wait.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,47 @@
+#ifndef BACKPORT_LINUX_WAIT_H
+#define BACKPORT_LINUX_WAIT_H
+
+#include_next <linux/wait.h>
+
+#define __wait_event_killable(wq, condition, ret)		\
+do {								\
+	DEFINE_WAIT(__wait);					\
+								\
+	for (;;) {						\
+		prepare_to_wait(&wq, &__wait, TASK_KILLABLE);	\
+		if (condition)					\
+			break;					\
+		if (!fatal_signal_pending(current)) {		\
+			schedule();				\
+			continue;				\
+		}						\
+		ret = -ERESTARTSYS;				\
+		break;						\
+	}							\
+	finish_wait(&wq, &__wait);				\
+} while (0)
+
+/**
+ * wait_event_killable - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_KILLABLE) until the
+ * @condition evaluates to true or a signal is received.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function will return -ERESTARTSYS if it was interrupted by a
+ * signal and 0 if @condition evaluated to true.
+ */
+#define wait_event_killable(wq, condition)			\
+({								\
+	int __ret = 0;						\
+	if (!(condition))					\
+		__wait_event_killable(wq, condition, __ret);	\
+	__ret;							\
+})
+
+#endif
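
wait_event_killable() is built from the TASK_KILLABLE and fatal_signal_pending() shims added to linux/sched.h above; since this kernel's scheduler predates TASK_WAKEKILL, the killable behaviour is approximate. A hypothetical wait for a link-up event, assuming the completion path wakes the queue:

	/* Hypothetical: sleep until *link_up becomes non-zero or the task is killed. */
	static int foo_wait_link_up(wait_queue_head_t *wq, int *link_up)
	{
		if (wait_event_killable(*wq, *link_up))
			return -ERESTARTSYS;
		return 0;
	}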

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/workqueue.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/workqueue.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/linux/workqueue.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -61,4 +61,11 @@
 #define cancel_rearming_delayed_workqueue backport_cancel_rearming_delayed_workqueue
 #define schedule_delayed_work backport_schedule_delayed_work
 
+static inline void backport_cancel_rearming_delayed_work(struct delayed_work *work)
+{
+	cancel_delayed_work_sync(work);
+}
+
+#define cancel_rearming_delayed_work backport_cancel_rearming_delayed_work
+
 #endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/net/ipv6.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/net/ipv6.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/net/ipv6.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,16 +1,10 @@
-#ifndef __BACKPORT_NET_IVP6_H
-#define __BACKPORT_NET_IVP6_H
+#ifndef BACKPORT_NET_IPV6_H
+#define BACKPORT_NET_IPV6_H
 
 #include_next <net/ipv6.h>
 
-static inline int ipv6_addr_v4mapped(const struct in6_addr *a)
-{
-	        return ((a->s6_addr32[0] | a->s6_addr32[1] | 
-					(a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0);
-}
-
 static inline void ipv6_addr_set_v4mapped(const __be32 addr,
-		struct in6_addr *v4mapped)
+					  struct in6_addr *v4mapped)
 {
 	ipv6_addr_set(v4mapped,
 			0, 0,
@@ -18,4 +12,9 @@
 			addr);
 }
 
-#endif /* __BACKPORT_NET_IVP6_H */
+static inline int ipv6_addr_v4mapped(const struct in6_addr *a)
+{
+	return ((a->s6_addr32[0] | a->s6_addr32[1] |
+		(a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0);
+}
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/net/udp.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/net/udp.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/net/udp.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,9 @@
+#ifndef BACKPORT_NET_UDP_H
+#define BACKPORT_NET_UDP_H
+
+#include_next <net/udp.h>
+
+static inline void UDPX_INC_STATS_BH(struct sock *sk, int field)
+{ }
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/src/namespace.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/src/namespace.c	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/src/namespace.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,95 @@
+#include <linux/spinlock_types.h>
+#include <linux/percpu.h>
+#include <linux/mount.h>
+#include <linux/module.h>
+
+struct mnt_writer {
+	/*
+	 * If holding multiple instances of this lock, they
+	 * must be ordered by cpu number.
+	 */
+	spinlock_t lock;
+	struct lock_class_key lock_class; /* compiles out with !lockdep */
+	unsigned long count;
+	struct vfsmount *mnt;
+} ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU(struct mnt_writer, mnt_writers);
+
+int __init init_mnt_writers(void)
+{
+	int cpu;
+	for_each_possible_cpu(cpu) {
+		struct mnt_writer *writer = &per_cpu(mnt_writers, cpu);
+		spin_lock_init(&writer->lock);
+		lockdep_set_class(&writer->lock, &writer->lock_class);
+		writer->count = 0;
+	}
+	return 0;
+}
+
+static inline void __clear_mnt_count(struct mnt_writer *cpu_writer)
+{
+	if (!cpu_writer->mnt)
+		return;
+	/*
+	 * This is in case anyone ever leaves an invalid,
+	 * old ->mnt and a count of 0.
+	 */
+	if (!cpu_writer->count)
+		return;
+	cpu_writer->count = 0;
+}
+
+static inline void use_cpu_writer_for_mount(struct mnt_writer *cpu_writer,
+					  struct vfsmount *mnt)
+{
+	if (cpu_writer->mnt == mnt)
+		return;
+	__clear_mnt_count(cpu_writer);
+	cpu_writer->mnt = mnt;
+}
+
+int mnt_want_write(struct vfsmount *mnt)
+{
+	int ret = 0;
+	struct mnt_writer *cpu_writer;
+
+	cpu_writer = &get_cpu_var(mnt_writers);
+	spin_lock(&cpu_writer->lock);
+	if (__mnt_is_readonly(mnt)) {
+		ret = -EROFS;
+		goto out;
+	}
+	use_cpu_writer_for_mount(cpu_writer, mnt);
+	cpu_writer->count++;
+out:
+	spin_unlock(&cpu_writer->lock);
+	put_cpu_var(mnt_writers);
+	return ret;
+}
+EXPORT_SYMBOL(mnt_want_write);
+
+void mnt_drop_write(struct vfsmount *mnt)
+{
+	struct mnt_writer *cpu_writer;
+
+	cpu_writer = &get_cpu_var(mnt_writers);
+	spin_lock(&cpu_writer->lock);
+
+	use_cpu_writer_for_mount(cpu_writer, mnt);
+	if (cpu_writer->count > 0) {
+		cpu_writer->count--;
+	}
+
+	spin_unlock(&cpu_writer->lock);
+	/*
+	 * This could be done right after the spinlock
+	 * is taken because the spinlock keeps us on
+	 * the cpu, and disables preemption.  However,
+	 * putting it here bounds the amount that
+	 * __mnt_writers can underflow.  Without it,
+	 * we could theoretically wrap __mnt_writers.
+	 */
+	put_cpu_var(mnt_writers);
+}
+EXPORT_SYMBOL(mnt_drop_write);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/src/socket.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/src/socket.c	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/src/socket.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,39 @@
+#include <linux/socket.h>
+#include <net/sock.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+
+int kernel_accept(struct socket *sock, struct socket **newsock, int flags)
+{
+	struct sock *sk = sock->sk;
+	int err;
+
+	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol, newsock);
+	if (err < 0)
+		goto done;
+
+	err = sock->ops->accept(sock, *newsock, flags);
+	if (err < 0) {
+		sock_release(*newsock);
+		goto done;
+	}
+
+	(*newsock)->ops = sock->ops;
+
+done:
+	return err;
+}
+EXPORT_SYMBOL(kernel_accept);
+
+int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg)
+{
+	mm_segment_t oldfs = get_fs();
+	int err;
+
+	set_fs(KERNEL_DS);
+	err = sock->ops->ioctl(sock, cmd, arg);
+	set_fs(oldfs);
+
+	return err;
+}
+EXPORT_SYMBOL(kernel_sock_ioctl);
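
kernel_accept() and kernel_sock_ioctl() are compiled once here and exported for the externs declared in linux/net.h above. A hypothetical accept-loop fragment (names are illustrative):

	/* Hypothetical listener fragment built on kernel_accept(). */
	static int foo_accept_one(struct socket *listener, struct socket **peer)
	{
		int err = kernel_accept(listener, peer, O_NONBLOCK);

		if (err < 0)
			return err;	/* the new socket is released internally on error */

		/* *peer inherits listener->ops; release it with sock_release() later. */
		return 0;
	}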

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/src/strndup.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/src/strndup.c	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/src/strndup.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,38 @@
+#include <linux/err.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+
+/*
+ * strndup_user - duplicate an existing string from user space
+ *
+ * @s: The string to duplicate
+ * @n: Maximum number of bytes to copy, including the trailing NUL.
+ */
+char *strndup_user(const char __user *s, long n)
+{
+	char *p;
+	long length;
+
+	length = strnlen_user(s, n);
+
+	if (!length)
+		return ERR_PTR(-EFAULT);
+
+	if (length > n)
+		return ERR_PTR(-EINVAL);
+
+	p = kmalloc(length, GFP_KERNEL);
+
+	if (!p)
+		return ERR_PTR(-ENOMEM);
+
+	if (copy_from_user(p, s, length)) {
+		kfree(p);
+		return ERR_PTR(-EFAULT);
+	}
+
+	p[length - 1] = '\0';
+
+	return p;
+}
+EXPORT_SYMBOL(strndup_user);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/src/writeback.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/src/writeback.c	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.16_sles10_sp2/include/src/writeback.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,110 @@
+#include <linux/backing-dev.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/pagevec.h>
+#include <linux/writeback.h>
+#include <linux/mpage.h>
+#include <linux/module.h>
+
+int write_cache_pages(struct address_space *mapping,
+                      struct writeback_control *wbc, backport_writepage_t writepage,
+                      void *data)
+{
+        struct backing_dev_info *bdi = mapping->backing_dev_info;
+        int ret = 0;
+        int done = 0;
+        struct pagevec pvec;
+        int nr_pages;
+        pgoff_t index;
+        pgoff_t end;            /* Inclusive */
+        int scanned = 0;
+        int is_range = 0;
+        long nr_to_write = wbc->nr_to_write;
+
+        if (wbc->nonblocking && bdi_write_congested(bdi)) {
+                wbc->encountered_congestion = 1;
+                return 0;
+        }
+
+        pagevec_init(&pvec, 0);
+        if (wbc->sync_mode == WB_SYNC_NONE) {
+                index = mapping->writeback_index; /* Start from prev offset */
+                end = -1;
+        } else {
+		index = 0;                        /* whole-file sweep */
+		scanned = 1;
+	}
+	if (wbc->start || wbc->end) {
+                index = wbc->start >> PAGE_CACHE_SHIFT;
+                end = wbc->end >> PAGE_CACHE_SHIFT;
+                is_range = 1;
+                scanned = 1;
+        }
+retry:
+        while (!done && (index <= end) &&
+               (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+                                              PAGECACHE_TAG_DIRTY,
+                                              min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+                unsigned i;
+
+                scanned = 1;
+                for (i = 0; i < nr_pages; i++) {
+                        struct page *page = pvec.pages[i];
+
+                        /*
+                         * At this point we hold neither mapping->tree_lock nor
+                         * lock on the page itself: the page may be truncated or
+                         * invalidated (changing page->mapping to NULL), or even
+                         * swizzled back from swapper_space to tmpfs file
+                         * mapping
+                         */
+                        lock_page(page);
+
+                        if (unlikely(page->mapping != mapping)) {
+                                unlock_page(page);
+                                continue;
+                        }
+
+                        if (unlikely(is_range) && page->index > end) {
+                                done = 1;
+                                unlock_page(page);
+                                continue;
+                        }
+
+                        if (wbc->sync_mode != WB_SYNC_NONE)
+                                wait_on_page_writeback(page);
+
+                        if (PageWriteback(page) ||
+                            !clear_page_dirty_for_io(page)) {
+                                unlock_page(page);
+                                continue;
+                        }
+
+                        ret = (*writepage)(page, wbc, data);
+
+                        if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
+                                unlock_page(page);
+                                ret = 0;
+                        }
+                        if (ret || (--nr_to_write <= 0))
+                                done = 1;
+                        if (wbc->nonblocking && bdi_write_congested(bdi)) {
+                                wbc->encountered_congestion = 1;
+                                done = 1;
+                        }
+                }
+                pagevec_release(&pvec);
+                cond_resched();
+        }
+        if (!scanned && !done) {
+                /*
+                 * We hit the last page and there is more work to be done: wrap
+                 * back to the start of the file
+                 */
+                scanned = 1;
+                index = 0;
+                goto retry;
+        }
+        return ret;
+}
+EXPORT_SYMBOL(write_cache_pages);
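
write_cache_pages() here is a local copy of the newer kernels' generic dirty-page walk, exported so backported code on SLES10 SP2 can use it: it looks up PAGECACHE_TAG_DIRTY pages in pagevec-sized batches, locks and re-checks each page, clears its dirty bit for I/O, and hands it to the supplied callback while honouring nr_to_write, congestion and the wbc range. A hedged sketch of a caller (backport_writepage_t is assumed to be typedef'd in the matching writeback.h backport; the callback body is illustrative and must unlock the page itself):

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

/* Hypothetical per-page callback in the (page, wbc, data) shape that the
 * backported write_cache_pages() invokes; real I/O submission is elided. */
static int example_writepage(struct page *page, struct writeback_control *wbc,
                             void *data)
{
        /* ... build and submit I/O for 'page' here ... */
        unlock_page(page);
        return 0;
}

/* Hypothetical ->writepages() built on the backported walker; the inode is
 * passed through as the opaque data argument. */
static int example_writepages(struct address_space *mapping,
                              struct writeback_control *wbc)
{
        return write_cache_pages(mapping, wbc, example_writepage,
                                 mapping->host);
}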

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.17/include/linux/netdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.17/include/linux/netdevice.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.17/include/linux/netdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -40,6 +40,7 @@
 
 #define NETIF_F_TSO6		0
 #define NETIF_F_LRO		32768   /* large receive offload */
+#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
 
 #define dev_get_by_name(net, name) dev_get_by_name(name)
 #endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18/include/linux/netdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18/include/linux/netdevice.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18/include/linux/netdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -15,6 +15,7 @@
 	(netdev)->ethtool_ops = (struct ethtool_ops *)(ops)
 
 #define NETIF_F_LRO		32768   /* large receive offload */
+#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
 
 #define dev_get_by_name(net, name) dev_get_by_name(name)
 #endif
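
NETIF_F_IPV6_CSUM does not exist in the 2.6.17/2.6.18 netdevice.h, so these two backport headers define it (with the upstream bit value) so that drivers built against them can advertise TCP/UDP checksum offload over IPv6. A hedged sketch of the kind of driver code this enables (the setup function is illustrative):

#include <linux/netdevice.h>

/* Hypothetical sketch: advertising IPv4 and IPv6 checksum offload plus
 * scatter/gather during netdev setup; everything around it is elided. */
static void example_set_features(struct net_device *dev)
{
        dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
}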

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/asm/unaligned.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/asm/unaligned.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/asm/unaligned.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,7 @@
+#ifndef ASM_UNALIGNED_BACKPORT_TO_2_6_27_H
+#define ASM_UNALIGNED_BACKPORT_TO_2_6_27_H
+
+#include <linux/unaligned/access_ok.h>
+#include_next <asm/unaligned.h>
+
+#endif
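
This shim makes the RHEL 5.1 asm/unaligned.h look like the restructured 2.6.27 one by pulling in the linux/unaligned/access_ok.h flavour first, so code using the newer get_unaligned_le*/put_unaligned_le* helpers keeps compiling. A hedged sketch of such a caller (assuming the access_ok.h copy added alongside this header provides the little-endian accessors as upstream does; the buffer layout is illustrative):

#include <linux/types.h>
#include <asm/unaligned.h>

/* Hypothetical sketch: reading a little-endian 32-bit length field that is
 * not naturally aligned inside a received buffer. */
static u32 example_read_len(const u8 *buf)
{
        return get_unaligned_le32(buf + 6);     /* offset 6: deliberately unaligned */
}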

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/asm-generic/atomic.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/asm-generic/atomic.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/asm-generic/atomic.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,40 @@
+#ifndef __BACKPORT_ASM_GENERIC_ATOMIC_H
+#define __BACKPORT_ASM_GENERIC_ATOMIC_H
+
+#include_next <asm-generic/atomic.h>
+
+#if BITS_PER_LONG == 64
+
+static inline long atomic_long_inc_return(atomic_long_t *l)
+{
+	atomic64_t *v = (atomic64_t *)l;
+
+	return (long)atomic64_inc_return(v);
+}
+
+static inline long atomic_long_dec_return(atomic_long_t *l)
+{
+	atomic64_t *v = (atomic64_t *)l;
+
+	return (long)atomic64_dec_return(v);
+}
+
+#else
+
+static inline long atomic_long_inc_return(atomic_long_t *l)
+{
+	atomic_t *v = (atomic_t *)l;
+
+	return (long)atomic_inc_return(v);
+}
+
+static inline long atomic_long_dec_return(atomic_long_t *l)
+{
+	atomic_t *v = (atomic_t *)l;
+
+	return (long)atomic_dec_return(v);
+}
+
+#endif  /*  BITS_PER_LONG == 64  */
+
+#endif  /*  __BACKPORT_ASM_GENERIC_ATOMIC_H  */
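
atomic_long_inc_return() and atomic_long_dec_return() are missing from the RHEL 5.1 asm-generic/atomic.h, so the header above supplies them by casting atomic_long_t to the underlying atomic64_t or atomic_t, exactly as later kernels do. A hedged usage sketch (the reference counter is illustrative):

#include <asm/atomic.h>

/* Hypothetical sketch: a long-sized reference count that also reports the
 * new value atomically (assumes ATOMIC_LONG_INIT from asm-generic/atomic.h
 * is visible via asm/atomic.h on this kernel). */
static atomic_long_t example_refs = ATOMIC_LONG_INIT(0);

static long example_get(void)
{
        return atomic_long_inc_return(&example_refs);
}

static long example_put(void)
{
        return atomic_long_dec_return(&example_refs);
}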

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/backing-dev.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/backing-dev.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/backing-dev.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,67 @@
+#ifndef BACKPORT_LINUX_BACK_DEV_H
+#define BACKPORT_LINUX_BACK_DEV_H
+
+#include_next <linux/backing-dev.h>
+
+enum bdi_stat_item {
+	BDI_RECLAIMABLE,
+	BDI_WRITEBACK,
+	NR_BDI_STAT_ITEMS
+};
+
+
+static inline void inc_bdi_stat(struct backing_dev_info *bdi,
+		enum bdi_stat_item item)
+{
+	return;
+}
+
+static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
+		enum bdi_stat_item item)
+{
+	return;
+}
+
+static inline void dec_bdi_stat(struct backing_dev_info *bdi,
+		enum bdi_stat_item item)
+{
+	return;
+}
+
+static inline int bdi_init(struct backing_dev_info *bdi)
+{
+	return 0;
+}
+
+static inline void bdi_destroy(struct backing_dev_info *bdi)
+{
+	return;
+}
+
+static inline int bdi_register(struct backing_dev_info *bdi, struct device *parent,
+				const char *fmt, ...)
+{
+	return 0;
+}
+
+static inline int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
+{
+	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
+}
+
+static inline void bdi_unregister(struct backing_dev_info *bdi)
+{
+	return;
+}
+
+static inline void clear_bdi_congested(struct backing_dev_info *bdi, int rw)
+{
+	return;
+}
+
+static inline void set_bdi_congested(struct backing_dev_info *bdi, int rw)
+{
+	return;
+}
+
+#endif
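
RHEL 5.1 has neither per-BDI statistics nor bdi registration, so every helper above is a deliberate stub: the stat updaters and the (un)register/congestion calls do nothing, and bdi_init()/bdi_register() simply report success. Code written against 2.6.26-era backing-dev semantics can therefore call them unconditionally; a hedged sketch (the setup function is illustrative):

#include <linux/backing-dev.h>

/* Hypothetical sketch: newer-kernel style BDI setup that compiles and runs
 * harmlessly against the stubs above. */
static int example_setup_bdi(struct backing_dev_info *bdi)
{
        int err = bdi_init(bdi);                /* stub: always 0 here */

        if (err)
                return err;
        inc_bdi_stat(bdi, BDI_WRITEBACK);       /* stub: no-op here */
        dec_bdi_stat(bdi, BDI_WRITEBACK);       /* stub: no-op here */
        bdi_unregister(bdi);                    /* stub: no-op here */
        return 0;
}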

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/capability.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/capability.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/capability.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,39 @@
+#ifndef BACKPORT_LINUX_CAPABILITY_H
+#define BACKPORT_LINUX_CAPABILITY_H
+
+#include_next <linux/capability.h>
+
+/* Override MAC access.
+   The base kernel enforces no MAC policy.
+   An LSM may enforce a MAC policy, and if it does and it chooses
+   to implement capability based overrides of that policy, this is
+   the capability it should use to do so. */
+
+#define CAP_MAC_OVERRIDE     32
+
+#define CAP_FS_MASK_B0	(CAP_TO_MASK(CAP_CHOWN)			\
+			 | CAP_TO_MASK(CAP_DAC_OVERRIDE)	\
+			 | CAP_TO_MASK(CAP_DAC_READ_SEARCH)	\
+			 | CAP_TO_MASK(CAP_FOWNER)		\
+			 | CAP_TO_MASK(CAP_FSETID))
+
+#define CAP_FS_MASK_B1	(CAP_TO_MASK(CAP_MAC_OVERRIDE))
+
+#define CAP_NFSD_SET	(CAP_FS_MASK_B0|CAP_TO_MASK(CAP_SYS_RESOURCE))
+#define CAP_FS_SET	(CAP_FS_MASK_B0)
+
+static inline kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a,
+					      const kernel_cap_t permitted)
+{
+	const kernel_cap_t __cap_nfsd_set = CAP_NFSD_SET;
+	return cap_combine(a,
+			   cap_intersect(permitted, __cap_nfsd_set));
+}
+
+static inline kernel_cap_t cap_drop_nfsd_set(const kernel_cap_t a)
+{
+	const kernel_cap_t __cap_fs_set = CAP_NFSD_SET;
+	return cap_drop(a, __cap_fs_set);
+}
+
+#endif
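
CAP_MAC_OVERRIDE and the CAP_FS/CAP_NFSD mask helpers only appeared upstream around 2.6.24/2.6.25; the copies above let code that raises or drops the nfsd capability set compile against the RHEL 5.1 kernel_cap_t. A hedged sketch (the wrapper is illustrative):

#include <linux/capability.h>

/* Hypothetical sketch: trimming a capability set down to what nfsd-style
 * filesystem operations are allowed to retain. */
static kernel_cap_t example_trim_caps(kernel_cap_t caps)
{
        return cap_drop_nfsd_set(caps);
}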

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/completion.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/completion.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/completion.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,8 @@
+#ifndef BACKPORT_LINUX_COMPLETION_H
+#define BACKPORT_LINUX_COMPLETION_H
+
+#include_next <linux/completion.h>
+
+#define wait_for_completion_killable(_args) wait_for_completion_interruptible(_args)
+
+#endif /* BACKPORT_LINUX_COMPLETION_H */
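
wait_for_completion_killable() only exists from 2.6.25 onwards, so the backport maps it to wait_for_completion_interruptible(). The semantic difference is worth keeping in mind: on these kernels any signal, not just a fatal one, interrupts the wait, so the -ERESTARTSYS path still has to be handled. A hedged sketch (the completion and wrapper are illustrative):

#include <linux/completion.h>
#include <linux/errno.h>

static DECLARE_COMPLETION(example_done);

/* Hypothetical sketch: waiting "killably"; with this backport any pending
 * signal ends the wait, so the error return matters. */
static int example_wait(void)
{
        if (wait_for_completion_killable(&example_done))
                return -ERESTARTSYS;
        return 0;
}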

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/crypto.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/crypto.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/crypto.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -11,6 +11,16 @@
 	u32 flags;
 };
 
+struct crypto_blkcipher {
+	struct crypto_tfm base;
+};
+
+struct blkcipher_desc {
+	struct crypto_blkcipher *tfm;
+	void *info;
+	u32 flags;
+};
+
 static inline int crypto_hash_init(struct hash_desc *desc)
 {
 	crypto_digest_init(desc->tfm);
@@ -51,4 +61,57 @@
 	crypto_free_tfm(tfm);
 }
 
+#define crypto_hash_digestsize(tfm) crypto_tfm_alg_digestsize(tfm)
+#define crypto_hash_setkey(a,b,c) crypto_digest_setkey(a,b,c)
+
+static inline struct crypto_tfm *crypto_blkcipher_tfm(
+	struct crypto_blkcipher *tfm)
+{
+	return &tfm->base;
+}
+
+static inline unsigned int crypto_blkcipher_blocksize(
+	struct crypto_blkcipher *tfm)
+{
+	return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm));
+}
+
+static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
+{
+	return crypto_tfm_alg_ivsize(crypto_blkcipher_tfm(tfm));
+}
+
+static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
+					      struct scatterlist *dst,
+					      struct scatterlist *src,
+					      unsigned int nbytes)
+{
+	return crypto_cipher_encrypt_iv(crypto_blkcipher_tfm(desc->tfm), dst, src, nbytes, (u8 *)desc->info); 
+}
+
+static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
+					      struct scatterlist *dst,
+					      struct scatterlist *src,
+					      unsigned int nbytes)
+{
+	return crypto_cipher_decrypt_iv(crypto_blkcipher_tfm(desc->tfm), dst, src, nbytes, (u8 *)desc->info);
+}
+
+static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
+	const char *alg_name, u32 type, u32 mask)
+{
+	return (struct crypto_blkcipher *)crypto_alloc_tfm(alg_name, mask);
+}
+
+static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
+{
+	crypto_free_tfm(crypto_blkcipher_tfm(tfm));
+}
+
+static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
+					  const u8 *key, unsigned int keylen)
+{
+	return crypto_cipher_setkey(crypto_blkcipher_tfm(tfm), key, keylen);
+}
+
 #endif
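
The additions above layer the 2.6.19+ blkcipher interface on top of the old crypto_tfm API: crypto_alloc_blkcipher() is really crypto_alloc_tfm(), desc->info carries the IV, and the encrypt/decrypt wrappers forward to crypto_cipher_encrypt_iv()/crypto_cipher_decrypt_iv(). A hedged sketch of an encryption pass through the shim; note that on this pre-2.6.19 crypto core the algorithm name and mask follow the old crypto_alloc_tfm() conventions (mode selected via flags rather than a "cbc(aes)" template name), and the NULL-on-failure check assumes the old allocator's behaviour:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Hypothetical sketch: one encryption pass via the shimmed blkcipher API.
 * alg_name/mask follow the old crypto_alloc_tfm() conventions here. */
static int example_encrypt(const char *alg_name, u32 mask,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes,
                           const u8 *key, unsigned int keylen, u8 *iv)
{
        struct crypto_blkcipher *tfm;
        struct blkcipher_desc desc;
        int err;

        tfm = crypto_alloc_blkcipher(alg_name, 0, mask);
        if (!tfm)                       /* old crypto_alloc_tfm(): NULL on failure */
                return -ENOMEM;

        desc.tfm = tfm;
        desc.info = iv;                 /* the shim passes this as the IV */
        desc.flags = 0;

        err = crypto_blkcipher_setkey(tfm, key, keylen);
        if (!err)
                err = crypto_blkcipher_encrypt_iv(&desc, dst, src, nbytes);

        crypto_free_blkcipher(tfm);
        return err;
}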

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/err.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/err.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/err.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,19 @@
+#ifndef BACKPORT_LINUX_ERR_H
+#define BACKPORT_LINUX_ERR_H
+
+#include_next <linux/err.h>
+
+/**
+ * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
+ * @ptr: The pointer to cast.
+ *
+ * Explicitly cast an error-valued pointer to another pointer type in such a
+ * way as to make it clear that's what's going on.
+ */
+static inline void *ERR_CAST(const void *ptr)
+{
+	/* cast away the const */
+	return (void *) ptr;
+}
+
+#endif /* BACKPORT_LINUX_ERR_H */
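
ERR_CAST() (upstream since 2.6.25) exists purely to make re-typing an ERR_PTR-encoded error explicit; the backport version above keeps such call sites compiling on RHEL 5.1. A hedged sketch (the inode-returning helper is hypothetical):

#include <linux/err.h>
#include <linux/fs.h>

/* Hypothetical sketch: propagating an error from an inode-returning helper
 * out of a dentry-returning path, retyped via ERR_CAST(). */
static struct dentry *example_lookup(struct inode *(*get_inode)(void))
{
        struct inode *inode = get_inode();

        if (IS_ERR(inode))
                return ERR_CAST(inode);
        /* ... normal dentry setup elided ... */
        return NULL;
}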

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/fcntl.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/fcntl.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/fcntl.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,8 @@
+#ifndef BACKPORT_LINUX_FCNTL_H
+#define BACKPORT_LINUX_FCNTL_H
+
+#include_next <linux/fcntl.h>
+
+#define F_CANCELLK      (F_LINUX_SPECIFIC_BASE+5)
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/file.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/file.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/file.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,12 @@
+#ifndef _BACKPORT_LINUX_FILE_H_
+#define _BACKPORT_LINUX_FILE_H_
+
+#include_next <linux/file.h>
+#include <linux/fs.h>
+
+static inline void drop_file_write_access(struct file *filp)
+{
+	put_write_access(filp->f_dentry->d_inode);
+}
+
+#endif
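
drop_file_write_access() arrived with the 2.6.25 per-mount write-count rework; RHEL 5.1 has no mount write counting, so the backport simply releases the inode-level write access that get_write_access()/put_write_access() track. A hedged sketch of the paired use (the update function is illustrative):

#include <linux/file.h>
#include <linux/fs.h>

/* Hypothetical sketch: bracketing an update with inode write access, using
 * the backported drop_file_write_access() on the way out. */
static int example_update(struct file *filp)
{
        int err = get_write_access(filp->f_dentry->d_inode);

        if (err)
                return err;
        /* ... modify the file here ... */
        drop_file_write_access(filp);
        return 0;
}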

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/fs.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/fs.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/fs.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,1898 +1,90 @@
-#ifndef _LINUX_FS_H
-#define _LINUX_FS_H
+#ifndef BACKPORT_LINUX_FS_H
+#define BACKPORT_LINUX_FS_H
 
-/*
- * This file has definitions for some important file table
- * structures etc.
- */
+#include_next <linux/fs.h>
+#include <linux/mount.h>
 
-#include <linux/limits.h>
-#include <linux/ioctl.h>
-
-/*
- * It's silly to have NR_OPEN bigger than NR_FILE, but you can change
- * the file limit at runtime and only root can increase the per-process
- * nr_file rlimit, so it's safe to set up a ridiculously high absolute
- * upper limit on files-per-process.
- *
- * Some programs (notably those using select()) may have to be 
- * recompiled to take full advantage of the new limits..  
- */
-
-/* Fixed constants first: */
-#undef NR_OPEN
-#define NR_OPEN (1024*1024)	/* Absolute upper limit on fd num */
-#define INR_OPEN 1024		/* Initial setting for nfile rlimits */
-
-#define BLOCK_SIZE_BITS 10
-#define BLOCK_SIZE (1<<BLOCK_SIZE_BITS)
-
-#define SEEK_SET	0	/* seek relative to beginning of file */
-#define SEEK_CUR	1	/* seek relative to current file position */
-#define SEEK_END	2	/* seek relative to end of file */
-
-/* And dynamically-tunable limits and defaults: */
-struct files_stat_struct {
-	int nr_files;		/* read only */
-	int nr_free_files;	/* read only */
-	int max_files;		/* tunable */
-};
-extern struct files_stat_struct files_stat;
-extern int get_max_files(void);
-
-struct inodes_stat_t {
-	int nr_inodes;
-	int nr_unused;
-	int dummy[5];
-};
-extern struct inodes_stat_t inodes_stat;
-
-extern int leases_enable, lease_break_time;
-
-#ifdef CONFIG_DNOTIFY
-extern int dir_notify_enable;
-#endif
-
-#define NR_FILE  8192	/* this can well be larger on a larger system */
-
-#define MAY_EXEC 1
-#define MAY_WRITE 2
-#define MAY_READ 4
-#define MAY_APPEND 8
-
-#define FMODE_READ 1
-#define FMODE_WRITE 2
-
-/* Internal kernel extensions */
-#define FMODE_LSEEK	4
-#define FMODE_PREAD	8
-#define FMODE_PWRITE	FMODE_PREAD	/* These go hand in hand */
-
-/* File is being opened for execution. Primary users of this flag are
-   distributed filesystems that can use it to achieve correct ETXTBUSY
-   behavior for cross-node execution/opening_for_writing of files */
-#define FMODE_EXEC	16
-
-#define RW_MASK		1
-#define RWA_MASK	2
-#define READ 0
-#define WRITE 1
-#define READA 2		/* read-ahead  - don't block if no resources */
-#define SWRITE 3	/* for ll_rw_block() - wait for buffer lock */
-#define SPECIAL 4	/* For non-blockdevice requests in request queue */
-#define READ_SYNC	(READ | (1 << BIO_RW_SYNC))
-#define WRITE_SYNC	(WRITE | (1 << BIO_RW_SYNC))
-#define WRITE_BARRIER	((1 << BIO_RW) | (1 << BIO_RW_BARRIER))
-
-#define SEL_IN		1
-#define SEL_OUT		2
-#define SEL_EX		4
-
-/* public flags for file_system_type */
-#define FS_REQUIRES_DEV 1 
-#define FS_BINARY_MOUNTDATA 2
-#define FS_REVAL_DOT	16384	/* Check the paths ".", ".." for staleness */
-#define FS_RENAME_DOES_D_MOVE	32768	/* FS will handle d_move()
+#define FILE_LOCK_DEFERRED -EINPROGRESS
+#define FS_RENAME_DOES_D_MOVE   32768   /* FS will handle d_move()
 					 * during rename() internally.
-					 */  
-/*
- * These are the fs-independent mount-flags: up to 32 flags are supported
- */
-#define MS_RDONLY	 1	/* Mount read-only */
-#define MS_NOSUID	 2	/* Ignore suid and sgid bits */
-#define MS_NODEV	 4	/* Disallow access to device special files */
-#define MS_NOEXEC	 8	/* Disallow program execution */
-#define MS_SYNCHRONOUS	16	/* Writes are synced at once */
-#define MS_REMOUNT	32	/* Alter flags of a mounted FS */
-#define MS_MANDLOCK	64	/* Allow mandatory locks on an FS */
-#define MS_DIRSYNC	128	/* Directory modifications are synchronous */
-#define MS_NOATIME	1024	/* Do not update access times. */
-#define MS_NODIRATIME	2048	/* Do not update directory access times */
-#define MS_BIND		4096
-#define MS_MOVE		8192
-#define MS_REC		16384
-#define MS_VERBOSE	32768	/* War is peace. Verbosity is silence.
-				   MS_VERBOSE is deprecated. */
-#define MS_SILENT	32768
-#define MS_POSIXACL	(1<<16)	/* VFS does not apply the umask */
-#define MS_UNBINDABLE	(1<<17)	/* change to unbindable */
-#define MS_PRIVATE	(1<<18)	/* change to private */
-#define MS_SLAVE	(1<<19)	/* change to slave */
-#define MS_SHARED	(1<<20)	/* change to shared */
-#define MS_NO_LEASES	(1<<21)	/* fs does not support leases */
-#define MS_ACTIVE	(1<<30)
-#define MS_NOUSER	(1<<31)
+					 */
 
-/*
- * Superblock flags that can be altered by MS_REMOUNT
- */
-#define MS_RMT_MASK	(MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK)
 
-/*
- * Old magic mount flag and mask
- */
-#define MS_MGC_VAL 0xC0ED0000
-#define MS_MGC_MSK 0xffff0000
+#define ATTR_KILL_PRIV  (1 << 14)
 
-/* Inode flags - they have nothing to superblock flags now */
+#define vfs_setlease(a, b, c) setlease(a, b, c)
 
-#define S_SYNC		1	/* Writes are synced at once */
-#define S_NOATIME	2	/* Do not update access times */
-#define S_APPEND	4	/* Append-only file */
-#define S_IMMUTABLE	8	/* Immutable file */
-#define S_DEAD		16	/* removed, but still open directory */
-#define S_NOQUOTA	32	/* Inode is not counted to quota */
-#define S_DIRSYNC	64	/* Directory modifications are synchronous */
-#define S_NOCMTIME	128	/* Do not update file c/mtime */
-#define S_SWAPFILE	256	/* Do not truncate: swapon got its bmaps */
-#define S_PRIVATE	512	/* Inode is fs-internal */
-
-/*
- * Note that nosuid etc flags are inode-specific: setting some file-system
- * flags just means all the inodes inherit those flags by default. It might be
- * possible to override it selectively if you really wanted to with some
- * ioctl() that is not currently implemented.
- *
- * Exception: MS_RDONLY is always applied to the entire file system.
- *
- * Unfortunately, it is possible to change a filesystems flags with it mounted
- * with files in use.  This means that all of the inodes will not have their
- * i_flags updated.  Hence, i_flags no longer inherit the superblock mount
- * flags, so these have to be checked separately. -- rmk at arm.uk.linux.org
- */
-#define __IS_FLG(inode,flg) ((inode)->i_sb->s_flags & (flg))
-
-#define IS_RDONLY(inode) ((inode)->i_sb->s_flags & MS_RDONLY)
-#define IS_SYNC(inode)		(__IS_FLG(inode, MS_SYNCHRONOUS) || \
-					((inode)->i_flags & S_SYNC))
-#define IS_DIRSYNC(inode)	(__IS_FLG(inode, MS_SYNCHRONOUS|MS_DIRSYNC) || \
-					((inode)->i_flags & (S_SYNC|S_DIRSYNC)))
-#define IS_MANDLOCK(inode)	__IS_FLG(inode, MS_MANDLOCK)
-
-#define IS_NOQUOTA(inode)	((inode)->i_flags & S_NOQUOTA)
-#define IS_APPEND(inode)	((inode)->i_flags & S_APPEND)
-#define IS_IMMUTABLE(inode)	((inode)->i_flags & S_IMMUTABLE)
-#define IS_POSIXACL(inode)	__IS_FLG(inode, MS_POSIXACL)
-
-#define IS_DEADDIR(inode)	((inode)->i_flags & S_DEAD)
-#define IS_NOCMTIME(inode)	((inode)->i_flags & S_NOCMTIME)
-#define IS_SWAPFILE(inode)	((inode)->i_flags & S_SWAPFILE)
-#define IS_PRIVATE(inode)	((inode)->i_flags & S_PRIVATE)
-#define IS_NO_LEASES(inode)	__IS_FLG(inode, MS_NO_LEASES)
-
-/* the read-only stuff doesn't really belong here, but any other place is
-   probably as bad and I don't want to create yet another include file. */
-
-#define BLKROSET   _IO(0x12,93)	/* set device read-only (0 = read-write) */
-#define BLKROGET   _IO(0x12,94)	/* get read-only status (0 = read_write) */
-#define BLKRRPART  _IO(0x12,95)	/* re-read partition table */
-#define BLKGETSIZE _IO(0x12,96)	/* return device size /512 (long *arg) */
-#define BLKFLSBUF  _IO(0x12,97)	/* flush buffer cache */
-#define BLKRASET   _IO(0x12,98)	/* set read ahead for block device */
-#define BLKRAGET   _IO(0x12,99)	/* get current read ahead setting */
-#define BLKFRASET  _IO(0x12,100)/* set filesystem (mm/filemap.c) read-ahead */
-#define BLKFRAGET  _IO(0x12,101)/* get filesystem (mm/filemap.c) read-ahead */
-#define BLKSECTSET _IO(0x12,102)/* set max sectors per request (ll_rw_blk.c) */
-#define BLKSECTGET _IO(0x12,103)/* get max sectors per request (ll_rw_blk.c) */
-#define BLKSSZGET  _IO(0x12,104)/* get block device sector size */
-#if 0
-#define BLKPG      _IO(0x12,105)/* See blkpg.h */
-
-/* Some people are morons.  Do not use sizeof! */
-
-#define BLKELVGET  _IOR(0x12,106,size_t)/* elevator get */
-#define BLKELVSET  _IOW(0x12,107,size_t)/* elevator set */
-/* This was here just to show that the number is taken -
-   probably all these _IO(0x12,*) ioctls should be moved to blkpg.h. */
-#endif
-/* A jump here: 108-111 have been used for various private purposes. */
-#define BLKBSZGET  _IOR(0x12,112,size_t)
-#define BLKBSZSET  _IOW(0x12,113,size_t)
-#define BLKGETSIZE64 _IOR(0x12,114,size_t)	/* return device size in bytes (u64 *arg) */
-#define BLKTRACESETUP _IOWR(0x12,115,struct blk_user_trace_setup)
-#define BLKTRACESTART _IO(0x12,116)
-#define BLKTRACESTOP _IO(0x12,117)
-#define BLKTRACETEARDOWN _IO(0x12,118)
-
-#define BMAP_IOCTL 1		/* obsolete - kept for compatibility */
-#define FIBMAP	   _IO(0x00,1)	/* bmap access */
-#define FIGETBSZ   _IO(0x00,2)	/* get the block size used for bmap */
-
-#define	FS_IOC_GETFLAGS			_IOR('f', 1, long)
-#define	FS_IOC_SETFLAGS			_IOW('f', 2, long)
-#define	FS_IOC_GETVERSION		_IOR('v', 1, long)
-#define	FS_IOC_SETVERSION		_IOW('v', 2, long)
-#define FS_IOC32_GETFLAGS		_IOR('f', 1, int)
-#define FS_IOC32_SETFLAGS		_IOW('f', 2, int)
-#define FS_IOC32_GETVERSION		_IOR('v', 1, int)
-#define FS_IOC32_SETVERSION		_IOW('v', 2, int)
-
-/*
- * Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)
- */
-#define	FS_SECRM_FL			0x00000001 /* Secure deletion */
-#define	FS_UNRM_FL			0x00000002 /* Undelete */
-#define	FS_COMPR_FL			0x00000004 /* Compress file */
-#define FS_SYNC_FL			0x00000008 /* Synchronous updates */
-#define FS_IMMUTABLE_FL			0x00000010 /* Immutable file */
-#define FS_APPEND_FL			0x00000020 /* writes to file may only append */
-#define FS_NODUMP_FL			0x00000040 /* do not dump file */
-#define FS_NOATIME_FL			0x00000080 /* do not update atime */
-/* Reserved for compression usage... */
-#define FS_DIRTY_FL			0x00000100
-#define FS_COMPRBLK_FL			0x00000200 /* One or more compressed clusters */
-#define FS_NOCOMP_FL			0x00000400 /* Don't compress */
-#define FS_ECOMPR_FL			0x00000800 /* Compression error */
-/* End compression flags --- maybe not all used */
-#define FS_BTREE_FL			0x00001000 /* btree format dir */
-#define FS_INDEX_FL			0x00001000 /* hash-indexed directory */
-#define FS_IMAGIC_FL			0x00002000 /* AFS directory */
-#define FS_JOURNAL_DATA_FL		0x00004000 /* Reserved for ext3 */
-#define FS_NOTAIL_FL			0x00008000 /* file tail should not be merged */
-#define FS_DIRSYNC_FL			0x00010000 /* dirsync behaviour (directories only) */
-#define FS_TOPDIR_FL			0x00020000 /* Top of directory hierarchies*/
-#define FS_EXTENT_FL			0x00080000 /* Extents */
-#define FS_DIRECTIO_FL			0x00100000 /* Use direct i/o */
-#define FS_RESERVED_FL			0x80000000 /* reserved for ext2 lib */
-
-#define FS_FL_USER_VISIBLE		0x0003DFFF /* User visible flags */
-#define FS_FL_USER_MODIFIABLE		0x000380FF /* User modifiable flags */
-
-
-#define SYNC_FILE_RANGE_WAIT_BEFORE	1
-#define SYNC_FILE_RANGE_WRITE		2
-#define SYNC_FILE_RANGE_WAIT_AFTER	4
-
-#ifdef __KERNEL__
-
-#include <linux/linkage.h>
-#include <linux/wait.h>
-#include <linux/types.h>
-#include <linux/kdev_t.h>
-#include <linux/dcache.h>
-#include <linux/stat.h>
-#include <linux/cache.h>
-#include <linux/kobject.h>
-#include <linux/list.h>
-#include <linux/radix-tree.h>
-#include <linux/prio_tree.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/mutex.h>
-
-#include <asm/atomic.h>
-#include <asm/semaphore.h>
-#include <asm/byteorder.h>
-
-struct hd_geometry;
-struct iovec;
-struct nameidata;
-struct kiocb;
-struct pipe_inode_info;
-struct poll_table_struct;
-struct kstatfs;
-struct vm_area_struct;
-struct vfsmount;
-
-extern void __init inode_init(unsigned long);
-extern void __init inode_init_early(void);
-extern void __init mnt_init(unsigned long);
-extern void __init files_init(unsigned long);
-
-struct buffer_head;
-typedef int (get_block_t)(struct inode *inode, sector_t iblock,
-			struct buffer_head *bh_result, int create);
-typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
-			ssize_t bytes, void *private);
-
-/*
- * Attribute flags.  These should be or-ed together to figure out what
- * has been changed!
- */
-#define ATTR_MODE	1
-#define ATTR_UID	2
-#define ATTR_GID	4
-#define ATTR_SIZE	8
-#define ATTR_ATIME	16
-#define ATTR_MTIME	32
-#define ATTR_CTIME	64
-#define ATTR_ATIME_SET	128
-#define ATTR_MTIME_SET	256
-#define ATTR_FORCE	512	/* Not a change, but a change it */
-#define ATTR_ATTR_FLAG	1024
-#define ATTR_KILL_SUID	2048
-#define ATTR_KILL_SGID	4096
-#define ATTR_FILE	8192
-
-/*
- * This is the Inode Attributes structure, used for notify_change().  It
- * uses the above definitions as flags, to know which values have changed.
- * Also, in this manner, a Filesystem can look at only the values it cares
- * about.  Basically, these are the attributes that the VFS layer can
- * request to change from the FS layer.
- *
- * Derek Atkins <warlord at MIT.EDU> 94-10-20
- */
-struct iattr {
-	unsigned int	ia_valid;
-	umode_t		ia_mode;
-	uid_t		ia_uid;
-	gid_t		ia_gid;
-	loff_t		ia_size;
-	struct timespec	ia_atime;
-	struct timespec	ia_mtime;
-	struct timespec	ia_ctime;
-
-	/*
-	 * Not an attribute, but an auxilary info for filesystems wanting to
-	 * implement an ftruncate() like method.  NOTE: filesystem should
-	 * check for (ia_valid & ATTR_FILE), and not for (ia_file != NULL).
-	 */
-	struct file	*ia_file;
-};
-
-/*
- * Includes for diskquotas.
- */
-#include <linux/quota.h>
-
-/** 
- * enum positive_aop_returns - aop return codes with specific semantics
- *
- * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has
- * 			    completed, that the page is still locked, and
- * 			    should be considered active.  The VM uses this hint
- * 			    to return the page to the active list -- it won't
- * 			    be a candidate for writeback again in the near
- * 			    future.  Other callers must be careful to unlock
- * 			    the page if they get this return.  Returned by
- * 			    writepage(); 
- *
- * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has
- *  			unlocked it and the page might have been truncated.
- *  			The caller should back up to acquiring a new page and
- *  			trying again.  The aop will be taking reasonable
- *  			precautions not to livelock.  If the caller held a page
- *  			reference, it should drop it before retrying.  Returned
- *  			by readpage(), prepare_write(), and commit_write().
- *
- * address_space_operation functions return these large constants to indicate
- * special semantics to the caller.  These are much larger than the bytes in a
- * page to allow for functions that return the number of bytes operated on in a
- * given page.
- */
-
-enum positive_aop_returns {
-	AOP_WRITEPAGE_ACTIVATE	= 0x80000,
-	AOP_TRUNCATED_PAGE	= 0x80001,
-};
-
-/*
- * oh the beauties of C type declarations.
- */
-struct page;
-struct address_space;
-struct writeback_control;
-
-struct address_space_operations {
-	int (*writepage)(struct page *page, struct writeback_control *wbc);
-	int (*readpage)(struct file *, struct page *);
-	void (*sync_page)(struct page *);
-
-	/* Write back some dirty pages from this mapping. */
-	int (*writepages)(struct address_space *, struct writeback_control *);
-
-	/* Set a page dirty.  Return true if this dirtied it */
-	int (*set_page_dirty)(struct page *page);
-
-	int (*readpages)(struct file *filp, struct address_space *mapping,
-			struct list_head *pages, unsigned nr_pages);
-
-	/*
-	 * ext3 requires that a successful prepare_write() call be followed
-	 * by a commit_write() call - they must be balanced
-	 */
-	int (*prepare_write)(struct file *, struct page *, unsigned, unsigned);
-	int (*commit_write)(struct file *, struct page *, unsigned, unsigned);
-	/* Unfortunately this kludge is needed for FIBMAP. Don't use it */
-	sector_t (*bmap)(struct address_space *, sector_t);
-	void (*invalidatepage) (struct page *, unsigned long);
-	int (*releasepage) (struct page *, gfp_t);
-	ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
-			loff_t offset, unsigned long nr_segs);
-	struct page* (*get_xip_page)(struct address_space *, sector_t,
-			int);
-	/* migrate the contents of a page to the specified target */
-	int (*migratepage) (struct address_space *,
-			struct page *, struct page *);
-};
-
-struct backing_dev_info;
-struct address_space {
-	struct inode		*host;		/* owner: inode, block_device */
-	struct radix_tree_root	page_tree;	/* radix tree of all pages */
-	rwlock_t		tree_lock;	/* and rwlock protecting it */
-	unsigned int		i_mmap_writable;/* count VM_SHARED mappings */
-	struct prio_tree_root	i_mmap;		/* tree of private and shared mappings */
-	struct list_head	i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
-	spinlock_t		i_mmap_lock;	/* protect tree, count, list */
-	unsigned int		truncate_count;	/* Cover race condition with truncate */
-	unsigned long		nrpages;	/* number of total pages */
-	pgoff_t			writeback_index;/* writeback starts here */
-	const struct address_space_operations *a_ops;	/* methods */
-	unsigned long		flags;		/* error bits/gfp mask */
-	struct backing_dev_info *backing_dev_info; /* device readahead, etc */
-	spinlock_t		private_lock;	/* for use by the address_space */
-	struct list_head	private_list;	/* ditto */
-	struct address_space	*assoc_mapping;	/* ditto */
-} __attribute__((aligned(sizeof(long))));
-	/*
-	 * On most architectures that alignment is already the case; but
-	 * must be enforced here for CRIS, to let the least signficant bit
-	 * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON.
-	 */
-
-struct block_device {
-	dev_t			bd_dev;  /* not a kdev_t - it's a search key */
-	struct inode *		bd_inode;	/* will die */
-	int			bd_openers;
-	struct mutex		bd_mutex;	/* open/close mutex */
-	struct semaphore        bd_mount_sem;
-	struct list_head	bd_inodes;
-	void *			bd_holder;
-	int			bd_holders;
-#ifdef CONFIG_SYSFS
-	struct list_head	bd_holder_list;
-#endif
-	struct block_device *	bd_contains;
-	unsigned		bd_block_size;
-	struct hd_struct *	bd_part;
-	/* number of times partitions within this device have been opened. */
-	unsigned		bd_part_count;
-	int			bd_invalidated;
-	struct gendisk *	bd_disk;
-	struct list_head	bd_list;
-	struct backing_dev_info *bd_inode_backing_dev_info;
-	/*
-	 * Private data.  You must have bd_claim'ed the block_device
-	 * to use this.  NOTE:  bd_claim allows an owner to claim
-	 * the same device multiple times, the owner must take special
-	 * care to not mess up bd_private for that case.
-	 */
-	unsigned long		bd_private;
-};
-
-/*
- * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
- * radix trees
- */
-#define PAGECACHE_TAG_DIRTY	0
-#define PAGECACHE_TAG_WRITEBACK	1
-
-int mapping_tagged(struct address_space *mapping, int tag);
-
-/*
- * Might pages of this file be mapped into userspace?
- */
-static inline int mapping_mapped(struct address_space *mapping)
+static inline int __mandatory_lock(struct inode *ino)
 {
-	return	!prio_tree_empty(&mapping->i_mmap) ||
-		!list_empty(&mapping->i_mmap_nonlinear);
+	return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID;
 }
 
-/*
- * Might pages of this file have been modified in userspace?
- * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff
- * marks vma as VM_SHARED if it is shared, and the file was opened for
- * writing i.e. vma may be mprotected writable even if now readonly.
- */
-static inline int mapping_writably_mapped(struct address_space *mapping)
-{
-	return mapping->i_mmap_writable != 0;
-}
+#define mandatory_lock(_args) MANDATORY_LOCK(_args)
 
-/*
- * Use sequence counter to get consistent i_size on 32-bit processors.
- */
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-#include <linux/seqlock.h>
-#define __NEED_I_SIZE_ORDERED
-#define i_size_ordered_init(inode) seqcount_init(&inode->i_size_seqcount)
-#else
-#define i_size_ordered_init(inode) do { } while (0)
-#endif
-
-struct inode {
-	struct hlist_node	i_hash;
-	struct list_head	i_list;
-	struct list_head	i_sb_list;
-	struct list_head	i_dentry;
-	unsigned long		i_ino;
-	atomic_t		i_count;
-	umode_t			i_mode;
-	unsigned int		i_nlink;
-	uid_t			i_uid;
-	gid_t			i_gid;
-	dev_t			i_rdev;
-	loff_t			i_size;
-	struct timespec		i_atime;
-	struct timespec		i_mtime;
-	struct timespec		i_ctime;
-	unsigned int		i_blkbits;
-	unsigned long		i_version;
-	blkcnt_t		i_blocks;
-	unsigned short          i_bytes;
-	spinlock_t		i_lock;	/* i_blocks, i_bytes, maybe i_size */
-	struct mutex		i_mutex;
-	struct rw_semaphore	i_alloc_sem;
-	struct inode_operations	*i_op;
-	const struct file_operations	*i_fop;	/* former ->i_op->default_file_ops */
-	struct super_block	*i_sb;
-	struct file_lock	*i_flock;
-	struct address_space	*i_mapping;
-	struct address_space	i_data;
-#ifdef CONFIG_QUOTA
-	struct dquot		*i_dquot[MAXQUOTAS];
-#endif
-	struct list_head	i_devices;
-	union {
-		struct pipe_inode_info	*i_pipe;
-		struct block_device	*i_bdev;
-		struct cdev		*i_cdev;
-	};
-	int			i_cindex;
-
-	__u32			i_generation;
-
-#ifdef CONFIG_DNOTIFY
-	unsigned long		i_dnotify_mask; /* Directory notify events */
-	struct dnotify_struct	*i_dnotify; /* for directory notifications */
-#endif
-
-#ifdef CONFIG_INOTIFY
-	struct list_head	inotify_watches; /* watches on this inode */
-	struct mutex		inotify_mutex;	/* protects the watches list */
-#endif
-
-	unsigned long		i_state;
-	unsigned long		dirtied_when;	/* jiffies of first dirtying */
-
-	unsigned int		i_flags;
-
-	atomic_t		i_writecount;
-	void			*i_security;
-	void			*i_private; /* fs or device private pointer */
-#ifdef __NEED_I_SIZE_ORDERED
-	seqcount_t		i_size_seqcount;
-#endif
-};
-
-/*
- * inode->i_mutex nesting subclasses for the lock validator:
- *
- * 0: the object of the current VFS operation
- * 1: parent
- * 2: child/target
- * 3: quota file
- *
- * The locking order between these classes is
- * parent -> child -> normal -> xattr -> quota
- */
-enum inode_i_mutex_lock_class
+static inline int backport_vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
 {
-	I_MUTEX_NORMAL,
-	I_MUTEX_PARENT,
-	I_MUTEX_CHILD,
-	I_MUTEX_XATTR,
-	I_MUTEX_QUOTA
-};
-
-/*
- * NOTE: in a 32bit arch with a preemptable kernel and
- * an UP compile the i_size_read/write must be atomic
- * with respect to the local cpu (unlike with preempt disabled),
- * but they don't need to be atomic with respect to other cpus like in
- * true SMP (so they need either to either locally disable irq around
- * the read or for example on x86 they can be still implemented as a
- * cmpxchg8b without the need of the lock prefix). For SMP compiles
- * and 64bit archs it makes no difference if preempt is enabled or not.
- */
-static inline loff_t i_size_read(struct inode *inode)
-{
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-	loff_t i_size;
-	unsigned int seq;
-
-	do {
-		seq = read_seqcount_begin(&inode->i_size_seqcount);
-		i_size = inode->i_size;
-	} while (read_seqcount_retry(&inode->i_size_seqcount, seq));
-	return i_size;
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
-	loff_t i_size;
-
-	preempt_disable();
-	i_size = inode->i_size;
-	preempt_enable();
-	return i_size;
-#else
-	return inode->i_size;
-#endif
+	return vfs_symlink(dir, dentry, oldname, 0);
 }
 
+#define vfs_symlink(_dir, _dentry, _oldname) backport_vfs_symlink(_dir, _dentry, _oldname)
 
-static inline void i_size_write(struct inode *inode, loff_t i_size)
+#ifdef CONFIG_DEBUG_WRITECOUNT
+static inline void file_take_write(struct file *f)
 {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-	write_seqcount_begin(&inode->i_size_seqcount);
-	inode->i_size = i_size;
-	write_seqcount_end(&inode->i_size_seqcount);
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
-	preempt_disable();
-	inode->i_size = i_size;
-	preempt_enable();
+	WARN_ON(f->f_mnt_write_state != 0);
+	f->f_mnt_write_state = FILE_MNT_WRITE_TAKEN;
+}
 #else
-	inode->i_size = i_size;
+static inline void file_take_write(struct file *filp) {}
 #endif
-}
 
-static inline unsigned iminor(struct inode *inode)
+static inline int inode_permission(struct inode *inode, int flags)
 {
-	return MINOR(inode->i_rdev);
+	return permission(inode, flags, NULL);
 }
 
-static inline unsigned imajor(struct inode *inode)
+static inline int __mnt_is_readonly(struct vfsmount *mnt)
 {
-	return MAJOR(inode->i_rdev);
+	if (mnt->mnt_sb->s_flags & MS_RDONLY)
+		return 1;
+	return 0;
 }
 
-extern struct block_device *I_BDEV(struct inode *inode);
-
-struct fown_struct {
-	rwlock_t lock;          /* protects pid, uid, euid fields */
-	int pid;		/* pid or -pgrp where SIGIO should be sent */
-	uid_t uid, euid;	/* uid/euid of process setting the owner */
-	void *security;
-	int signum;		/* posix.1b rt signal to be delivered on IO */
-};
-
-/*
- * Track a single file's readahead state
- */
-struct file_ra_state {
-	unsigned long start;		/* Current window */
-	unsigned long size;
-	unsigned long flags;		/* ra flags RA_FLAG_xxx*/
-	unsigned long cache_hit;	/* cache hit count*/
-	unsigned long prev_page;	/* Cache last read() position */
-	unsigned long ahead_start;	/* Ahead window */
-	unsigned long ahead_size;
-	unsigned long ra_pages;		/* Maximum readahead window */
-	unsigned long mmap_hit;		/* Cache hit stat for mmap accesses */
-	unsigned long mmap_miss;	/* Cache miss stat for mmap accesses */
-};
-#define RA_FLAG_MISS 0x01	/* a cache miss occured against this file */
-#define RA_FLAG_INCACHE 0x02	/* file is already in cache */
-
-struct file {
-	/*
-	 * fu_list becomes invalid after file_free is called and queued via
-	 * fu_rcuhead for RCU freeing
-	 */
-	union {
-		struct list_head	fu_list;
-		struct rcu_head 	fu_rcuhead;
-	} f_u;
-	struct dentry		*f_dentry;
-	struct vfsmount         *f_vfsmnt;
-	const struct file_operations	*f_op;
-	atomic_t		f_count;
-	unsigned int 		f_flags;
-	mode_t			f_mode;
-	loff_t			f_pos;
-	struct fown_struct	f_owner;
-	unsigned int		f_uid, f_gid;
-	struct file_ra_state	f_ra;
-
-	unsigned long		f_version;
-	void			*f_security;
-
-	/* needed for tty driver, and maybe others */
-	void			*private_data;
-
-#ifdef CONFIG_EPOLL
-	/* Used by fs/eventpoll.c to link all the hooks to this file */
-	struct list_head	f_ep_links;
-	spinlock_t		f_ep_lock;
-#endif /* #ifdef CONFIG_EPOLL */
-	struct address_space	*f_mapping;
-};
-extern spinlock_t files_lock;
-#define file_list_lock() spin_lock(&files_lock);
-#define file_list_unlock() spin_unlock(&files_lock);
-
-#define get_file(x)	atomic_inc(&(x)->f_count)
-#define file_count(x)	atomic_read(&(x)->f_count)
-
-#define	MAX_NON_LFS	((1UL<<31) - 1)
-
-/* Page cache limit. The filesystems should put that into their s_maxbytes 
-   limits, otherwise bad things can happen in VM. */ 
-#if BITS_PER_LONG==32
-#define MAX_LFS_FILESIZE	(((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) 
-#elif BITS_PER_LONG==64
-#define MAX_LFS_FILESIZE 	0x7fffffffffffffffUL
-#endif
-
-#define FL_POSIX	1
-#define FL_FLOCK	2
-#define FL_ACCESS	8	/* not trying to lock, just looking */
-#define FL_EXISTS	16	/* when unlocking, test for existence */
-#define FL_LEASE	32	/* lease held on this file */
-#define FL_CLOSE	64	/* unlock on close */
-#define FL_SLEEP	128	/* A blocking lock */
-
-/*
- * The POSIX file lock owner is determined by
- * the "struct files_struct" in the thread group
- * (or NULL for no owner - BSD locks).
- *
- * Lockd stuffs a "host" pointer into this.
- */
-typedef struct files_struct *fl_owner_t;
-
-struct file_lock_operations {
-	void (*fl_insert)(struct file_lock *);	/* lock insertion callback */
-	void (*fl_remove)(struct file_lock *);	/* lock removal callback */
-	void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
-	void (*fl_release_private)(struct file_lock *);
-};
-
-struct lock_manager_operations {
-	int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
-	void (*fl_notify)(struct file_lock *);	/* unblock callback */
-	int (*fl_grant)(struct file_lock *, struct file_lock *, int);
-	void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
-	void (*fl_release_private)(struct file_lock *);
-	void (*fl_break)(struct file_lock *);
-	int (*fl_mylease)(struct file_lock *, struct file_lock *);
-	int (*fl_change)(struct file_lock **, int);
-};
-
-/* that will die - we need it for nfs_lock_info */
-#include <linux/nfs_fs_i.h>
-
-struct file_lock {
-	struct file_lock *fl_next;	/* singly linked list for this inode  */
-	struct list_head fl_link;	/* doubly linked list of all locks */
-	struct list_head fl_block;	/* circular list of blocked processes */
-	fl_owner_t fl_owner;
-	unsigned int fl_pid;
-	wait_queue_head_t fl_wait;
-	struct file *fl_file;
-	unsigned char fl_flags;
-	unsigned char fl_type;
-	loff_t fl_start;
-	loff_t fl_end;
-
-	struct fasync_struct *	fl_fasync; /* for lease break notifications */
-	unsigned long fl_break_time;	/* for nonblocking lease breaks */
-
-	struct file_lock_operations *fl_ops;	/* Callbacks for filesystems */
-	struct lock_manager_operations *fl_lmops;	/* Callbacks for lockmanagers */
-	union {
-		struct nfs_lock_info	nfs_fl;
-		struct nfs4_lock_info	nfs4_fl;
-	} fl_u;
-};
-
-/* The following constant reflects the upper bound of the file/locking space */
-#ifndef OFFSET_MAX
-#define INT_LIMIT(x)	(~((x)1 << (sizeof(x)*8 - 1)))
-#define OFFSET_MAX	INT_LIMIT(loff_t)
-#define OFFT_OFFSET_MAX	INT_LIMIT(off_t)
-#endif
-
-#include <linux/fcntl.h>
-
-extern int fcntl_getlk(struct file *, struct flock __user *);
-extern int fcntl_setlk(unsigned int, struct file *, unsigned int,
-			struct flock __user *);
-
-#if BITS_PER_LONG == 32
-extern int fcntl_getlk64(struct file *, struct flock64 __user *);
-extern int fcntl_setlk64(unsigned int, struct file *, unsigned int,
-			struct flock64 __user *);
-#endif
-
-extern void send_sigio(struct fown_struct *fown, int fd, int band);
-extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
-extern int fcntl_getlease(struct file *filp);
-
-/* fs/sync.c */
-extern int do_sync_file_range(struct file *file, loff_t offset, loff_t endbyte,
-			unsigned int flags);
-
-/* fs/locks.c */
-extern void locks_init_lock(struct file_lock *);
-extern void locks_copy_lock(struct file_lock *, struct file_lock *);
-extern void locks_remove_posix(struct file *, fl_owner_t);
-extern void locks_remove_flock(struct file *);
-extern int posix_test_lock(struct file *, struct file_lock *, struct file_lock *);
-extern int posix_lock_file_conf(struct file *, struct file_lock *, struct file_lock *);
-extern int posix_lock_file(struct file *, struct file_lock *);
-extern int posix_lock_file_wait(struct file *, struct file_lock *);
-extern int posix_unblock_lock(struct file *, struct file_lock *);
-extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
-extern int __break_lease(struct inode *inode, unsigned int flags);
-extern void lease_get_mtime(struct inode *, struct timespec *time);
-extern int setlease(struct file *, long, struct file_lock **);
-extern int lease_modify(struct file_lock **, int);
-extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
-extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
-
-struct fasync_struct {
-	int	magic;
-	int	fa_fd;
-	struct	fasync_struct	*fa_next; /* singly linked list */
-	struct	file 		*fa_file;
-};
-
-#define FASYNC_MAGIC 0x4601
-
-/* SMP safe fasync helpers: */
-extern int fasync_helper(int, struct file *, int, struct fasync_struct **);
-/* can be called from interrupts */
-extern void kill_fasync(struct fasync_struct **, int, int);
-/* only for net: no internal synchronization */
-extern void __kill_fasync(struct fasync_struct *, int, int);
-
-extern int f_setown(struct file *filp, unsigned long arg, int force);
-extern void f_delown(struct file *filp);
-extern int send_sigurg(struct fown_struct *fown);
-
-/*
- *	Umount options
- */
-
-#define MNT_FORCE	0x00000001	/* Attempt to forcibily umount */
-#define MNT_DETACH	0x00000002	/* Just detach from the tree */
-#define MNT_EXPIRE	0x00000004	/* Mark for expiry */
-
-extern struct list_head super_blocks;
-extern spinlock_t sb_lock;
-
-#define sb_entry(list)	list_entry((list), struct super_block, s_list)
-#define S_BIAS (1<<30)
-struct super_block {
-	struct list_head	s_list;		/* Keep this first */
-	dev_t			s_dev;		/* search index; _not_ kdev_t */
-	unsigned long		s_blocksize;
-	unsigned char		s_blocksize_bits;
-	unsigned char		s_dirt;
-	unsigned long long	s_maxbytes;	/* Max file size */
-	struct file_system_type	*s_type;
-	struct super_operations	*s_op;
-	struct dquot_operations	*dq_op;
- 	struct quotactl_ops	*s_qcop;
-	struct export_operations *s_export_op;
-	unsigned long		s_flags;
-	unsigned long		s_magic;
-	struct dentry		*s_root;
-	struct rw_semaphore	s_umount;
-	struct mutex		s_lock;
-	int			s_count;
-	int			s_syncing;
-	int			s_need_sync_fs;
-	atomic_t		s_active;
-	void                    *s_security;
-	struct xattr_handler	**s_xattr;
-
-	struct list_head	s_inodes;	/* all inodes */
-	struct list_head	s_dirty;	/* dirty inodes */
-	struct list_head	s_io;		/* parked for writeback */
-	struct hlist_head	s_anon;		/* anonymous dentries for (nfs) exporting */
-	struct list_head	s_files;
-
-	struct block_device	*s_bdev;
-	struct list_head	s_instances;
-	struct quota_info	s_dquot;	/* Diskquota specific options */
-
-	int			s_frozen;
-	wait_queue_head_t	s_wait_unfrozen;
-
-	char s_id[32];				/* Informational name */
-
-	void 			*s_fs_info;	/* Filesystem private info */
-
-	/*
-	 * The next field is for VFS *only*. No filesystems have any business
-	 * even looking at it. You had been warned.
-	 */
-	struct mutex s_vfs_rename_mutex;	/* Kludge */
-
-	/* Granularity of c/m/atime in ns.
-	   Cannot be worse than a second */
-	u32		   s_time_gran;
-};
-
-extern struct timespec current_fs_time(struct super_block *sb);
-
-/*
- * Snapshotting support.
- */
-enum {
-	SB_UNFROZEN = 0,
-	SB_FREEZE_WRITE	= 1,
-	SB_FREEZE_TRANS = 2,
-};
-
-#define vfs_check_frozen(sb, level) \
-	wait_event((sb)->s_wait_unfrozen, ((sb)->s_frozen < (level)))
-
-static inline void get_fs_excl(void)
+static inline void drop_nlink(struct inode *inode)
 {
-	atomic_inc(&current->fs_excl);
-}
-
-static inline void put_fs_excl(void)
-{
-	atomic_dec(&current->fs_excl);
-}
-
-static inline int has_fs_excl(void)
-{
-	return atomic_read(&current->fs_excl);
-}
-
-
-/*
- * Superblock locking.
- */
-static inline void lock_super(struct super_block * sb)
-{
-	get_fs_excl();
-	mutex_lock(&sb->s_lock);
-}
-
-static inline void unlock_super(struct super_block * sb)
-{
-	put_fs_excl();
-	mutex_unlock(&sb->s_lock);
-}
-
-/*
- * VFS helper functions..
- */
-extern int vfs_permission(struct nameidata *, int);
-extern int vfs_create(struct inode *, struct dentry *, int, struct nameidata *);
-extern int vfs_mkdir(struct inode *, struct dentry *, int);
-extern int vfs_mknod(struct inode *, struct dentry *, int, dev_t);
-extern int vfs_symlink(struct inode *, struct dentry *, const char *, int);
-extern int vfs_link(struct dentry *, struct inode *, struct dentry *);
-extern int vfs_rmdir(struct inode *, struct dentry *);
-extern int vfs_unlink(struct inode *, struct dentry *);
-extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
-
-/*
- * VFS dentry helper functions.
- */
-extern void dentry_unhash(struct dentry *dentry);
-
-/*
- * VFS file helper functions.
- */
-extern int file_permission(struct file *, int);
-
-/*
- * File types
- *
- * NOTE! These match bits 12..15 of stat.st_mode
- * (ie "(i_mode >> 12) & 15").
- */
-#define DT_UNKNOWN	0
-#define DT_FIFO		1
-#define DT_CHR		2
-#define DT_DIR		4
-#define DT_BLK		6
-#define DT_REG		8
-#define DT_LNK		10
-#define DT_SOCK		12
-#define DT_WHT		14
-
-#define OSYNC_METADATA	(1<<0)
-#define OSYNC_DATA	(1<<1)
-#define OSYNC_INODE	(1<<2)
-int generic_osync_inode(struct inode *, struct address_space *, int);
-
-/*
- * This is the "filldir" function type, used by readdir() to let
- * the kernel specify what kind of dirent layout it wants to have.
- * This allows the kernel to read directories into kernel space or
- * to have different dirent layouts depending on the binary type.
- */
-typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned);
-
-struct block_device_operations {
-	int (*open) (struct inode *, struct file *);
-	int (*release) (struct inode *, struct file *);
-	int (*ioctl) (struct inode *, struct file *, unsigned, unsigned long);
-	long (*unlocked_ioctl) (struct file *, unsigned, unsigned long);
-	long (*compat_ioctl) (struct file *, unsigned, unsigned long);
-	int (*direct_access) (struct block_device *, sector_t, unsigned long *);
-	int (*media_changed) (struct gendisk *);
-	int (*revalidate_disk) (struct gendisk *);
-	int (*getgeo)(struct block_device *, struct hd_geometry *);
-	struct module *owner;
-};
-
-/*
- * "descriptor" for what we're up to with a read for sendfile().
- * This allows us to use the same read code yet
- * have multiple different users of the data that
- * we read from a file.
- *
- * The simplest case just copies the data to user
- * mode.
- */
-typedef struct {
-	size_t written;
-	size_t count;
-	union {
-		char __user * buf;
-		void *data;
-	} arg;
-	int error;
-} read_descriptor_t;
-
-typedef int (*read_actor_t)(read_descriptor_t *, struct page *, unsigned long, unsigned long);
-
-/* These macros are for out of kernel modules to test that
- * the kernel supports the unlocked_ioctl and compat_ioctl
- * fields in struct file_operations. */
-#define HAVE_COMPAT_IOCTL 1
-#define HAVE_UNLOCKED_IOCTL 1
-
-/*
- * NOTE:
- * read, write, poll, fsync, readv, writev, unlocked_ioctl and compat_ioctl
- * can be called without the big kernel lock held in all filesystems.
- */
-struct file_operations {
-	struct module *owner;
-	loff_t (*llseek) (struct file *, loff_t, int);
-	ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
-	ssize_t (*aio_read) (struct kiocb *, char __user *, size_t, loff_t);
-	ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
-	ssize_t (*aio_write) (struct kiocb *, const char __user *, size_t, loff_t);
-	int (*readdir) (struct file *, void *, filldir_t);
-	unsigned int (*poll) (struct file *, struct poll_table_struct *);
-	int (*ioctl) (struct inode *, struct file *, unsigned int, unsigned long);
-	long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
-	long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
-	int (*mmap) (struct file *, struct vm_area_struct *);
-	int (*open) (struct inode *, struct file *);
-	int (*flush) (struct file *, fl_owner_t id);
-	int (*release) (struct inode *, struct file *);
-	int (*fsync) (struct file *, struct dentry *, int datasync);
-	int (*aio_fsync) (struct kiocb *, int datasync);
-	int (*fasync) (int, struct file *, int);
-	int (*lock) (struct file *, int, struct file_lock *);
-	ssize_t (*readv) (struct file *, const struct iovec *, unsigned long, loff_t *);
-	ssize_t (*writev) (struct file *, const struct iovec *, unsigned long, loff_t *);
-	ssize_t (*sendfile) (struct file *, loff_t *, size_t, read_actor_t, void *);
-	ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
-	unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
-	int (*check_flags)(int);
-	int (*dir_notify)(struct file *filp, unsigned long arg);
-	int (*flock) (struct file *, int, struct file_lock *);
-	ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
-	ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
-};
-
-struct inode_operations {
-	int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
-	struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
-	int (*link) (struct dentry *,struct inode *,struct dentry *);
-	int (*unlink) (struct inode *,struct dentry *);
-	int (*symlink) (struct inode *,struct dentry *,const char *);
-	int (*mkdir) (struct inode *,struct dentry *,int);
-	int (*rmdir) (struct inode *,struct dentry *);
-	int (*mknod) (struct inode *,struct dentry *,int,dev_t);
-	int (*rename) (struct inode *, struct dentry *,
-			struct inode *, struct dentry *);
-	int (*readlink) (struct dentry *, char __user *,int);
-	void * (*follow_link) (struct dentry *, struct nameidata *);
-	void (*put_link) (struct dentry *, struct nameidata *, void *);
-	void (*truncate) (struct inode *);
-	int (*permission) (struct inode *, int, struct nameidata *);
-	int (*setattr) (struct dentry *, struct iattr *);
-	int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
-	int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
-	ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
-	ssize_t (*listxattr) (struct dentry *, char *, size_t);
-	int (*removexattr) (struct dentry *, const char *);
-	void (*truncate_range)(struct inode *, loff_t, loff_t);
-};
-
-struct seq_file;
-
-extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
-extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
-extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
-		unsigned long, loff_t *);
-extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
-		unsigned long, loff_t *);
-
-/*
- * NOTE: write_inode, delete_inode, clear_inode, put_inode can be called
- * without the big kernel lock held in all filesystems.
- */
-struct super_operations {
-   	struct inode *(*alloc_inode)(struct super_block *sb);
-	void (*destroy_inode)(struct inode *);
-
-	void (*read_inode) (struct inode *);
-  
-   	void (*dirty_inode) (struct inode *);
-	int (*write_inode) (struct inode *, int);
-	void (*put_inode) (struct inode *);
-	void (*drop_inode) (struct inode *);
-	void (*delete_inode) (struct inode *);
-	void (*put_super) (struct super_block *);
-	void (*write_super) (struct super_block *);
-	int (*sync_fs)(struct super_block *sb, int wait);
-	void (*write_super_lockfs) (struct super_block *);
-	void (*unlockfs) (struct super_block *);
-	int (*statfs) (struct dentry *, struct kstatfs *);
-	int (*remount_fs) (struct super_block *, int *, char *);
-	void (*clear_inode) (struct inode *);
-	void (*umount_begin) (struct vfsmount *, int);
-
-	int (*show_options)(struct seq_file *, struct vfsmount *);
-	int (*show_stats)(struct seq_file *, struct vfsmount *);
-
-	ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
-	ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
-};
-
-/* Inode state bits.  Protected by inode_lock. */
-#define I_DIRTY_SYNC		1 /* Not dirty enough for O_DATASYNC */
-#define I_DIRTY_DATASYNC	2 /* Data-related inode changes pending */
-#define I_DIRTY_PAGES		4 /* Data-related inode changes pending */
-#define __I_LOCK		3
-#define I_LOCK			(1 << __I_LOCK)
-#define I_FREEING		16
-#define I_CLEAR			32
-#define I_NEW			64
-#define I_WILL_FREE		128
-
-#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
-
-extern void __mark_inode_dirty(struct inode *, int);
-static inline void mark_inode_dirty(struct inode *inode)
-{
-	__mark_inode_dirty(inode, I_DIRTY);
-}
-
-static inline void mark_inode_dirty_sync(struct inode *inode)
-{
-	__mark_inode_dirty(inode, I_DIRTY_SYNC);
-}
-
-static inline void inc_nlink(struct inode *inode)
-{
-	inode->i_nlink++;
-}
-
-static inline void inode_inc_link_count(struct inode *inode)
-{
-	inc_nlink(inode);
-	mark_inode_dirty(inode);
-}
-
-static inline void inode_dec_link_count(struct inode *inode)
-{
 	inode->i_nlink--;
-	mark_inode_dirty(inode);
 }
 
-extern void touch_atime(struct vfsmount *mnt, struct dentry *dentry);
-static inline void file_accessed(struct file *file)
+static inline void clear_nlink(struct inode *inode)
 {
-	if (!(file->f_flags & O_NOATIME))
-		touch_atime(file->f_vfsmnt, file->f_dentry);
+	inode->i_nlink = 0;
 }
 
-int sync_inode(struct inode *inode, struct writeback_control *wbc);
-
-struct file_system_type {
-	const char *name;
-	int fs_flags;
-	int (*get_sb) (struct file_system_type *, int,
-		       const char *, void *, struct vfsmount *);
-	void (*kill_sb) (struct super_block *);
-	struct module *owner;
-	struct file_system_type * next;
-	struct list_head fs_supers;
-	struct lock_class_key s_lock_key;
-	struct lock_class_key s_umount_key;
-};
-
-extern int get_sb_bdev(struct file_system_type *fs_type,
-	int flags, const char *dev_name, void *data,
-	int (*fill_super)(struct super_block *, void *, int),
-	struct vfsmount *mnt);
-extern int get_sb_single(struct file_system_type *fs_type,
-	int flags, void *data,
-	int (*fill_super)(struct super_block *, void *, int),
-	struct vfsmount *mnt);
-extern int get_sb_nodev(struct file_system_type *fs_type,
-	int flags, void *data,
-	int (*fill_super)(struct super_block *, void *, int),
-	struct vfsmount *mnt);
-void generic_shutdown_super(struct super_block *sb);
-void kill_block_super(struct super_block *sb);
-void kill_anon_super(struct super_block *sb);
-void kill_litter_super(struct super_block *sb);
-void deactivate_super(struct super_block *sb);
-int set_anon_super(struct super_block *s, void *data);
-struct super_block *sget(struct file_system_type *type,
-			int (*test)(struct super_block *,void *),
-			int (*set)(struct super_block *,void *),
-			void *data);
-extern int get_sb_pseudo(struct file_system_type *, char *,
-	struct super_operations *ops, unsigned long,
-	struct vfsmount *mnt);
-extern int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
-int __put_super(struct super_block *sb);
-int __put_super_and_need_restart(struct super_block *sb);
-void unnamed_dev_init(void);
-
-/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
-#define fops_get(fops) \
-	(((fops) && try_module_get((fops)->owner) ? (fops) : NULL))
-#define fops_put(fops) \
-	do { if (fops) module_put((fops)->owner); } while(0)
-
-extern int register_filesystem(struct file_system_type *);
-extern int unregister_filesystem(struct file_system_type *);
-extern struct vfsmount *kern_mount(struct file_system_type *);
-extern int may_umount_tree(struct vfsmount *);
-extern int may_umount(struct vfsmount *);
-extern void umount_tree(struct vfsmount *, int, struct list_head *);
-extern void release_mounts(struct list_head *);
-extern long do_mount(char *, char *, char *, unsigned long, void *);
-extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int);
-extern void mnt_set_mountpoint(struct vfsmount *, struct dentry *,
-				  struct vfsmount *);
-extern struct vfsmount *collect_mounts(struct vfsmount *, struct dentry *);
-extern void drop_collected_mounts(struct vfsmount *);
-
-extern int vfs_statfs(struct dentry *, struct kstatfs *);
-
-/* /sys/fs */
-extern struct subsystem fs_subsys;
-
-#define FLOCK_VERIFY_READ  1
-#define FLOCK_VERIFY_WRITE 2
-
-extern int locks_mandatory_locked(struct inode *);
-extern int locks_mandatory_area(int, struct inode *, struct file *, loff_t, size_t);
-
-/*
- * Candidates for mandatory locking have the setgid bit set
- * but no group execute bit -  an otherwise meaningless combination.
- */
-#define MANDATORY_LOCK(inode) \
-	(IS_MANDLOCK(inode) && ((inode)->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
-
-static inline int locks_verify_locked(struct inode *inode)
+static inline int vfs_test_lock(struct file *filp, struct file_lock *fl)
 {
-	if (MANDATORY_LOCK(inode))
-		return locks_mandatory_locked(inode);
-	return 0;
-}
+	int rc;
 
-extern int rw_verify_area(int, struct file *, loff_t *, size_t);
-
-static inline int locks_verify_truncate(struct inode *inode,
-				    struct file *filp,
-				    loff_t size)
-{
-	if (inode->i_flock && MANDATORY_LOCK(inode))
-		return locks_mandatory_area(
-			FLOCK_VERIFY_WRITE, inode, filp,
-			size < inode->i_size ? size : inode->i_size,
-			(size < inode->i_size ? inode->i_size - size
-			 : size - inode->i_size)
-		);
+	if (filp->f_op && filp->f_op->lock)
+		return filp->f_op->lock(filp, F_GETLK, fl);
+	rc = posix_test_lock(filp, fl, fl);
+	if (rc == 0)
+		fl->fl_type = F_UNLCK;
 	return 0;
 }
 
-static inline int break_lease(struct inode *inode, unsigned int mode)
+static inline int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
 {
-	if (inode->i_flock)
-		return __break_lease(inode, mode);
-	return 0;
+	if (filp->f_op && filp->f_op->lock)
+		return filp->f_op->lock(filp, cmd, fl);
+	else
+		return  posix_lock_file_conf(filp, fl, conf);
 }
 
-/* fs/open.c */
-
-extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
-		       struct file *filp);
-extern long do_sys_open(int fdf, const char __user *filename, int flags,
-			int mode);
-extern struct file *filp_open(const char *, int, int);
-extern struct file * dentry_open(struct dentry *, struct vfsmount *, int);
-extern int filp_close(struct file *, fl_owner_t id);
-extern char * getname(const char __user *);
-
-/* fs/dcache.c */
-extern void __init vfs_caches_init_early(void);
-extern void __init vfs_caches_init(unsigned long);
-
-#define __getname()	kmem_cache_alloc(names_cachep, SLAB_KERNEL)
-#define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
-#ifndef CONFIG_AUDITSYSCALL
-#define putname(name)   __putname(name)
-#else
-extern void putname(const char *name);
-#endif
-
-extern int register_blkdev(unsigned int, const char *);
-extern int unregister_blkdev(unsigned int, const char *);
-extern struct block_device *bdget(dev_t);
-extern void bd_set_size(struct block_device *, loff_t size);
-extern void bd_forget(struct inode *inode);
-extern void bdput(struct block_device *);
-extern struct block_device *open_by_devnum(dev_t, unsigned);
-extern const struct file_operations def_blk_fops;
-extern const struct address_space_operations def_blk_aops;
-extern const struct file_operations def_chr_fops;
-extern const struct file_operations bad_sock_fops;
-extern const struct file_operations def_fifo_fops;
-extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
-extern int blkdev_ioctl(struct inode *, struct file *, unsigned, unsigned long);
-extern int blkdev_driver_ioctl(struct inode *inode, struct file *file,
-			       struct gendisk *disk, unsigned cmd,
-			       unsigned long arg);
-extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
-extern int blkdev_get(struct block_device *, mode_t, unsigned);
-extern int blkdev_put(struct block_device *);
-extern int bd_claim(struct block_device *, void *);
-extern void bd_release(struct block_device *);
-#ifdef CONFIG_SYSFS
-extern int bd_claim_by_disk(struct block_device *, void *, struct gendisk *);
-extern void bd_release_from_disk(struct block_device *, struct gendisk *);
-#else
-#define bd_claim_by_disk(bdev, holder, disk)	bd_claim(bdev, holder)
-#define bd_release_from_disk(bdev, disk)	bd_release(bdev)
-#endif
-
-/* fs/char_dev.c */
-#define CHRDEV_MAJOR_HASH_SIZE	255
-extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
-extern int register_chrdev_region(dev_t, unsigned, const char *);
-extern int register_chrdev(unsigned int, const char *,
-			   const struct file_operations *);
-extern int unregister_chrdev(unsigned int, const char *);
-extern void unregister_chrdev_region(dev_t, unsigned);
-extern int chrdev_open(struct inode *, struct file *);
-extern void chrdev_show(struct seq_file *,off_t);
-
-/* fs/block_dev.c */
-#define BLKDEV_MAJOR_HASH_SIZE	255
-#define BDEVNAME_SIZE	32	/* Largest string for a blockdev identifier */
-extern const char *__bdevname(dev_t, char *buffer);
-extern const char *bdevname(struct block_device *bdev, char *buffer);
-extern struct block_device *lookup_bdev(const char *);
-extern struct block_device *open_bdev_excl(const char *, int, void *);
-extern void close_bdev_excl(struct block_device *);
-extern void blkdev_show(struct seq_file *,off_t);
-
-extern void init_special_inode(struct inode *, umode_t, dev_t);
-
-/* Invalid inode operations -- fs/bad_inode.c */
-extern void make_bad_inode(struct inode *);
-extern int is_bad_inode(struct inode *);
-
-extern const struct file_operations read_fifo_fops;
-extern const struct file_operations write_fifo_fops;
-extern const struct file_operations rdwr_fifo_fops;
-
-extern int fs_may_remount_ro(struct super_block *);
-
-/*
- * return READ, READA, or WRITE
- */
-#define bio_rw(bio)		((bio)->bi_rw & (RW_MASK | RWA_MASK))
-
-/*
- * return data direction, READ or WRITE
- */
-#define bio_data_dir(bio)	((bio)->bi_rw & 1)
-
-extern int check_disk_change(struct block_device *);
-extern int invalidate_inodes(struct super_block *);
-extern int __invalidate_device(struct block_device *);
-extern int invalidate_partition(struct gendisk *, int);
-unsigned long invalidate_mapping_pages(struct address_space *mapping,
-					pgoff_t start, pgoff_t end);
-unsigned long invalidate_inode_pages(struct address_space *mapping);
-static inline void invalidate_remote_inode(struct inode *inode)
+static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
 {
-	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
-	    S_ISLNK(inode->i_mode))
-		invalidate_inode_pages(inode->i_mapping);
-}
-extern int invalidate_inode_pages2(struct address_space *mapping);
-extern int invalidate_inode_pages2_range(struct address_space *mapping,
-					 pgoff_t start, pgoff_t end);
-extern int write_inode_now(struct inode *, int);
-extern int filemap_fdatawrite(struct address_space *);
-extern int filemap_flush(struct address_space *);
-extern int filemap_fdatawait(struct address_space *);
-extern int filemap_write_and_wait(struct address_space *mapping);
-extern int filemap_write_and_wait_range(struct address_space *mapping,
-				        loff_t lstart, loff_t lend);
-extern int wait_on_page_writeback_range(struct address_space *mapping,
-				pgoff_t start, pgoff_t end);
-extern int __filemap_fdatawrite_range(struct address_space *mapping,
-				loff_t start, loff_t end, int sync_mode);
-
-extern long do_fsync(struct file *file, int datasync);
-extern void sync_supers(void);
-extern void sync_filesystems(int wait);
-extern void emergency_sync(void);
-extern void emergency_remount(void);
-extern int do_remount_sb(struct super_block *sb, int flags,
-			 void *data, int force);
-extern sector_t bmap(struct inode *, sector_t);
-extern int notify_change(struct dentry *, struct iattr *);
-extern int permission(struct inode *, int, struct nameidata *);
-extern int generic_permission(struct inode *, int,
-		int (*check_acl)(struct inode *, int));
-
-extern int get_write_access(struct inode *);
-extern int deny_write_access(struct file *);
-static inline void put_write_access(struct inode * inode)
-{
-	atomic_dec(&inode->i_writecount);
-}
-static inline void allow_write_access(struct file *file)
-{
-	if (file)
-		atomic_inc(&file->f_dentry->d_inode->i_writecount);
-}
-extern int do_pipe(int *);
-
-extern int open_namei(int dfd, const char *, int, int, struct nameidata *);
-extern int may_open(struct nameidata *, int, int);
-
-extern int kernel_read(struct file *, unsigned long, char *, unsigned long);
-extern struct file * open_exec(const char *);
- 
-/* fs/dcache.c -- generic fs support functions */
-extern int is_subdir(struct dentry *, struct dentry *);
-extern ino_t find_inode_number(struct dentry *, struct qstr *);
-
-#include <linux/err.h>
-
-/* needed for stackable file system support */
-extern loff_t default_llseek(struct file *file, loff_t offset, int origin);
-
-extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
-
-extern void inode_init_once(struct inode *);
-extern void iput(struct inode *);
-extern struct inode * igrab(struct inode *);
-extern ino_t iunique(struct super_block *, ino_t);
-extern int inode_needs_sync(struct inode *inode);
-extern void generic_delete_inode(struct inode *inode);
-extern void generic_drop_inode(struct inode *inode);
-
-extern struct inode *ilookup5_nowait(struct super_block *sb,
-		unsigned long hashval, int (*test)(struct inode *, void *),
-		void *data);
-extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
-		int (*test)(struct inode *, void *), void *data);
-extern struct inode *ilookup(struct super_block *sb, unsigned long ino);
-
-extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *);
-extern struct inode * iget_locked(struct super_block *, unsigned long);
-extern void unlock_new_inode(struct inode *);
-
-static inline struct inode *iget(struct super_block *sb, unsigned long ino)
-{
-	struct inode *inode = iget_locked(sb, ino);
-	
-	if (inode && (inode->i_state & I_NEW)) {
-		sb->s_op->read_inode(inode);
-		unlock_new_inode(inode);
-	}
-
-	return inode;
-}
-
-extern void __iget(struct inode * inode);
-extern void clear_inode(struct inode *);
-extern void destroy_inode(struct inode *);
-extern struct inode *new_inode(struct super_block *);
-extern int remove_suid(struct dentry *);
-extern void remove_dquot_ref(struct super_block *, int, struct list_head *);
-
-extern void __insert_inode_hash(struct inode *, unsigned long hashval);
-extern void remove_inode_hash(struct inode *);
-static inline void insert_inode_hash(struct inode *inode) {
-	__insert_inode_hash(inode, inode->i_ino);
-}
-
-extern struct file * get_empty_filp(void);
-extern void file_move(struct file *f, struct list_head *list);
-extern void file_kill(struct file *f);
-struct bio;
-extern void submit_bio(int, struct bio *);
-extern int bdev_read_only(struct block_device *);
-extern int set_blocksize(struct block_device *, int);
-extern int sb_set_blocksize(struct super_block *, int);
-extern int sb_min_blocksize(struct super_block *, int);
-
-extern int generic_file_mmap(struct file *, struct vm_area_struct *);
-extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
-extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
-extern int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
-extern ssize_t generic_file_read(struct file *, char __user *, size_t, loff_t *);
-int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
-extern ssize_t generic_file_write(struct file *, const char __user *, size_t, loff_t *);
-extern ssize_t generic_file_aio_read(struct kiocb *, char __user *, size_t, loff_t);
-extern ssize_t __generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t *);
-extern ssize_t generic_file_aio_write(struct kiocb *, const char __user *, size_t, loff_t);
-extern ssize_t generic_file_aio_write_nolock(struct kiocb *, const struct iovec *,
-		unsigned long, loff_t *);
-extern ssize_t generic_file_direct_write(struct kiocb *, const struct iovec *,
-		unsigned long *, loff_t, loff_t *, size_t, size_t);
-extern ssize_t generic_file_buffered_write(struct kiocb *, const struct iovec *,
-		unsigned long, loff_t, loff_t *, size_t, ssize_t);
-extern int generic_file_buffered_write_one_kernel_page(struct address_space *,
-						       pgoff_t, struct page *);
-extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
-extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
-ssize_t generic_file_write_nolock(struct file *file, const struct iovec *iov,
-				unsigned long nr_segs, loff_t *ppos);
-extern ssize_t generic_file_sendfile(struct file *, loff_t *, size_t, read_actor_t, void *);
-extern void do_generic_mapping_read(struct address_space *mapping,
-				    struct file_ra_state *, struct file *,
-				    loff_t *, read_descriptor_t *, read_actor_t, int);
-
-/* fs/splice.c */
-extern ssize_t generic_file_splice_read(struct file *, loff_t *,
-		struct pipe_inode_info *, size_t, unsigned int);
-extern ssize_t generic_file_splice_write(struct pipe_inode_info *,
-		struct file *, loff_t *, size_t, unsigned int);
-extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
-		struct file *out, loff_t *, size_t len, unsigned int flags);
-extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
-		size_t len, unsigned int flags);
-
-extern void
-file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
-extern ssize_t generic_file_readv(struct file *filp, const struct iovec *iov, 
-	unsigned long nr_segs, loff_t *ppos);
-ssize_t generic_file_writev(struct file *filp, const struct iovec *iov, 
-			unsigned long nr_segs, loff_t *ppos);
-extern loff_t no_llseek(struct file *file, loff_t offset, int origin);
-extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin);
-extern loff_t remote_llseek(struct file *file, loff_t offset, int origin);
-extern int generic_file_open(struct inode * inode, struct file * filp);
-extern int nonseekable_open(struct inode * inode, struct file * filp);
-
-#ifdef CONFIG_FS_XIP
-extern ssize_t xip_file_read(struct file *filp, char __user *buf, size_t len,
-			     loff_t *ppos);
-extern ssize_t xip_file_sendfile(struct file *in_file, loff_t *ppos,
-				 size_t count, read_actor_t actor,
-				 void *target);
-extern int xip_file_mmap(struct file * file, struct vm_area_struct * vma);
-extern ssize_t xip_file_write(struct file *filp, const char __user *buf,
-			      size_t len, loff_t *ppos);
-extern int xip_truncate_page(struct address_space *mapping, loff_t from);
-#else
-static inline int xip_truncate_page(struct address_space *mapping, loff_t from)
-{
+	if (filp->f_op && filp->f_op->lock)
+		return filp->f_op->lock(filp, F_CANCELLK, fl);
 	return 0;
 }
-#endif
 
-static inline void do_generic_file_read(struct file * filp, loff_t *ppos,
-					read_descriptor_t * desc,
-					read_actor_t actor, int nonblock)
-{
-	do_generic_mapping_read(filp->f_mapping,
-				&filp->f_ra,
-				filp,
-				ppos,
-				desc,
-				actor,
-				nonblock);
-}
-
-ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-	struct block_device *bdev, const struct iovec *iov, loff_t offset,
-	unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
-	int lock_type);
-
-enum {
-	DIO_LOCKING = 1, /* need locking between buffered and direct access */
-	DIO_NO_LOCKING,  /* bdev; no locking at all between buffered/direct */
-	DIO_OWN_LOCKING, /* filesystem locks buffered and direct internally */
-};
-
-static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
-	struct inode *inode, struct block_device *bdev, const struct iovec *iov,
-	loff_t offset, unsigned long nr_segs, get_block_t get_block,
-	dio_iodone_t end_io)
-{
-	return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
-				nr_segs, get_block, end_io, DIO_LOCKING);
-}
-
-static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb,
-	struct inode *inode, struct block_device *bdev, const struct iovec *iov,
-	loff_t offset, unsigned long nr_segs, get_block_t get_block,
-	dio_iodone_t end_io)
-{
-	return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
-				nr_segs, get_block, end_io, DIO_NO_LOCKING);
-}
-
-static inline ssize_t blockdev_direct_IO_own_locking(int rw, struct kiocb *iocb,
-	struct inode *inode, struct block_device *bdev, const struct iovec *iov,
-	loff_t offset, unsigned long nr_segs, get_block_t get_block,
-	dio_iodone_t end_io)
-{
-	return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
-				nr_segs, get_block, end_io, DIO_OWN_LOCKING);
-}
-
-extern const struct file_operations generic_ro_fops;
-
-#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
-
-extern int vfs_readlink(struct dentry *, char __user *, int, const char *);
-extern int vfs_follow_link(struct nameidata *, const char *);
-extern int page_readlink(struct dentry *, char __user *, int);
-extern void *page_follow_link_light(struct dentry *, struct nameidata *);
-extern void page_put_link(struct dentry *, struct nameidata *, void *);
-extern int __page_symlink(struct inode *inode, const char *symname, int len,
-		gfp_t gfp_mask);
-extern int page_symlink(struct inode *inode, const char *symname, int len);
-extern struct inode_operations page_symlink_inode_operations;
-extern int generic_readlink(struct dentry *, char __user *, int);
-extern void generic_fillattr(struct inode *, struct kstat *);
-extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
-void inode_add_bytes(struct inode *inode, loff_t bytes);
-void inode_sub_bytes(struct inode *inode, loff_t bytes);
-loff_t inode_get_bytes(struct inode *inode);
-void inode_set_bytes(struct inode *inode, loff_t bytes);
-
-extern int vfs_readdir(struct file *, filldir_t, void *);
-
-extern int vfs_stat(char __user *, struct kstat *);
-extern int vfs_lstat(char __user *, struct kstat *);
-extern int vfs_stat_fd(int dfd, char __user *, struct kstat *);
-extern int vfs_lstat_fd(int dfd, char __user *, struct kstat *);
-extern int vfs_fstat(unsigned int, struct kstat *);
-
-extern int vfs_ioctl(struct file *, unsigned int, unsigned int, unsigned long);
-
-extern struct file_system_type *get_fs_type(const char *name);
-extern struct super_block *get_super(struct block_device *);
-extern struct super_block *user_get_super(dev_t);
-extern void drop_super(struct super_block *sb);
-
-extern int dcache_dir_open(struct inode *, struct file *);
-extern int dcache_dir_close(struct inode *, struct file *);
-extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
-extern int dcache_readdir(struct file *, void *, filldir_t);
-extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *);
-extern int simple_statfs(struct dentry *, struct kstatfs *);
-extern int simple_link(struct dentry *, struct inode *, struct dentry *);
-extern int simple_unlink(struct inode *, struct dentry *);
-extern int simple_rmdir(struct inode *, struct dentry *);
-extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
-extern int simple_sync_file(struct file *, struct dentry *, int);
-extern int simple_empty(struct dentry *);
-extern int simple_readpage(struct file *file, struct page *page);
-extern int simple_prepare_write(struct file *file, struct page *page,
-			unsigned offset, unsigned to);
-extern int simple_commit_write(struct file *file, struct page *page,
-				unsigned offset, unsigned to);
-
-extern struct dentry *simple_lookup(struct inode *, struct dentry *, struct nameidata *);
-extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
-extern const struct file_operations simple_dir_operations;
-extern struct inode_operations simple_dir_inode_operations;
-struct tree_descr { char *name; const struct file_operations *ops; int mode; };
-struct dentry *d_alloc_name(struct dentry *, const char *);
-extern int simple_fill_super(struct super_block *, int, struct tree_descr *);
-extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count);
-extern void simple_release_fs(struct vfsmount **mount, int *count);
-
-extern ssize_t simple_read_from_buffer(void __user *, size_t, loff_t *, const void *, size_t);
-
-#ifdef CONFIG_MIGRATION
-extern int buffer_migrate_page(struct address_space *,
-				struct page *, struct page *);
-#else
-#define buffer_migrate_page NULL
 #endif
-
-extern int inode_change_ok(struct inode *, struct iattr *);
-extern int __must_check inode_setattr(struct inode *, struct iattr *);
-
-extern void file_update_time(struct file *file);
-
-static inline ino_t parent_ino(struct dentry *dentry)
-{
-	ino_t res;
-
-	spin_lock(&dentry->d_lock);
-	res = dentry->d_parent->d_inode->i_ino;
-	spin_unlock(&dentry->d_lock);
-	return res;
-}
-
-/* kernel/fork.c */
-extern int unshare_files(void);
-
-/* Transaction based IO helpers */
-
-/*
- * An argresp is stored in an allocated page and holds the
- * size of the argument or response, along with its content
- */
-struct simple_transaction_argresp {
-	ssize_t size;
-	char data[0];
-};
-
-#define SIMPLE_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct simple_transaction_argresp))
-
-char *simple_transaction_get(struct file *file, const char __user *buf,
-				size_t size);
-ssize_t simple_transaction_read(struct file *file, char __user *buf,
-				size_t size, loff_t *pos);
-int simple_transaction_release(struct inode *inode, struct file *file);
-
-static inline void simple_transaction_set(struct file *file, size_t n)
-{
-	struct simple_transaction_argresp *ar = file->private_data;
-
-	BUG_ON(n > SIMPLE_TRANSACTION_LIMIT);
-
-	/*
-	 * The barrier ensures that ar->size will really remain zero until
-	 * ar->data is ready for reading.
-	 */
-	smp_mb();
-	ar->size = n;
-}
-
-/*
- * simple attribute files
- *
- * These attributes behave similar to those in sysfs:
- *
- * Writing to an attribute immediately sets a value, an open file can be
- * written to multiple times.
- *
- * Reading from an attribute creates a buffer from the value that might get
- * read with multiple read calls. When the attribute has been read
- * completely, no further read calls are possible until the file is opened
- * again.
- *
- * All attributes contain a text representation of a numeric value
- * that are accessed with the get() and set() functions.
- */
-#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)		\
-static int __fops ## _open(struct inode *inode, struct file *file)	\
-{									\
-	__simple_attr_check_format(__fmt, 0ull);			\
-	return simple_attr_open(inode, file, __get, __set, __fmt);	\
-}									\
-static struct file_operations __fops = {				\
-	.owner	 = THIS_MODULE,						\
-	.open	 = __fops ## _open,					\
-	.release = simple_attr_close,					\
-	.read	 = simple_attr_read,					\
-	.write	 = simple_attr_write,					\
-};
-
-static inline void __attribute__((format(printf, 1, 2)))
-__simple_attr_check_format(const char *fmt, ...)
-{
-	/* don't do anything, just let the compiler check the arguments; */
-}
-
-int simple_attr_open(struct inode *inode, struct file *file,
-		     u64 (*get)(void *), void (*set)(void *, u64),
-		     const char *fmt);
-int simple_attr_close(struct inode *inode, struct file *file);
-ssize_t simple_attr_read(struct file *file, char __user *buf,
-			 size_t len, loff_t *ppos);
-ssize_t simple_attr_write(struct file *file, const char __user *buf,
-			  size_t len, loff_t *ppos);
-
-
-#ifdef CONFIG_SECURITY
-static inline char *alloc_secdata(void)
-{
-	return (char *)get_zeroed_page(GFP_KERNEL);
-}
-
-static inline void free_secdata(void *secdata)
-{
-	free_page((unsigned long)secdata);
-}
-#else
-static inline char *alloc_secdata(void)
-{
-	return (char *)1;
-}
-
-static inline void free_secdata(void *secdata)
-{ }
-#endif	/* CONFIG_SECURITY */
-
-/**
- *  * drop_nlink - directly drop an inode's link count
- *   * @inode: inode
- *    *
- *     * This is a low-level filesystem helper to replace any
- *      * direct filesystem manipulation of i_nlink.  In cases
- *       * where we are attempting to track writes to the
- *        * filesystem, a decrement to zero means an imminent
- *         * write when the file is truncated and actually unlinked
- *          * on the filesystem.
- *           */
-static inline void drop_nlink(struct inode *inode)
-{
-	        inode->i_nlink--;
-}
-
-/**
- *  * clear_nlink - directly zero an inode's link count
- *   * @inode: inode
- *    * 
- *     * This is a low-level filesystem helper to replace any
- *      * direct filesystem manipulation of i_nlink.  See
- *       * drop_nlink() for why we care about i_nlink hitting zero.
- *        **/
-static inline void clear_nlink(struct inode *inode)
-{
-	        inode->i_nlink = 0;
-}
-
-#endif /* __KERNEL__ */
-#endif /* _LINUX_FS_H */
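For orientation, a minimal sketch (assuming a 2.6.18-era kernel and hypothetical function and variable names) of how a filesystem caller might exercise the backported vfs_lock_file() wrapper added above:

/* illustrative only: take a whole-file POSIX write lock via the wrapper above */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/sched.h>
#include <linux/string.h>

static int example_set_write_lock(struct file *filp)
{
	struct file_lock fl;

	memset(&fl, 0, sizeof(fl));
	fl.fl_type  = F_WRLCK;
	fl.fl_flags = FL_POSIX;
	fl.fl_start = 0;
	fl.fl_end   = OFFSET_MAX;
	fl.fl_owner = current->files;
	fl.fl_pid   = current->tgid;

	/* dispatches to filp->f_op->lock() when present, else posix_lock_file_conf() */
	return vfs_lock_file(filp, F_SETLK, &fl, NULL);
}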

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/highmem.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/highmem.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/highmem.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,36 @@
+#ifndef LINUX_HIGHMEM_H
+#define LINUX_HIGHMEM_H
+
+#include_next <linux/highmem.h>
+
+static inline void zero_user_segments(struct page *page,
+	unsigned start1, unsigned end1,
+	unsigned start2, unsigned end2)
+{
+	void *kaddr = kmap_atomic(page, KM_USER0);
+
+	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);
+
+	if (end1 > start1)
+		memset(kaddr + start1, 0, end1 - start1);
+
+	if (end2 > start2)
+		memset(kaddr + start2, 0, end2 - start2);
+
+	kunmap_atomic(kaddr, KM_USER0);
+	flush_dcache_page(page);
+}
+
+static inline void zero_user_segment(struct page *page,
+	unsigned start, unsigned end)
+{
+	zero_user_segments(page, start, end, 0, 0);
+}
+
+static inline void zero_user(struct page *page,
+	unsigned start, unsigned size)
+{
+	zero_user_segments(page, start, start + size, 0, 0);
+}
+
+#endif
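A minimal usage sketch for the backported zero_user() above, assuming the caller already holds a locked page; the function name is hypothetical:

/* illustrative only: zero a locked page from 'from' to the end of the page */
#include <linux/highmem.h>
#include <linux/mm.h>

static void example_zero_page_tail(struct page *page, unsigned int from)
{
	if (from < PAGE_SIZE)
		zero_user(page, from, PAGE_SIZE - from);  /* kmaps, memsets, flushes dcache */
}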

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/if_vlan.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/if_vlan.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/if_vlan.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -21,4 +21,12 @@
 	return vlan_dev_info(dev)->vlan_id;
 }
 
+#define vlan_dev_real_dev(netdev) (VLAN_DEV_INFO(netdev)->real_dev)
+#define vlan_dev_vlan_id(netdev) (VLAN_DEV_INFO(netdev)->vlan_id)
+
+static inline int is_vlan_dev(struct net_device *dev)
+{
+	return dev->priv_flags & IFF_802_1Q_VLAN;
+}
+
 #endif
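A short sketch combining the backported is_vlan_dev() check with the vlan_dev_real_dev() macro added above; the helper name is hypothetical:

/* illustrative only: resolve a possibly-VLAN net_device to its underlying device */
#include <linux/netdevice.h>
#include <linux/if_vlan.h>

static struct net_device *example_real_dev(struct net_device *dev)
{
	if (is_vlan_dev(dev))
		return vlan_dev_real_dev(dev);
	return dev;
}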

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/inet.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/inet.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/inet.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -6,4 +6,203 @@
 #define INET_ADDRSTRLEN		(16)
 #define INET6_ADDRSTRLEN	(48)
 
+#define IN6PTON_XDIGIT	  	0x00010000
+#define IN6PTON_DIGIT	   	0x00020000
+#define IN6PTON_COLON_MASK      0x00700000
+#define IN6PTON_COLON_1		0x00100000      /* single : requested */
+#define IN6PTON_COLON_2	 	0x00200000      /* second : requested */
+#define IN6PTON_COLON_1_2       0x00400000      /* :: requested */
+#define IN6PTON_DOT	     	0x00800000      /* . */
+#define IN6PTON_DELIM	   	0x10000000
+#define IN6PTON_NULL	    	0x20000000      /* first/tail */
+#define IN6PTON_UNKNOWN	 	0x40000000
+
+static inline int xdigit2bin(char c, int delim)
+{
+	if (c == delim || c == '\0')
+		return IN6PTON_DELIM;
+	if (c == ':')
+		return IN6PTON_COLON_MASK;
+	if (c == '.')
+		return IN6PTON_DOT;
+	if (c >= '0' && c <= '9')
+		return (IN6PTON_XDIGIT | IN6PTON_DIGIT| (c - '0'));
+	if (c >= 'a' && c <= 'f')
+		return (IN6PTON_XDIGIT | (c - 'a' + 10));
+	if (c >= 'A' && c <= 'F')
+		return (IN6PTON_XDIGIT | (c - 'A' + 10));
+	if (delim == -1)
+		return IN6PTON_DELIM;
+	return IN6PTON_UNKNOWN;
+}
+
+static inline int in4_pton(const char *src, int srclen,
+	     u8 *dst,
+	     int delim, const char **end)
+{
+	const char *s;
+	u8 *d;
+	u8 dbuf[4];
+	int ret = 0;
+	int i;
+	int w = 0;
+
+	if (srclen < 0)
+		srclen = strlen(src);
+	s = src;
+	d = dbuf;
+	i = 0;
+	while(1) {
+		int c;
+		c = xdigit2bin(srclen > 0 ? *s : '\0', delim);
+		if (!(c & (IN6PTON_DIGIT | IN6PTON_DOT | IN6PTON_DELIM | IN6PTON_COLON_MASK))) {
+			goto out;
+		}
+		if (c & (IN6PTON_DOT | IN6PTON_DELIM | IN6PTON_COLON_MASK)) {
+			if (w == 0)
+				goto out;
+			*d++ = w & 0xff;
+			w = 0;
+			i++;
+			if (c & (IN6PTON_DELIM | IN6PTON_COLON_MASK)) {
+				if (i != 4)
+					goto out;
+				break;
+			}
+			goto cont;
+		}
+		w = (w * 10) + c;
+		if ((w & 0xffff) > 255) {
+			goto out;
+		}
+cont:
+		if (i >= 4)
+			goto out;
+		s++;
+		srclen--;
+	}
+	ret = 1;
+	memcpy(dst, dbuf, sizeof(dbuf));
+out:
+	if (end)
+		*end = s;
+	return ret;
+}
+
+static inline int in6_pton(const char *src, int srclen,
+	     u8 *dst,
+	     int delim, const char **end)
+{
+	const char *s, *tok = NULL;
+	u8 *d, *dc = NULL;
+	u8 dbuf[16];
+	int ret = 0;
+	int i;
+	int state = IN6PTON_COLON_1_2 | IN6PTON_XDIGIT | IN6PTON_NULL;
+	int w = 0;
+
+	memset(dbuf, 0, sizeof(dbuf));
+
+	s = src;
+	d = dbuf;
+	if (srclen < 0)
+		srclen = strlen(src);
+
+	while (1) {
+		int c;
+
+		c = xdigit2bin(srclen > 0 ? *s : '\0', delim);
+		if (!(c & state))
+			goto out;
+		if (c & (IN6PTON_DELIM | IN6PTON_COLON_MASK)) {
+			/* process one 16-bit word */
+			if (!(state & IN6PTON_NULL)) {
+				*d++ = (w >> 8) & 0xff;
+				*d++ = w & 0xff;
+			}
+			w = 0;
+			if (c & IN6PTON_DELIM) {
+				/* We've processed last word */
+				break;
+			}
+			/*
+			 * COLON_1 => XDIGIT
+			 * COLON_2 => XDIGIT|DELIM
+			 * COLON_1_2 => COLON_2
+			 */
+			switch (state & IN6PTON_COLON_MASK) {
+			case IN6PTON_COLON_2:
+				dc = d;
+				state = IN6PTON_XDIGIT | IN6PTON_DELIM;
+				if (dc - dbuf >= sizeof(dbuf))
+					state |= IN6PTON_NULL;
+				break;
+			case IN6PTON_COLON_1|IN6PTON_COLON_1_2:
+				state = IN6PTON_XDIGIT | IN6PTON_COLON_2;
+				break;
+			case IN6PTON_COLON_1:
+				state = IN6PTON_XDIGIT;
+				break;
+			case IN6PTON_COLON_1_2:
+				state = IN6PTON_COLON_2;
+				break;
+			default:
+				state = 0;
+			}
+			tok = s + 1;
+			goto cont;
+		}
+
+		if (c & IN6PTON_DOT) {
+			ret = in4_pton(tok ? tok : s, srclen + (int)(s - tok), d, delim, &s);
+			if (ret > 0) {
+				d += 4;
+				break;
+			}
+			goto out;
+		}
+
+		w = (w << 4) | (0xff & c);
+		state = IN6PTON_COLON_1 | IN6PTON_DELIM;
+		if (!(w & 0xf000)) {
+			state |= IN6PTON_XDIGIT;
+		}
+		if (!dc && d + 2 < dbuf + sizeof(dbuf)) {
+			state |= IN6PTON_COLON_1_2;
+			state &= ~IN6PTON_DELIM;
+		}
+		if (d + 2 >= dbuf + sizeof(dbuf)) {
+			state &= ~(IN6PTON_COLON_1|IN6PTON_COLON_1_2);
+		}
+cont:
+		if ((dc && d + 4 < dbuf + sizeof(dbuf)) ||
+		    d + 4 == dbuf + sizeof(dbuf)) {
+			state |= IN6PTON_DOT;
+		}
+		if (d >= dbuf + sizeof(dbuf)) {
+			state &= ~(IN6PTON_XDIGIT|IN6PTON_COLON_MASK);
+		}
+		s++;
+		srclen--;
+	}
+
+	i = 15; d--;
+
+	if (dc) {
+		while(d >= dc)
+			dst[i--] = *d--;
+		while(i >= dc - dbuf)
+			dst[i--] = 0;
+		while(i >= 0)
+			dst[i--] = *d--;
+	} else
+		memcpy(dst, dbuf, sizeof(dbuf));
+
+	ret = 1;
+out:
+	if (end)
+		*end = s;
+	return ret;
+}
+
 #endif /* __BACKPORT_LINUX_INET_H_TO_2_6_26__ */
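A minimal sketch of feeding a dotted-quad string to the backported in4_pton() above; the literal address and function name are hypothetical, and the destination buffer must hold at least four bytes:

/* illustrative only: parse "10.0.0.1" into a 4-byte buffer */
#include <linux/inet.h>
#include <linux/types.h>
#include <linux/errno.h>

static int example_parse_ipv4(u8 *addr)
{
	const char *end;

	if (!in4_pton("10.0.0.1", -1, addr, -1, &end))
		return -EINVAL;  /* in4_pton() returns 1 on success, 0 on a parse error */
	return 0;
}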

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/kernel.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/kernel.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/kernel.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,14 +3,32 @@
 
 #include_next <linux/kernel.h>
 
-#define USHORT_MAX     ((u16)(~0U))
+#include <asm/errno.h>
+#include <asm/string.h>
 
+#define USHORT_MAX	((u16)(~0U))
 #define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
 
-#endif
-#ifndef BACKPORT_KERNEL_H_2_6_19
-#define BACKPORT_KERNEL_H_2_6_19
+static inline int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
+{
+	char *tail;
+	unsigned long val;
+	size_t len;
 
-#include <linux/log2.h>
+	*res = 0;
+	len = strlen(cp);
+	if (len == 0)
+		return -EINVAL;
 
+	val = simple_strtoul(cp, &tail, base);
+	if ((*tail == '\0') ||
+		((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
+		*res = val;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+
 #endif
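A small sketch of the backported strict_strtoul() parsing a decimal string, as a sysfs store handler might; names are hypothetical:

/* illustrative only: parse a decimal count supplied as text (e.g. from sysfs) */
#include <linux/kernel.h>
#include <linux/errno.h>

static int example_parse_count(const char *buf, unsigned long *count)
{
	if (strict_strtoul(buf, 10, count))
		return -EINVAL;  /* rejects trailing garbage, but tolerates one final newline */
	return 0;
}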

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/list.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/list.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/list.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,18 +1,9 @@
-#ifndef __BACKPORT_LINUX_LIST_H
-#define __BACKPORT_LINUX_LIST_H
+#ifndef __BACKPORT_LINUX_LIST_H_TO_2_6_24__
+#define __BACKPORT_LINUX_LIST_H_TO_2_6_24__
+#include_next<linux/list.h>
 
-#include_next <linux/list.h>
-
-/**
- * + * list_first_entry - get the first element from a list
- * + * @ptr:       the list head to take the element from.
- * + * @type:      the type of the struct this is embedded in.
- * + * @member:    the name of the list_struct within the struct.
- * + *
- * + * Note, that list is expected to be not empty.
- * + */
-
 #define list_first_entry(ptr, type, member) \
 	list_entry((ptr)->next, type, member)
 
-#endif /* __BACKPORT_LINUX_LIST_H */
+
+#endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/log2.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/log2.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/log2.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -64,6 +64,15 @@
 	return 1UL << fls_long(n - 1);
 }
 
+/*
+ * round down to nearest power of two
+ */
+static inline __attribute__((const))
+unsigned long __rounddown_pow_of_two(unsigned long n)
+{
+	return 1UL << (fls_long(n) - 1);
+}
+
 /**
  * ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value
  * @n - parameter
@@ -166,4 +175,20 @@
 	__roundup_pow_of_two(n)			\
  )
 
+/**
+ * rounddown_pow_of_two - round the given value down to nearest power of two
+ * @n - parameter
+ *
+ * round the given value down to the nearest power of two
+ * - the result is undefined when n == 0
+ * - this can be used to initialise global variables from constant data
+ */
+#define rounddown_pow_of_two(n)			\
+(						\
+	__builtin_constant_p(n) ? (		\
+		(n == 1) ? 0 :			\
+		(1UL << ilog2(n))) :		\
+	__rounddown_pow_of_two(n)		\
+ )
+
 #endif /* _LINUX_LOG2_H */
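For reference, a minimal sketch using the new rounddown_pow_of_two() to clamp a requested size to a power of two; the function name is hypothetical:

/* illustrative only: clamp a requested queue depth to a power of two */
#include <linux/log2.h>

static unsigned long example_ring_entries(unsigned long requested)
{
	if (requested == 0)
		return 1;  /* rounddown_pow_of_two() is undefined for n == 0 */
	return rounddown_pow_of_two(requested);
}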

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/magic.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/magic.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/magic.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,6 @@
+#ifndef BACKPORT_LINUX_MAGIC_H
+#define BACKPORT_LINUX_MAGIC_H
+
+#define NFS_SUPER_MAGIC		0x6969
+
+#endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/mm.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/mm.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/mm.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -2,26 +2,39 @@
 #define _BACKPORT_LINUX_MM_H_
 
 #include_next <linux/mm.h>
+#include <linux/vmstat.h>
 
 #if defined(__i386__)
 #include <asm/highmem.h>
 #endif
 
-/*
- * Determine if an address is within the vmalloc range
- *
- * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
- * is no special casing required.
- */
-static inline int is_vmalloc_addr(const void *x)
+#define VM_CAN_NONLINEAR 0x08000000     /* Has ->fault & does nonlinear pages */
+
+#define is_vmalloc_addr(x) ((unsigned long)(x) >= VMALLOC_START && (unsigned long)(x) < VMALLOC_END)
+
+struct shrinker {
+	shrinker_t		shrink;
+	struct list_head	list;
+	int			seeks;  /* seeks to recreate an obj */
+	long			nr;     /* objs pending delete */
+};
+
+static inline void task_io_account_cancelled_write(size_t bytes)
 {
-#ifdef CONFIG_MMU
-	unsigned long addr = (unsigned long)x;
+}
 
-	return addr >= VMALLOC_START && addr < VMALLOC_END;
-#else
-	return 0;
-#endif
+static inline void cancel_dirty_page(struct page *page, unsigned int account_size)
+{
+	if (TestClearPageDirty(page)) {
+		struct address_space *mapping = page->mapping;
+		if (mapping && mapping_cap_account_dirty(mapping)) {
+			dec_zone_page_state(page, NR_FILE_DIRTY);
+			dec_bdi_stat(mapping->backing_dev_info,
+					BDI_RECLAIMABLE);
+			if (account_size)
+				task_io_account_cancelled_write(account_size);
+		}
+	}
 }
 
 #endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/mount.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/mount.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/mount.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,11 @@
+#ifndef BACKPORT_LINUX_MOUNT_H
+#define BACKPORT_LINUX_MOUNT_H
+
+#include_next <linux/mount.h>
+#include <linux/fs.h>
+
+extern int mnt_want_write(struct vfsmount *mnt);
+extern void mnt_drop_write(struct vfsmount *mnt);
+extern int init_mnt_writers(void);
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/mpage.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/mpage.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/mpage.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,15 @@
+#ifndef BACKPORT_LINUX_MPAGE_H
+#define BACKPORT_LINUX_MPAGE_H
+
+#include_next <linux/mpage.h>
+#include <linux/pagevec.h>
+
+typedef int (*backport_writepage_t)(struct page *page, struct writeback_control *wbc,
+                                void *data);
+
+extern int backport_write_cache_pages(struct address_space *mapping,
+                      struct writeback_control *wbc, backport_writepage_t writepage,
+                      void *data);
+
+#define write_cache_pages backport_write_cache_pages
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/namei.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/namei.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/namei.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,27 @@
+#ifndef BACKPORT_LINUX_NAMEI_H
+#define BACKPORT_LINUX_NAMEI_H
+
+#include_next <linux/namei.h>
+#include <linux/mount.h>
+
+static inline int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
+		    const char *name, unsigned int flags,
+		    struct nameidata *nd)
+{
+	int retval;
+
+	/* same as do_path_lookup */
+	nd->last_type = LAST_ROOT;
+	nd->flags = flags;
+	nd->depth = 0;
+
+	nd->dentry = dentry;
+	nd->mnt = mnt;
+	mntget(nd->mnt);
+	dget(nd->dentry);
+
+	retval = path_walk(name, nd);
+
+	return retval;
+}
+#endif /* BACKPORT_LINUX_NAMEI_H */

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/net.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/net.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/net.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -4,4 +4,16 @@
 #include_next <linux/net.h>
 #include <linux/random.h>
 
+enum sock_shutdown_cmd {
+	SHUT_RD		= 0,
+	SHUT_WR		= 1,
+	SHUT_RDWR	= 2,
+};
+
+
+static inline int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd flags)
+{
+	return sock->ops->shutdown(sock, flags);
+}
+
 #endif
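A minimal sketch of the backported kernel_sock_shutdown() closing both directions of an in-kernel socket before release; the caller name is hypothetical:

/* illustrative only: shut down both directions of a kernel socket, then release it */
#include <linux/net.h>

static void example_close_socket(struct socket *sock)
{
	kernel_sock_shutdown(sock, SHUT_RDWR);  /* calls sock->ops->shutdown(sock, 2) */
	sock_release(sock);
}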

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/netdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/netdevice.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/netdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -15,6 +15,7 @@
 	(netdev)->ethtool_ops = (struct ethtool_ops *)(ops)
 
 #define NETIF_F_LRO		32768   /* large receive offload */
+#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
 
 #define dev_get_by_name(net, name) dev_get_by_name(name)
 #endif

Deleted: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/nfs_fs.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/nfs_fs.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/nfs_fs.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,617 +0,0 @@
-/*
- *  linux/include/linux/nfs_fs.h
- *
- *  Copyright (C) 1992  Rick Sladkey
- *
- *  OS-specific nfs filesystem definitions and declarations
- */
-
-#ifndef _LINUX_NFS_FS_H
-#define _LINUX_NFS_FS_H
-
-#define NFS_SUPER_MAGIC         0x6969
-
-/* Default timeout values */
-#define NFS_DEF_UDP_TIMEO	(11)
-#define NFS_DEF_UDP_RETRANS	(3)
-#define NFS_DEF_TCP_TIMEO	(600)
-#define NFS_DEF_TCP_RETRANS	(2)
-
-#define NFS_MAX_UDP_TIMEOUT	(60*HZ)
-#define NFS_MAX_TCP_TIMEOUT	(600*HZ)
-
-#define NFS_DEF_ACREGMIN	(3)
-#define NFS_DEF_ACREGMAX	(60)
-#define NFS_DEF_ACDIRMIN	(30)
-#define NFS_DEF_ACDIRMAX	(60)
-
-/*
- * When flushing a cluster of dirty pages, there can be different
- * strategies:
- */
-#define FLUSH_SYNC		1	/* file being synced, or contention */
-#define FLUSH_STABLE		4	/* commit to stable storage */
-#define FLUSH_LOWPRI		8	/* low priority background flush */
-#define FLUSH_HIGHPRI		16	/* high priority memory reclaim flush */
-#define FLUSH_NOCOMMIT		32	/* Don't send the NFSv3/v4 COMMIT */
-#define FLUSH_INVALIDATE	64	/* Invalidate the page cache */
-#define FLUSH_NOWRITEPAGE	128	/* Don't call writepage() */
-
-#ifdef __KERNEL__
-
-#include <linux/in.h>
-#include <linux/kref.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/rbtree.h>
-#include <linux/rwsem.h>
-#include <linux/wait.h>
-
-#include <linux/sunrpc/debug.h>
-#include <linux/sunrpc/auth.h>
-#include <linux/sunrpc/clnt.h>
-
-#include <linux/nfs.h>
-#include <linux/nfs2.h>
-#include <linux/nfs3.h>
-#include <linux/nfs4.h>
-#include <linux/nfs_xdr.h>
-#include <linux/nfs_fs_sb.h>
-
-#include <linux/mempool.h>
-
-#include <linux/path.h>
-
-/*
- * These are the default flags for swap requests
- */
-#define NFS_RPC_SWAPFLAGS		(RPC_TASK_SWAPPER|RPC_TASK_ROOTCREDS)
-
-/*
- * NFSv3/v4 Access mode cache entry
- */
-struct nfs_access_entry {
-	struct rb_node		rb_node;
-	struct list_head	lru;
-	unsigned long		jiffies;
-	struct rpc_cred *	cred;
-	int			mask;
-};
-
-struct nfs4_state;
-struct nfs_open_context {
-	atomic_t count;
-	struct path path;
-	struct rpc_cred *cred;
-	struct nfs4_state *state;
-	fl_owner_t lockowner;
-	int mode;
-
-	unsigned long flags;
-#define NFS_CONTEXT_ERROR_WRITE		(0)
-	int error;
-
-	struct list_head list;
-
-	__u64 dir_cookie;
-};
-
-/*
- * NFSv4 delegation
- */
-struct nfs_delegation;
-
-struct posix_acl;
-
-/*
- * nfs fs inode data in memory
- */
-struct nfs_inode {
-	/*
-	 * The 64bit 'inode number'
-	 */
-	__u64 fileid;
-
-	/*
-	 * NFS file handle
-	 */
-	struct nfs_fh		fh;
-
-	/*
-	 * Various flags
-	 */
-	unsigned long		flags;			/* atomic bit ops */
-	unsigned long		cache_validity;		/* bit mask */
-
-	/*
-	 * read_cache_jiffies is when we started read-caching this inode.
-	 * attrtimeo is for how long the cached information is assumed
-	 * to be valid. A successful attribute revalidation doubles
-	 * attrtimeo (up to acregmax/acdirmax), a failure resets it to
-	 * acregmin/acdirmin.
-	 *
-	 * We need to revalidate the cached attrs for this inode if
-	 *
-	 *	jiffies - read_cache_jiffies > attrtimeo
-	 */
-	unsigned long		read_cache_jiffies;
-	unsigned long		attrtimeo;
-	unsigned long		attrtimeo_timestamp;
-	__u64			change_attr;		/* v4 only */
-
-	unsigned long		last_updated;
-	/* "Generation counter" for the attribute cache. This is
-	 * bumped whenever we update the metadata on the
-	 * server.
-	 */
-	unsigned long		cache_change_attribute;
-
-	struct rb_root		access_cache;
-	struct list_head	access_cache_entry_lru;
-	struct list_head	access_cache_inode_lru;
-#ifdef CONFIG_NFS_V3_ACL
-	struct posix_acl	*acl_access;
-	struct posix_acl	*acl_default;
-#endif
-
-	/*
-	 * This is the cookie verifier used for NFSv3 readdir
-	 * operations
-	 */
-	__be32			cookieverf[2];
-
-	/*
-	 * This is the list of dirty unwritten pages.
-	 */
-	struct radix_tree_root	nfs_page_tree;
-
-	unsigned long		ncommit,
-				npages;
-
-	/* Open contexts for shared mmap writes */
-	struct list_head	open_files;
-
-	/* Number of in-flight sillydelete RPC calls */
-	atomic_t		silly_count;
-	/* List of deferred sillydelete requests */
-	struct hlist_head	silly_list;
-	wait_queue_head_t	waitqueue;
-
-#ifdef CONFIG_NFS_V4
-	struct nfs4_cached_acl	*nfs4_acl;
-        /* NFSv4 state */
-	struct list_head	open_states;
-	struct nfs_delegation	*delegation;
-	int			 delegation_state;
-	struct rw_semaphore	rwsem;
-#endif /* CONFIG_NFS_V4*/
-	struct inode		vfs_inode;
-};
-
-/*
- * Cache validity bit flags
- */
-#define NFS_INO_INVALID_ATTR	0x0001		/* cached attrs are invalid */
-#define NFS_INO_INVALID_DATA	0x0002		/* cached data is invalid */
-#define NFS_INO_INVALID_ATIME	0x0004		/* cached atime is invalid */
-#define NFS_INO_INVALID_ACCESS	0x0008		/* cached access cred invalid */
-#define NFS_INO_INVALID_ACL	0x0010		/* cached acls are invalid */
-#define NFS_INO_REVAL_PAGECACHE	0x0020		/* must revalidate pagecache */
-#define NFS_INO_REVAL_FORCED	0x0040		/* force revalidation ignoring a delegation */
-
-/*
- * Bit offsets in flags field
- */
-#define NFS_INO_REVALIDATING	(0)		/* revalidating attrs */
-#define NFS_INO_ADVISE_RDPLUS	(1)		/* advise readdirplus */
-#define NFS_INO_STALE		(2)		/* possible stale inode */
-#define NFS_INO_ACL_LRU_SET	(3)		/* Inode is on the LRU list */
-#define NFS_INO_MOUNTPOINT	(4)		/* inode is remote mountpoint */
-
-static inline struct nfs_inode *NFS_I(const struct inode *inode)
-{
-	return container_of(inode, struct nfs_inode, vfs_inode);
-}
-
-static inline struct nfs_server *NFS_SB(const struct super_block *s)
-{
-	return (struct nfs_server *)(s->s_fs_info);
-}
-
-static inline struct nfs_fh *NFS_FH(const struct inode *inode)
-{
-	return &NFS_I(inode)->fh;
-}
-
-static inline struct nfs_server *NFS_SERVER(const struct inode *inode)
-{
-	return NFS_SB(inode->i_sb);
-}
-
-static inline struct rpc_clnt *NFS_CLIENT(const struct inode *inode)
-{
-	return NFS_SERVER(inode)->client;
-}
-
-static inline const struct nfs_rpc_ops *NFS_PROTO(const struct inode *inode)
-{
-	return NFS_SERVER(inode)->nfs_client->rpc_ops;
-}
-
-static inline __be32 *NFS_COOKIEVERF(const struct inode *inode)
-{
-	return NFS_I(inode)->cookieverf;
-}
-
-static inline unsigned NFS_MINATTRTIMEO(const struct inode *inode)
-{
-	struct nfs_server *nfss = NFS_SERVER(inode);
-	return S_ISDIR(inode->i_mode) ? nfss->acdirmin : nfss->acregmin;
-}
-
-static inline unsigned NFS_MAXATTRTIMEO(const struct inode *inode)
-{
-	struct nfs_server *nfss = NFS_SERVER(inode);
-	return S_ISDIR(inode->i_mode) ? nfss->acdirmax : nfss->acregmax;
-}
-
-static inline int NFS_STALE(const struct inode *inode)
-{
-	return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
-}
-
-static inline __u64 NFS_FILEID(const struct inode *inode)
-{
-	return NFS_I(inode)->fileid;
-}
-
-static inline void set_nfs_fileid(struct inode *inode, __u64 fileid)
-{
-	NFS_I(inode)->fileid = fileid;
-}
-
-static inline void nfs_mark_for_revalidate(struct inode *inode)
-{
-	struct nfs_inode *nfsi = NFS_I(inode);
-
-	spin_lock(&inode->i_lock);
-	nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS;
-	if (S_ISDIR(inode->i_mode))
-		nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
-	spin_unlock(&inode->i_lock);
-}
-
-static inline int nfs_server_capable(struct inode *inode, int cap)
-{
-	return NFS_SERVER(inode)->caps & cap;
-}
-
-static inline int NFS_USE_READDIRPLUS(struct inode *inode)
-{
-	return test_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
-}
-
-static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf)
-{
-	dentry->d_time = verf;
-}
-
-/**
- * nfs_save_change_attribute - Returns the inode attribute change cookie
- * @dir - pointer to parent directory inode
- * The "change attribute" is updated every time we finish an operation
- * that will result in a metadata change on the server.
- */
-static inline unsigned long nfs_save_change_attribute(struct inode *dir)
-{
-	return NFS_I(dir)->cache_change_attribute;
-}
-
-/**
- * nfs_verify_change_attribute - Detects NFS remote directory changes
- * @dir - pointer to parent directory inode
- * @chattr - previously saved change attribute
- * Return "false" if the verifiers doesn't match the change attribute.
- * This would usually indicate that the directory contents have changed on
- * the server, and that any dentries need revalidating.
- */
-static inline int nfs_verify_change_attribute(struct inode *dir, unsigned long chattr)
-{
-	return chattr == NFS_I(dir)->cache_change_attribute;
-}
-
-/*
- * linux/fs/nfs/inode.c
- */
-extern int nfs_sync_mapping(struct address_space *mapping);
-extern void nfs_zap_mapping(struct inode *inode, struct address_space *mapping);
-extern void nfs_zap_caches(struct inode *);
-extern void nfs_invalidate_atime(struct inode *);
-extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *,
-				struct nfs_fattr *);
-extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *);
-extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr);
-extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr);
-extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
-extern int nfs_permission(struct inode *, int, struct nameidata *);
-extern int nfs_open(struct inode *, struct file *);
-extern int nfs_release(struct inode *, struct file *);
-extern int nfs_attribute_timeout(struct inode *inode);
-extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
-extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
-extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
-extern int nfs_revalidate_mapping_nolock(struct inode *inode, struct address_space *mapping);
-extern int nfs_setattr(struct dentry *, struct iattr *);
-extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr);
-extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
-extern void put_nfs_open_context(struct nfs_open_context *ctx);
-extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, int mode);
-extern u64 nfs_compat_user_ino64(u64 fileid);
-
-/* linux/net/ipv4/ipconfig.c: trims ip addr off front of name, too. */
-extern __be32 root_nfs_parse_addr(char *name); /*__init*/
-
-static inline void nfs_fattr_init(struct nfs_fattr *fattr)
-{
-	fattr->valid = 0;
-	fattr->time_start = jiffies;
-}
-
-/*
- * linux/fs/nfs/file.c
- */
-extern const struct inode_operations nfs_file_inode_operations;
-#ifdef CONFIG_NFS_V3
-extern const struct inode_operations nfs3_file_inode_operations;
-#endif /* CONFIG_NFS_V3 */
-extern const struct file_operations nfs_file_operations;
-extern const struct address_space_operations nfs_file_aops;
-
-static inline struct nfs_open_context *nfs_file_open_context(struct file *filp)
-{
-	return filp->private_data;
-}
-
-static inline struct rpc_cred *nfs_file_cred(struct file *file)
-{
-	if (file != NULL)
-		return nfs_file_open_context(file)->cred;
-	return NULL;
-}
-
-/*
- * linux/fs/nfs/xattr.c
- */
-#ifdef CONFIG_NFS_V3_ACL
-extern ssize_t nfs3_listxattr(struct dentry *, char *, size_t);
-extern ssize_t nfs3_getxattr(struct dentry *, const char *, void *, size_t);
-extern int nfs3_setxattr(struct dentry *, const char *,
-			const void *, size_t, int);
-extern int nfs3_removexattr (struct dentry *, const char *name);
-#else
-# define nfs3_listxattr NULL
-# define nfs3_getxattr NULL
-# define nfs3_setxattr NULL
-# define nfs3_removexattr NULL
-#endif
-
-/*
- * linux/fs/nfs/direct.c
- */
-extern ssize_t nfs_direct_IO(int, struct kiocb *, const struct iovec *, loff_t,
-			unsigned long);
-extern ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
-	       	unsigned long nr_segs , loff_t pos);
-extern ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
-		unsigned long nr_segs , loff_t pos);
-
-/*
- * linux/fs/nfs/dir.c
- */
-extern const struct inode_operations nfs_dir_inode_operations;
-#ifdef CONFIG_NFS_V3
-extern const struct inode_operations nfs3_dir_inode_operations;
-#endif /* CONFIG_NFS_V3 */
-extern const struct file_operations nfs_dir_operations;
-extern struct dentry_operations nfs_dentry_operations;
-
-extern void nfs_force_lookup_revalidate(struct inode *dir);
-extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr);
-extern int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags);
-extern void nfs_access_zap_cache(struct inode *inode);
-
-/*
- * linux/fs/nfs/symlink.c
- */
-extern const struct inode_operations nfs_symlink_inode_operations;
-
-/*
- * linux/fs/nfs/sysctl.c
- */
-#ifdef CONFIG_SYSCTL
-extern int nfs_register_sysctl(void);
-extern void nfs_unregister_sysctl(void);
-#else
-#define nfs_register_sysctl() 0
-#define nfs_unregister_sysctl() do { } while(0)
-#endif
-
-/*
- * linux/fs/nfs/namespace.c
- */
-extern const struct inode_operations nfs_mountpoint_inode_operations;
-extern const struct inode_operations nfs_referral_inode_operations;
-extern int nfs_mountpoint_expiry_timeout;
-extern void nfs_release_automount_timer(void);
-
-/*
- * linux/fs/nfs/unlink.c
- */
-extern int  nfs_async_unlink(struct inode *dir, struct dentry *dentry);
-extern void nfs_complete_unlink(struct dentry *dentry, struct inode *);
-extern void nfs_block_sillyrename(struct dentry *dentry);
-extern void nfs_unblock_sillyrename(struct dentry *dentry);
-
-/*
- * linux/fs/nfs/write.c
- */
-extern int  nfs_congestion_kb;
-extern int  nfs_writepage(struct page *page, struct writeback_control *wbc);
-extern int  nfs_writepages(struct address_space *, struct writeback_control *);
-extern int  nfs_flush_incompatible(struct file *file, struct page *page);
-extern int  nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int);
-extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *);
-extern void nfs_writedata_release(void *);
-
-/*
- * Try to write back everything synchronously (but check the
- * return value!)
- */
-extern long nfs_sync_mapping_wait(struct address_space *, struct writeback_control *, int);
-extern int nfs_wb_all(struct inode *inode);
-extern int nfs_wb_nocommit(struct inode *inode);
-extern int nfs_wb_page(struct inode *inode, struct page* page);
-extern int nfs_wb_page_priority(struct inode *inode, struct page* page, int how);
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-extern int  nfs_commit_inode(struct inode *, int);
-extern struct nfs_write_data *nfs_commitdata_alloc(void);
-extern void nfs_commit_free(struct nfs_write_data *wdata);
-extern void nfs_commitdata_release(void *wdata);
-#else
-static inline int
-nfs_commit_inode(struct inode *inode, int how)
-{
-	return 0;
-}
-#endif
-
-static inline int
-nfs_have_writebacks(struct inode *inode)
-{
-	return NFS_I(inode)->npages != 0;
-}
-
-/*
- * Allocate nfs_write_data structures
- */
-extern struct nfs_write_data *nfs_writedata_alloc(unsigned int npages);
-
-/*
- * linux/fs/nfs/read.c
- */
-extern int  nfs_readpage(struct file *, struct page *);
-extern int  nfs_readpages(struct file *, struct address_space *,
-		struct list_head *, unsigned);
-extern int  nfs_readpage_result(struct rpc_task *, struct nfs_read_data *);
-extern void nfs_readdata_release(void *data);
-
-/*
- * Allocate nfs_read_data structures
- */
-extern struct nfs_read_data *nfs_readdata_alloc(unsigned int npages);
-
-/*
- * linux/fs/nfs3proc.c
- */
-#ifdef CONFIG_NFS_V3_ACL
-extern struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type);
-extern int nfs3_proc_setacl(struct inode *inode, int type,
-			    struct posix_acl *acl);
-extern int nfs3_proc_set_default_acl(struct inode *dir, struct inode *inode,
-		mode_t mode);
-extern void nfs3_forget_cached_acls(struct inode *inode);
-#else
-static inline int nfs3_proc_set_default_acl(struct inode *dir,
-					    struct inode *inode,
-					    mode_t mode)
-{
-	return 0;
-}
-
-static inline void nfs3_forget_cached_acls(struct inode *inode)
-{
-}
-#endif /* CONFIG_NFS_V3_ACL */
-
-/*
- * linux/fs/mount_clnt.c
- */
-extern int  nfs_mount(struct sockaddr *, size_t, char *, char *,
-		      int, int, struct nfs_fh *);
-
-/*
- * inline functions
- */
-
-static inline loff_t nfs_size_to_loff_t(__u64 size)
-{
-	if (size > (__u64) OFFSET_MAX - 1)
-		return OFFSET_MAX - 1;
-	return (loff_t) size;
-}
-
-static inline ino_t
-nfs_fileid_to_ino_t(u64 fileid)
-{
-	ino_t ino = (ino_t) fileid;
-	if (sizeof(ino_t) < sizeof(u64))
-		ino ^= fileid >> (sizeof(u64)-sizeof(ino_t)) * 8;
-	return ino;
-}
-
-/* NFS root */
-
-extern void * nfs_root_data(void);
-
-#define nfs_wait_event(clnt, wq, condition)				\
-({									\
-	int __retval = 0;						\
-	if (clnt->cl_intr) {						\
-		sigset_t oldmask;					\
-		rpc_clnt_sigmask(clnt, &oldmask);			\
-		__retval = wait_event_interruptible(wq, condition);	\
-		rpc_clnt_sigunmask(clnt, &oldmask);			\
-	} else								\
-		wait_event(wq, condition);				\
-	__retval;							\
-})
-
-#define NFS_JUKEBOX_RETRY_TIME (5 * HZ)
-
-#endif /* __KERNEL__ */
-
-/*
- * NFS debug flags
- */
-#define NFSDBG_VFS		0x0001
-#define NFSDBG_DIRCACHE		0x0002
-#define NFSDBG_LOOKUPCACHE	0x0004
-#define NFSDBG_PAGECACHE	0x0008
-#define NFSDBG_PROC		0x0010
-#define NFSDBG_XDR		0x0020
-#define NFSDBG_FILE		0x0040
-#define NFSDBG_ROOT		0x0080
-#define NFSDBG_CALLBACK		0x0100
-#define NFSDBG_CLIENT		0x0200
-#define NFSDBG_MOUNT		0x0400
-#define NFSDBG_ALL		0xFFFF
-
-#ifdef __KERNEL__
-
-/*
- * Enable debugging support for nfs client.
- * Requires RPC_DEBUG.
- */
-#ifdef RPC_DEBUG
-# define NFS_DEBUG
-#endif
-
-# undef ifdebug
-# ifdef NFS_DEBUG
-#  define ifdebug(fac)		if (unlikely(nfs_debug & NFSDBG_##fac))
-# else
-#  define ifdebug(fac)		if (0)
-# endif
-#endif /* __KERNEL */
-
-#endif

Deleted: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/nfs_fs_sb.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/nfs_fs_sb.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/nfs_fs_sb.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,139 +0,0 @@
-#ifndef _NFS_FS_SB
-#define _NFS_FS_SB
-
-#include <linux/list.h>
-#include <linux/backing-dev.h>
-#include <linux/wait.h>
-
-#include <asm/atomic.h>
-
-struct nfs_iostats;
-struct nlm_host;
-
-/*
- * The nfs_client identifies our client state to the server.
- */
-struct nfs_client {
-	atomic_t		cl_count;
-	int			cl_cons_state;	/* current construction state (-ve: init error) */
-#define NFS_CS_READY		0		/* ready to be used */
-#define NFS_CS_INITING		1		/* busy initialising */
-	unsigned long		cl_res_state;	/* NFS resources state */
-#define NFS_CS_CALLBACK		1		/* - callback started */
-#define NFS_CS_IDMAP		2		/* - idmap started */
-#define NFS_CS_RENEWD		3		/* - renewd started */
-	struct sockaddr_storage	cl_addr;	/* server identifier */
-	size_t			cl_addrlen;
-	char *			cl_hostname;	/* hostname of server */
-	struct list_head	cl_share_link;	/* link in global client list */
-	struct list_head	cl_superblocks;	/* List of nfs_server structs */
-
-	struct rpc_clnt *	cl_rpcclient;
-	const struct nfs_rpc_ops *rpc_ops;	/* NFS protocol vector */
-	int			cl_proto;	/* Network transport protocol */
-
-	struct rpc_cred		*cl_machine_cred;
-
-#ifdef CONFIG_NFS_V4
-	u64			cl_clientid;	/* constant */
-	nfs4_verifier		cl_confirm;
-	unsigned long		cl_state;
-
-	struct rb_root		cl_openowner_id;
-	struct rb_root		cl_lockowner_id;
-
-	/*
-	 * The following rwsem ensures exclusive access to the server
-	 * while we recover the state following a lease expiration.
-	 */
-	struct rw_semaphore	cl_sem;
-
-	struct list_head	cl_delegations;
-	struct rb_root		cl_state_owners;
-	spinlock_t		cl_lock;
-
-	unsigned long		cl_lease_time;
-	unsigned long		cl_last_renewal;
-	struct delayed_work	cl_renewd;
-
-	struct rpc_wait_queue	cl_rpcwaitq;
-
-	/* used for the setclientid verifier */
-	struct timespec		cl_boot_time;
-
-	/* idmapper */
-	struct idmap *		cl_idmap;
-
-	/* Our own IP address, as a null-terminated string.
-	 * This is used to generate the clientid, and the callback address.
-	 */
-	char			cl_ipaddr[48];
-	unsigned char		cl_id_uniquifier;
-#endif
-};
-
-/*
- * NFS client parameters stored in the superblock.
- */
-struct nfs_server {
-	struct nfs_client *	nfs_client;	/* shared client and NFS4 state */
-	struct list_head	client_link;	/* List of other nfs_server structs
-						 * that share the same client
-						 */
-	struct list_head	master_link;	/* link in master servers list */
-	struct rpc_clnt *	client;		/* RPC client handle */
-	struct rpc_clnt *	client_acl;	/* ACL RPC client handle */
-	struct nlm_host		*nlm_host;	/* NLM client handle */
-	struct nfs_iostats *	io_stats;	/* I/O statistics */
-	struct backing_dev_info	backing_dev_info;
-	atomic_t		writeback;	/* number of writeback pages */
-	int			flags;		/* various flags */
-	unsigned int		caps;		/* server capabilities */
-	unsigned int		rsize;		/* read size */
-	unsigned int		rpages;		/* read size (in pages) */
-	unsigned int		wsize;		/* write size */
-	unsigned int		wpages;		/* write size (in pages) */
-	unsigned int		wtmult;		/* server disk block size */
-	unsigned int		dtsize;		/* readdir size */
-	unsigned short		port;		/* "port=" setting */
-	unsigned int		bsize;		/* server block size */
-	unsigned int		acregmin;	/* attr cache timeouts */
-	unsigned int		acregmax;
-	unsigned int		acdirmin;
-	unsigned int		acdirmax;
-	unsigned int		namelen;
-
-	struct nfs_fsid		fsid;
-	__u64			maxfilesize;	/* maximum file size */
-	unsigned long		mount_time;	/* when this fs was mounted */
-	dev_t			s_dev;		/* superblock dev numbers */
-
-#ifdef CONFIG_NFS_V4
-	u32			attr_bitmask[2];/* V4 bitmask representing the set
-						   of attributes supported on this
-						   filesystem */
-	u32			acl_bitmask;	/* V4 bitmask representing the ACEs
-						   that are supported on this
-						   filesystem */
-#endif
-	void (*destroy)(struct nfs_server *);
-
-	atomic_t active; /* Keep track of any activity to this server */
-	wait_queue_head_t active_wq;  /* Wait for any activity to stop  */
-
-	/* mountd-related mount options */
-	struct sockaddr_storage	mountd_address;
-	size_t			mountd_addrlen;
-	u32			mountd_version;
-	unsigned short		mountd_port;
-	unsigned short		mountd_protocol;
-};
-
-/* Server capabilities */
-#define NFS_CAP_READDIRPLUS	(1U << 0)
-#define NFS_CAP_HARDLINKS	(1U << 1)
-#define NFS_CAP_SYMLINKS	(1U << 2)
-#define NFS_CAP_ACLS		(1U << 3)
-#define NFS_CAP_ATOMIC_OPEN	(1U << 4)
-
-#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/pagemap.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/pagemap.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/pagemap.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,8 @@
+#ifndef BACKPORT_LINUX_PAGEMAP_H
+#define BACKPORT_LINUX_PAGEMAP_H
+
+#include_next <linux/pagemap.h>
+
+#define __grab_cache_page	grab_cache_page
+
+#endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/path.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/path.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/path.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,12 +1,36 @@
 #ifndef _BACKPORT_LINUX_PATH_H
 #define _BACKPORT_LINUX_PATH_H
 
-struct dentry;
-struct vfsmount;
+#include <linux/mount.h>
+#include <linux/namei.h>
 
 struct path {
 	struct vfsmount *mnt;
 	struct dentry *dentry;
 };
 
+static inline void path_put(struct path *path)
+{
+	dput(path->dentry);
+	mntput(path->mnt);
+}
+
+static inline void path_get(struct path *path)
+{
+	mntget(path->mnt);
+	dget(path->dentry);
+}
+
+static inline void backport_path_put(struct nameidata *nd)
+{
+	dput(nd->dentry);
+	mntput(nd->mnt);
+}
+
+static inline void backport_path_get(struct nameidata *nd)
+{
+	mntget(nd->mnt);
+	dget(nd->dentry);
+}
+
 #endif  /* _BACKPORT_LINUX_PATH_H */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/proc_fs.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/proc_fs.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/proc_fs.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,30 @@
+#ifndef BACKPORT_LINUX_PROC_FS_H
+#define BACKPORT_LINUX_PROC_FS_H
+
+#include_next <linux/proc_fs.h>
+
+static inline struct proc_dir_entry *proc_create(const char *name,
+	mode_t mode, struct proc_dir_entry *parent,
+	const struct file_operations *fops)
+{
+	struct proc_dir_entry *res = create_proc_entry(name, mode, parent);
+	if (res)
+		res->proc_fops = fops;
+	return res;
+}
+
+static inline struct proc_dir_entry *proc_create_data(const char *name, mode_t mode,
+				struct proc_dir_entry *parent,
+				const struct file_operations *proc_fops,
+				void *data)
+{
+		struct proc_dir_entry *pde;
+
+		pde = proc_create(name, mode, parent, proc_fops);
+		if (pde)
+			pde->data = data;
+
+		return pde;
+}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/radix-tree.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/radix-tree.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/radix-tree.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,19 @@
+#ifndef BACKPORT_LINUX_RADIX_TREE_H
+#define BACKPORT_LINUX_RADIX_TREE_H
+
+#include_next <linux/radix-tree.h>
+
+static inline int backport_radix_tree_preload(gfp_t gfp_mask)
+{
+	return 0;
+}
+
+#define radix_tree_preload backport_radix_tree_preload
+
+static inline void backport_radix_tree_preload_end(void)
+{
+}
+
+#define radix_tree_preload_end backport_radix_tree_preload_end
+
+#endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/scatterlist.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/scatterlist.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/scatterlist.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -15,6 +15,10 @@
 	sg->page = page;
 }
 
+static inline void sg_mark_end(struct scatterlist *sg)
+{
+}
+
 #define sg_page(a) (a)->page
 #define sg_init_table(a, b)
 

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/sched.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/sched.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/sched.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,26 @@
+#ifndef LINUX_SCHED_BACKPORT_H
+#define LINUX_SCHED_BACKPORT_H
+
+#include_next <linux/sched.h>
+
+#define TASK_WAKEKILL	   128
+
+#define TASK_KILLABLE	   (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
+
+#define schedule_timeout_killable(_arg) schedule_timeout_interruptible(_arg)
+
+static inline int __fatal_signal_pending(struct task_struct *tsk)
+{
+	return sigismember(&tsk->pending.signal, SIGKILL);
+}
+
+static inline int fatal_signal_pending(struct task_struct *p)
+{
+	return signal_pending(p) && __fatal_signal_pending(p);
+}
+
+static inline pid_t task_pid_nr(struct task_struct *tsk)
+{
+	return tsk->pid;
+}
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/security.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/security.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/security.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,54 @@
+#ifndef BACKPORT_LINUX_SECURITY_H
+#define BACKPORT_LINUX_SECURITY_H
+
+#include_next <linux/security.h>
+
+struct security_mnt_opts {
+	char **mnt_opts;
+	int *mnt_opts_flags;
+	int num_mnt_opts;
+};
+
+static inline void security_init_mnt_opts(struct security_mnt_opts *opts)
+{
+	opts->mnt_opts = NULL;
+	opts->mnt_opts_flags = NULL;
+	opts->num_mnt_opts = 0;
+}
+
+static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
+{
+	int i;
+	if (opts->mnt_opts)
+		for (i = 0; i < opts->num_mnt_opts; i++)
+			kfree(opts->mnt_opts[i]);
+	kfree(opts->mnt_opts);
+	opts->mnt_opts = NULL;
+	kfree(opts->mnt_opts_flags);
+	opts->mnt_opts_flags = NULL;
+	opts->num_mnt_opts = 0;
+}
+
+static inline int security_sb_set_mnt_opts(struct super_block *sb,
+					   struct security_mnt_opts *opts)
+{
+	return 0;
+}
+
+static inline void security_sb_clone_mnt_opts(const struct super_block *oldsb,
+					      struct super_block *newsb)
+{ }
+
+static inline int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts)
+{
+	return 0;
+}
+
+static inline int backport_security_sb_copy_data(void *orig, void *copy)
+{
+	return 0;
+}
+
+#define security_sb_copy_data(a,b) backport_security_sb_copy_data(a,b)
+
+#endif /* BACKPORT_LINUX_SECURITY_H */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/seq_file.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/seq_file.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/seq_file.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,61 @@
+#ifndef BACKPORT_LINUX_SEQ_FILE_H
+#define BACKPORT_LINUX_SEQ_FILE_H
+
+#include_next <linux/seq_file.h>
+#include <linux/fs.h>
+
+static inline struct list_head *seq_list_start(struct list_head *head, loff_t pos)
+{
+	struct list_head *lh;
+
+	list_for_each(lh, head)
+		if (pos-- == 0)
+			return lh;
+
+	return NULL;
+}
+
+static inline struct list_head *seq_list_start_head(struct list_head *head, loff_t pos)
+{
+	if (!pos)
+		return head;
+
+	return seq_list_start(head, pos - 1);
+}
+
+static inline struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos)
+{
+	struct list_head *lh;
+
+	lh = ((struct list_head *)v)->next;
+	++*ppos;
+	return lh == head ? NULL : lh;
+}
+
+static inline void *__seq_open_private(struct file *f, struct seq_operations *ops,
+		int psize)
+{
+	int rc;
+	void *private;
+	struct seq_file *seq;
+
+	private = kzalloc(psize, GFP_KERNEL);
+	if (private == NULL)
+		goto out;
+
+	rc = seq_open(f, ops);
+	if (rc < 0)
+		goto out_free;
+
+	seq = f->private_data;
+	seq->private = private;
+	return private;
+
+out_free:
+	kfree(private);
+out:
+	return NULL;
+}
+
+
+#endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/skbuff.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/skbuff.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/skbuff.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -14,6 +14,7 @@
 
 #define transport_header h.raw
 #define network_header nh.raw
+#define mac_header mac.raw
 
 static inline void skb_reset_mac_header(struct sk_buff *skb)
 {
@@ -25,6 +26,11 @@
 	skb->network_header = skb->data;
 }
 
+static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
+{
+	skb->mac_header = skb->data + offset;
+}
+
 #if 0
 static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
 					     void *to,
@@ -82,4 +88,8 @@
         skb->nh.raw = skb->data + offset;
 }
 
+static inline int skb_csum_unnecessary(const struct sk_buff *skb)
+{
+	return skb->ip_summed & CHECKSUM_UNNECESSARY;
+}
 #endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/string.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/string.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/string.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,24 @@
+#ifndef BACKPORT_LINUX_STRING_H
+#define BACKPORT_LINUX_STRING_H
+
+#include_next <linux/string.h>
+
+extern void *__kmalloc(size_t, gfp_t);
+
+static inline char *kstrndup(const char *s, size_t max, gfp_t gfp)
+{
+	size_t len;
+	char *buf;
+
+	if (!s)
+		return NULL;
+
+	len = strnlen(s, max);
+	buf = __kmalloc(len+1, gfp);
+	if (buf) {
+		memcpy(buf, s, len);
+		buf[len] = '\0';
+	}
+	return buf;
+}
+#endif /* BACKPORT_LINUX_STRING_H */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/swap.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/swap.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/swap.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,28 @@
+#ifndef LINUX_SWAP_BACKPORT_H
+#define LINUX_SWAP_BACKPORT_H
+
+#include_next <linux/swap.h>
+
+static inline unsigned int backport_nr_free_buffer_pages(void)
+{
+	/* Just pick one node, since fallback list is circular */
+	pg_data_t *pgdat = NODE_DATA(numa_node_id());
+	unsigned int sum = 0;
+
+	struct zonelist *zonelist = pgdat->node_zonelists + gfp_zone(GFP_USER);
+	struct zone **zonep = zonelist->zones;
+	struct zone *zone;
+
+	for (zone = *zonep++; zone; zone = *zonep++) {
+		unsigned long size = zone->present_pages;
+		unsigned long high = zone->pages_high;
+		if (size > high)
+			sum += size - high;
+	}
+
+	return sum;
+}
+
+#define nr_free_buffer_pages backport_nr_free_buffer_pages
+
+#endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/sysctl.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/sysctl.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/sysctl.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -72,4 +72,11 @@
 
 #define unregister_sysctl_table(hdr)	fake_unregister_sysctl_table(hdr)
 
+static inline struct ctl_table_header *
+backport_register_sysctl_table(ctl_table *table) {
+	return register_sysctl_table(table, 0);
+}
+
+#define register_sysctl_table backport_register_sysctl_table
+
 #endif /* __BACKPORT_SYSCTL_H_TO_2_6_18__ */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/unaligned/access_ok.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/unaligned/access_ok.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/unaligned/access_ok.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,67 @@
+#ifndef _LINUX_UNALIGNED_ACCESS_OK_H
+#define _LINUX_UNALIGNED_ACCESS_OK_H
+
+#include <linux/kernel.h>
+#include <asm/byteorder.h>
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+	return le16_to_cpup((__le16 *)p);
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+	return le32_to_cpup((__le32 *)p);
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+	return le64_to_cpup((__le64 *)p);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+	return be16_to_cpup((__be16 *)p);
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+	return be32_to_cpup((__be32 *)p);
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+	return be64_to_cpup((__be64 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	*((__le16 *)p) = cpu_to_le16(val);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	*((__le32 *)p) = cpu_to_le32(val);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	*((__le64 *)p) = cpu_to_le64(val);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	*((__be16 *)p) = cpu_to_be16(val);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	*((__be32 *)p) = cpu_to_be32(val);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	*((__be64 *)p) = cpu_to_be64(val);
+}
+
+#endif /* _LINUX_UNALIGNED_ACCESS_OK_H */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/wait.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/wait.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/wait.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,47 @@
+#ifndef BACKPORT_LINUX_WAIT_H
+#define BACKPORT_LINUX_WAIT_H
+
+#include_next <linux/wait.h>
+
+#define __wait_event_killable(wq, condition, ret)		\
+do {								\
+	DEFINE_WAIT(__wait);					\
+								\
+	for (;;) {						\
+		prepare_to_wait(&wq, &__wait, TASK_KILLABLE);	\
+		if (condition)					\
+			break;					\
+		if (!fatal_signal_pending(current)) {		\
+			schedule();				\
+			continue;				\
+		}						\
+		ret = -ERESTARTSYS;				\
+		break;						\
+	}							\
+	finish_wait(&wq, &__wait);				\
+} while (0)
+
+/**
+ * wait_event_killable - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_KILLABLE) until the
+ * @condition evaluates to true or a signal is received.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function will return -ERESTARTSYS if it was interrupted by a
+ * signal and 0 if @condition evaluated to true.
+ */
+#define wait_event_killable(wq, condition)			\
+({								\
+	int __ret = 0;						\
+	if (!(condition))					\
+		__wait_event_killable(wq, condition, __ret);	\
+	__ret;							\
+})
+
+#endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/workqueue.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/workqueue.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/linux/workqueue.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -60,5 +60,13 @@
 #define cancel_delayed_work_sync cancel_delayed_work
 #define cancel_rearming_delayed_workqueue backport_cancel_rearming_delayed_workqueue
 #define schedule_delayed_work backport_schedule_delayed_work
+#define cancel_delayed_work_sync cancel_delayed_work
 
+static inline void backport_cancel_rearming_delayed_work(struct delayed_work *work)
+{
+	cancel_delayed_work_sync(work);
+}
+
+#define cancel_rearming_delayed_work backport_cancel_rearming_delayed_work
+
 #endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/net/rtnetlink.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/net/rtnetlink.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/net/rtnetlink.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,6 @@
+#ifndef __BACKPORT_RTNETLINK_TO_2_6_27__
+#define __BACKPORT_RTNETLINK_TO_2_6_27__
+
+#include <linux/rtnetlink.h>
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/net/udp.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/net/udp.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/net/udp.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,9 @@
+#ifndef BACKPORT_NET_UDP_H
+#define BACKPORT_NET_UDP_H
+
+#include_next <net/udp.h>
+
+static inline void UDPX_INC_STATS_BH(struct sock *sk, int field)
+{ }
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/scsi/scsi_device.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/scsi/scsi_device.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/scsi/scsi_device.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,8 @@
+#ifndef SCSI_SCSI_DEVICE_BACKPORT_TO_2_6_26_H
+#define SCSI_SCSI_DEVICE_BACKPORT_TO_2_6_26_H
+
+#include_next <scsi/scsi_device.h>
+
+#define __starget_for_each_device(scsi_target, p, fn) starget_for_each_device(scsi_target, p, fn)
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/scsi/scsi_host.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/scsi/scsi_host.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/scsi/scsi_host.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,11 @@
+#ifndef SCSI_SCSI_HOST_BACKPORT_TO_2_6_26_H
+#define SCSI_SCSI_HOST_BACKPORT_TO_2_6_26_H
+
+#include_next <scsi/scsi_host.h>
+
+static inline void *shost_priv(struct Scsi_Host *shost)
+{
+        return (void *)shost->hostdata;
+}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/scsi/scsi_transport.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/scsi/scsi_transport.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/scsi/scsi_transport.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,7 @@
+#ifndef SCSI_SCSI_TRANSPORT_BACKPORT_TO_2_6_22_H
+#define SCSI_SCSI_TRANSPORT_BACKPORT_TO_2_6_22_H
+
+#include <scsi/scsi_device.h>
+#include_next <scsi/scsi_transport.h>
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/src/namespace.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/src/namespace.c	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/src/namespace.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,95 @@
+#include <linux/spinlock_types.h>
+#include <linux/percpu.h>
+#include <linux/mount.h>
+#include <linux/module.h>
+
+struct mnt_writer {
+	/*
+	 * If holding multiple instances of this lock, they
+	 * must be ordered by cpu number.
+	 */
+	spinlock_t lock;
+	struct lock_class_key lock_class; /* compiles out with !lockdep */
+	unsigned long count;
+	struct vfsmount *mnt;
+} ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU(struct mnt_writer, mnt_writers);
+
+int __init init_mnt_writers(void)
+{
+	int cpu;
+	for_each_possible_cpu(cpu) {
+		struct mnt_writer *writer = &per_cpu(mnt_writers, cpu);
+		spin_lock_init(&writer->lock);
+		lockdep_set_class(&writer->lock, &writer->lock_class);
+		writer->count = 0;
+	}
+	return 0;
+}
+
+static inline void __clear_mnt_count(struct mnt_writer *cpu_writer)
+{
+	if (!cpu_writer->mnt)
+		return;
+	/*
+	 * This is in case anyone ever leaves an invalid,
+	 * old ->mnt and a count of 0.
+	 */
+	if (!cpu_writer->count)
+		return;
+	cpu_writer->count = 0;
+}
+
+static inline void use_cpu_writer_for_mount(struct mnt_writer *cpu_writer,
+					  struct vfsmount *mnt)
+{
+	if (cpu_writer->mnt == mnt)
+		return;
+	__clear_mnt_count(cpu_writer);
+	cpu_writer->mnt = mnt;
+}
+
+int mnt_want_write(struct vfsmount *mnt)
+{
+	int ret = 0;
+	struct mnt_writer *cpu_writer;
+
+	cpu_writer = &get_cpu_var(mnt_writers);
+	spin_lock(&cpu_writer->lock);
+	if (__mnt_is_readonly(mnt)) {
+		ret = -EROFS;
+		goto out;
+	}
+	use_cpu_writer_for_mount(cpu_writer, mnt);
+	cpu_writer->count++;
+out:
+	spin_unlock(&cpu_writer->lock);
+	put_cpu_var(mnt_writers);
+	return ret;
+}
+EXPORT_SYMBOL(mnt_want_write);
+
+void mnt_drop_write(struct vfsmount *mnt)
+{
+	struct mnt_writer *cpu_writer;
+
+	cpu_writer = &get_cpu_var(mnt_writers);
+	spin_lock(&cpu_writer->lock);
+
+	use_cpu_writer_for_mount(cpu_writer, mnt);
+	if (cpu_writer->count > 0) {
+		cpu_writer->count--;
+	}
+
+	spin_unlock(&cpu_writer->lock);
+	/*
+	 * This could be done right after the spinlock
+	 * is taken because the spinlock keeps us on
+	 * the cpu, and disables preemption.  However,
+	 * putting it here bounds the amount that
+	 * __mnt_writers can underflow.  Without it,
+	 * we could theoretically wrap __mnt_writers.
+	 */
+	put_cpu_var(mnt_writers);
+}
+EXPORT_SYMBOL(mnt_drop_write);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/src/writeback.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/src/writeback.c	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.1/include/src/writeback.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,106 @@
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/pagevec.h>
+#include <linux/writeback.h>
+#include <linux/mpage.h>
+#include <linux/module.h>
+
+int write_cache_pages(struct address_space *mapping,
+                      struct writeback_control *wbc, backport_writepage_t writepage,
+                      void *data)
+{
+        struct backing_dev_info *bdi = mapping->backing_dev_info;
+        int ret = 0;
+        int done = 0;
+        struct pagevec pvec;
+        int nr_pages;
+        pgoff_t index;
+        pgoff_t end;            /* Inclusive */
+        int scanned = 0;
+        int range_whole = 0;
+        long nr_to_write = wbc->nr_to_write;
+
+        if (wbc->nonblocking && bdi_write_congested(bdi)) {
+                wbc->encountered_congestion = 1;
+                return 0;
+        }
+
+        pagevec_init(&pvec, 0);
+        if (wbc->range_cyclic) {
+                index = mapping->writeback_index; /* Start from prev offset */
+                end = -1;
+        } else {
+                index = wbc->range_start >> PAGE_CACHE_SHIFT;
+                end = wbc->range_end >> PAGE_CACHE_SHIFT;
+                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+                        range_whole = 1;
+                scanned = 1;
+        }
+retry:
+        while (!done && (index <= end) &&
+               (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+                                              PAGECACHE_TAG_DIRTY,
+                                              min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+                unsigned i;
+
+                scanned = 1;
+                for (i = 0; i < nr_pages; i++) {
+                        struct page *page = pvec.pages[i];
+
+                        /*
+                         * At this point we hold neither mapping->tree_lock nor
+                         * lock on the page itself: the page may be truncated or
+                         * invalidated (changing page->mapping to NULL), or even
+                         * swizzled back from swapper_space to tmpfs file
+                         * mapping
+                         */
+                        lock_page(page);
+
+                        if (unlikely(page->mapping != mapping)) {
+                                unlock_page(page);
+                                continue;
+                        }
+
+                        if (!wbc->range_cyclic && page->index > end) {
+                                done = 1;
+                                unlock_page(page);
+                                continue;
+                        }
+
+                        if (wbc->sync_mode != WB_SYNC_NONE)
+                                wait_on_page_writeback(page);
+
+                        if (PageWriteback(page) ||
+                            !clear_page_dirty_for_io(page)) {
+                                unlock_page(page);
+                                continue;
+                        }
+
+                        ret = (*writepage)(page, wbc, data);
+
+                        if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
+                                unlock_page(page);
+                                ret = 0;
+                        }
+                        if (ret || (--nr_to_write <= 0))
+                                done = 1;
+                        if (wbc->nonblocking && bdi_write_congested(bdi)) {
+                                wbc->encountered_congestion = 1;
+                                done = 1;
+                        }
+                }
+                pagevec_release(&pvec);
+                cond_resched();
+        }
+        if (!scanned && !done) {
+                /*
+                 * We hit the last page and there is more work to be done: wrap
+                 * back to the start of the file
+                 */
+                scanned = 1;
+                index = 0;
+                goto retry;
+        }
+        return ret;
+}
+EXPORT_SYMBOL(write_cache_pages);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/asm/unaligned.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/asm/unaligned.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/asm/unaligned.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,7 @@
+#ifndef ASM_UNALIGNED_BACKPORT_TO_2_6_27_H
+#define ASM_UNALIGNED_BACKPORT_TO_2_6_27_H
+
+#include <linux/unaligned/access_ok.h>
+#include_next <asm/unaligned.h>
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/asm-generic/atomic.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/asm-generic/atomic.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/asm-generic/atomic.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,40 @@
+#ifndef __BACKPORT_ASM_GENERIC_ATOMIC_H
+#define __BACKPORT_ASM_GENERIC_ATOMIC_H
+
+#include_next <asm-generic/atomic.h>
+
+#if BITS_PER_LONG == 64
+
+static inline long atomic_long_inc_return(atomic_long_t *l)
+{
+	atomic64_t *v = (atomic64_t *)l;
+
+	return (long)atomic64_inc_return(v);
+}
+
+static inline long atomic_long_dec_return(atomic_long_t *l)
+{
+	atomic64_t *v = (atomic64_t *)l;
+
+	return (long)atomic64_dec_return(v);
+}
+
+#else
+
+static inline long atomic_long_inc_return(atomic_long_t *l)
+{
+	atomic_t *v = (atomic_t *)l;
+
+	return (long)atomic_inc_return(v);
+}
+
+static inline long atomic_long_dec_return(atomic_long_t *l)
+{
+	atomic_t *v = (atomic_t *)l;
+
+	return (long)atomic_dec_return(v);
+}
+
+#endif  /*  BITS_PER_LONG == 64  */
+
+#endif  /*  __BACKPORT_ASM_GENERIC_ATOMIC_H  */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/backing-dev.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/backing-dev.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/backing-dev.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,67 @@
+#ifndef BACKPORT_LINUX_BACK_DEV_H
+#define BACKPORT_LINUX_BACK_DEV_H
+
+#include_next <linux/backing-dev.h>
+
+enum bdi_stat_item {
+	BDI_RECLAIMABLE,
+	BDI_WRITEBACK,
+	NR_BDI_STAT_ITEMS
+};
+
+
+static inline void inc_bdi_stat(struct backing_dev_info *bdi,
+		enum bdi_stat_item item)
+{
+	return;
+}
+
+static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
+		enum bdi_stat_item item)
+{
+	return;
+}
+
+static inline void dec_bdi_stat(struct backing_dev_info *bdi,
+		enum bdi_stat_item item)
+{
+	return;
+}
+
+static inline int bdi_init(struct backing_dev_info *bdi)
+{
+	return 0;
+}
+
+static inline void bdi_destroy(struct backing_dev_info *bdi)
+{
+	return;
+}
+
+static inline int bdi_register(struct backing_dev_info *bdi, struct device *parent,
+				const char *fmt, ...)
+{
+	return 0;
+}
+
+static inline int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
+{
+	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
+}
+
+static inline void bdi_unregister(struct backing_dev_info *bdi)
+{
+	return;
+}
+
+static inline void clear_bdi_congested(struct backing_dev_info *bdi, int rw)
+{
+	return;
+}
+
+static inline void set_bdi_congested(struct backing_dev_info *bdi, int rw)
+{
+	return;
+}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/capability.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/capability.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/capability.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,39 @@
+#ifndef BACKPORT_LINUX_CAPABILITY_H
+#define BACKPORT_LINUX_CAPABILITY_H
+
+#include_next <linux/capability.h>
+
+/* Override MAC access.
+   The base kernel enforces no MAC policy.
+   An LSM may enforce a MAC policy, and if it does and it chooses
+   to implement capability based overrides of that policy, this is
+   the capability it should use to do so. */
+
+#define CAP_MAC_OVERRIDE     32
+
+#define CAP_FS_MASK_B0	(CAP_TO_MASK(CAP_CHOWN)			\
+			 | CAP_TO_MASK(CAP_DAC_OVERRIDE)	\
+			 | CAP_TO_MASK(CAP_DAC_READ_SEARCH)	\
+			 | CAP_TO_MASK(CAP_FOWNER)		\
+			 | CAP_TO_MASK(CAP_FSETID))
+
+#define CAP_FS_MASK_B1	(CAP_TO_MASK(CAP_MAC_OVERRIDE))
+
+#define CAP_NFSD_SET	(CAP_FS_MASK_B0|CAP_TO_MASK(CAP_SYS_RESOURCE))
+#define CAP_FS_SET	(CAP_FS_MASK_B0)
+
+static inline kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a,
+					      const kernel_cap_t permitted)
+{
+	const kernel_cap_t __cap_nfsd_set = CAP_NFSD_SET;
+	return cap_combine(a,
+			   cap_intersect(permitted, __cap_nfsd_set));
+}
+
+static inline kernel_cap_t cap_drop_nfsd_set(const kernel_cap_t a)
+{
+	const kernel_cap_t __cap_fs_set = CAP_NFSD_SET;
+	return cap_drop(a, __cap_fs_set);
+}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/completion.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/completion.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/completion.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,8 @@
+#ifndef BACKPORT_LINUX_COMPLETION_H
+#define BACKPORT_LINUX_COMPLETION_H
+
+#include_next <linux/completion.h>
+
+#define wait_for_completion_killable(_args) wait_for_completion_interruptible(_args)
+
+#endif /* BACKPORT_LINUX_COMPLETION_H */

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/crypto.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/crypto.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/crypto.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -2,53 +2,8 @@
 #define BACKPORT_LINUX_CRYPTO_H
 
 #include_next <linux/crypto.h>
+#include <linux/ncrypto.h>
 
-#define CRYPTO_ALG_ASYNC               0x00000080
+#define CRYPTO_ALG_ASYNC	NCRYPTO_ALG_ASYNC 
 
-struct hash_desc
-{
-	struct crypto_tfm *tfm;
-	u32 flags;
-};
-
-static inline int crypto_hash_init(struct hash_desc *desc)
-{
-	crypto_digest_init(desc->tfm);
-	return 0;
-}
-
-static inline int crypto_hash_digest(struct hash_desc *desc,
-                                    struct scatterlist *sg,
-                                    unsigned int nbytes, u8 *out)
-{
-	crypto_digest_digest(desc->tfm, sg, 1, out);
-	return nbytes;
-}
-
-static inline int crypto_hash_update(struct hash_desc *desc,
-                                    struct scatterlist *sg,
-                                    unsigned int nbytes)
-{
-	crypto_digest_update(desc->tfm, sg, 1);
-	return nbytes;
-}
-
-static inline int crypto_hash_final(struct hash_desc *desc, u8 *out)
-{
-	crypto_digest_final(desc->tfm, out);
-	return 0;
-}
-
-static inline struct crypto_tfm *crypto_alloc_hash(const char *alg_name,
-                                                   u32 type, u32 mask)
-{
-	struct crypto_tfm *ret = crypto_alloc_tfm(alg_name ,type);
-	return ret ? ret : ERR_PTR(-ENOMEM);
-}
-
-static inline void crypto_free_hash(struct crypto_tfm *tfm)
-{
-	crypto_free_tfm(tfm);
-}
-
 #endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/err.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/err.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/err.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,19 @@
+#ifndef BACKPORT_LINUX_ERR_H
+#define BACKPORT_LINUX_ERR_H
+
+#include_next <linux/err.h>
+
+/**
+ * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
+ * @ptr: The pointer to cast.
+ *
+ * Explicitly cast an error-valued pointer to another pointer type in such a
+ * way as to make it clear that's what's going on.
+ */
+static inline void *ERR_CAST(const void *ptr)
+{
+	/* cast away the const */
+	return (void *) ptr;
+}
+
+#endif /* BACKPORT_LINUX_ERR_H */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/file.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/file.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/file.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,12 @@
+#ifndef _BACKPORT_LINUX_FILE_H_
+#define _BACKPORT_LINUX_FILE_H_
+
+#include_next <linux/file.h>
+#include <linux/fs.h>
+
+static inline void drop_file_write_access(struct file *filp)
+{
+	put_write_access(filp->f_dentry->d_inode);
+}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/freezer.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/freezer.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/freezer.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,6 @@
+#ifndef BACKPORT_LINUX_FREEZER_H
+#define BACKPORT_LINUX_FREEZER_H
+
+static inline void set_freezable(void) {}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/fs.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/fs.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/fs.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,72 @@
+#ifndef BACKPORT_LINUX_FS_H
+#define BACKPORT_LINUX_FS_H
+
+#include_next <linux/fs.h>
+#include <linux/mount.h>
+
+#define FILE_LOCK_DEFERRED 1
+
+#define ATTR_KILL_PRIV  (1 << 14)
+
+static inline void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
+{
+	new->fl_owner = fl->fl_owner;
+	new->fl_pid = fl->fl_pid;
+	new->fl_file = NULL;
+	new->fl_flags = fl->fl_flags;
+	new->fl_type = fl->fl_type;
+	new->fl_start = fl->fl_start;
+	new->fl_end = fl->fl_end;
+	new->fl_ops = NULL;
+	new->fl_lmops = NULL;
+}
+
+#define vfs_setlease(a, b, c) setlease(a, b, c)
+
+static inline int __mandatory_lock(struct inode *ino)
+{
+	return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID;
+}
+
+#define mandatory_lock(_args) MANDATORY_LOCK(_args)
+
+static inline int backport_vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
+{
+	return vfs_symlink(dir, dentry, oldname, 0);
+}
+
+#define vfs_symlink(_dir, _dentry, _oldname) backport_vfs_symlink(_dir, _dentry, _oldname)
+
+#ifdef CONFIG_DEBUG_WRITECOUNT
+static inline void file_take_write(struct file *f)
+{
+	WARN_ON(f->f_mnt_write_state != 0);
+	f->f_mnt_write_state = FILE_MNT_WRITE_TAKEN;
+}
+#else
+static inline void file_take_write(struct file *filp) {}
+#endif
+
+static inline int inode_permission(struct inode *inode, int flags)
+{
+	return permission(inode, flags, NULL);
+}
+
+static inline int __mnt_is_readonly(struct vfsmount *mnt)
+{
+	if (mnt->mnt_sb->s_flags & MS_RDONLY)
+		return 1;
+	return 0;
+}
+
+static inline void drop_nlink(struct inode *inode)
+{
+	inode->i_nlink--;
+}
+
+static inline void clear_nlink(struct inode *inode)
+{
+	inode->i_nlink = 0;
+}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/highmem.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/highmem.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/highmem.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,36 @@
+#ifndef LINUX_HIGHMEM_H
+#define LINUX_HIGHMEM_H
+
+#include_next <linux/highmem.h>
+
+static inline void zero_user_segments(struct page *page,
+	unsigned start1, unsigned end1,
+	unsigned start2, unsigned end2)
+{
+	void *kaddr = kmap_atomic(page, KM_USER0);
+
+	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);
+
+	if (end1 > start1)
+		memset(kaddr + start1, 0, end1 - start1);
+
+	if (end2 > start2)
+		memset(kaddr + start2, 0, end2 - start2);
+
+	kunmap_atomic(kaddr, KM_USER0);
+	flush_dcache_page(page);
+}
+
+static inline void zero_user_segment(struct page *page,
+	unsigned start, unsigned end)
+{
+	zero_user_segments(page, start, end, 0, 0);
+}
+
+static inline void zero_user(struct page *page,
+	unsigned start, unsigned size)
+{
+	zero_user_segments(page, start, start + size, 0, 0);
+}
+
+#endif
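
For reference, a minimal usage sketch of the zero_user() helper defined above (illustrative only, not part of this revision; example_zero_tail is a made-up name): zero the tail of a page beyond new_size, as a truncate path would.

static void example_zero_tail(struct page *page, unsigned int new_size)
{
        /* zero bytes [new_size, PAGE_SIZE) in one kmap_atomic section */
        if (new_size < PAGE_SIZE)
                zero_user(page, new_size, PAGE_SIZE - new_size);
}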

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/if_vlan.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/if_vlan.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/if_vlan.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -21,4 +21,12 @@
 	return vlan_dev_info(dev)->vlan_id;
 }
 
+#define vlan_dev_real_dev(netdev) (VLAN_DEV_INFO(netdev)->real_dev)
+#define vlan_dev_vlan_id(netdev) (VLAN_DEV_INFO(netdev)->vlan_id)
+
+static inline int is_vlan_dev(struct net_device *dev)
+{
+	return dev->priv_flags & IFF_802_1Q_VLAN;
+}
+
 #endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/inet.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/inet.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/inet.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -6,4 +6,203 @@
 #define INET_ADDRSTRLEN		(16)
 #define INET6_ADDRSTRLEN	(48)
 
+#define IN6PTON_XDIGIT	  	0x00010000
+#define IN6PTON_DIGIT	   	0x00020000
+#define IN6PTON_COLON_MASK      0x00700000
+#define IN6PTON_COLON_1		0x00100000      /* single : requested */
+#define IN6PTON_COLON_2	 	0x00200000      /* second : requested */
+#define IN6PTON_COLON_1_2       0x00400000      /* :: requested */
+#define IN6PTON_DOT	     	0x00800000      /* . */
+#define IN6PTON_DELIM	   	0x10000000
+#define IN6PTON_NULL	    	0x20000000      /* first/tail */
+#define IN6PTON_UNKNOWN	 	0x40000000
+
+static inline int xdigit2bin(char c, int delim)
+{
+	if (c == delim || c == '\0')
+		return IN6PTON_DELIM;
+	if (c == ':')
+		return IN6PTON_COLON_MASK;
+	if (c == '.')
+		return IN6PTON_DOT;
+	if (c >= '0' && c <= '9')
+		return (IN6PTON_XDIGIT | IN6PTON_DIGIT| (c - '0'));
+	if (c >= 'a' && c <= 'f')
+		return (IN6PTON_XDIGIT | (c - 'a' + 10));
+	if (c >= 'A' && c <= 'F')
+		return (IN6PTON_XDIGIT | (c - 'A' + 10));
+	if (delim == -1)
+		return IN6PTON_DELIM;
+	return IN6PTON_UNKNOWN;
+}
+
+static inline int in4_pton(const char *src, int srclen,
+	     u8 *dst,
+	     int delim, const char **end)
+{
+	const char *s;
+	u8 *d;
+	u8 dbuf[4];
+	int ret = 0;
+	int i;
+	int w = 0;
+
+	if (srclen < 0)
+		srclen = strlen(src);
+	s = src;
+	d = dbuf;
+	i = 0;
+	while(1) {
+		int c;
+		c = xdigit2bin(srclen > 0 ? *s : '\0', delim);
+		if (!(c & (IN6PTON_DIGIT | IN6PTON_DOT | IN6PTON_DELIM | IN6PTON_COLON_MASK))) {
+			goto out;
+		}
+		if (c & (IN6PTON_DOT | IN6PTON_DELIM | IN6PTON_COLON_MASK)) {
+			if (w == 0)
+				goto out;
+			*d++ = w & 0xff;
+			w = 0;
+			i++;
+			if (c & (IN6PTON_DELIM | IN6PTON_COLON_MASK)) {
+				if (i != 4)
+					goto out;
+				break;
+			}
+			goto cont;
+		}
+		w = (w * 10) + c;
+		if ((w & 0xffff) > 255) {
+			goto out;
+		}
+cont:
+		if (i >= 4)
+			goto out;
+		s++;
+		srclen--;
+	}
+	ret = 1;
+	memcpy(dst, dbuf, sizeof(dbuf));
+out:
+	if (end)
+		*end = s;
+	return ret;
+}
+
+static inline int in6_pton(const char *src, int srclen,
+	     u8 *dst,
+	     int delim, const char **end)
+{
+	const char *s, *tok = NULL;
+	u8 *d, *dc = NULL;
+	u8 dbuf[16];
+	int ret = 0;
+	int i;
+	int state = IN6PTON_COLON_1_2 | IN6PTON_XDIGIT | IN6PTON_NULL;
+	int w = 0;
+
+	memset(dbuf, 0, sizeof(dbuf));
+
+	s = src;
+	d = dbuf;
+	if (srclen < 0)
+		srclen = strlen(src);
+
+	while (1) {
+		int c;
+
+		c = xdigit2bin(srclen > 0 ? *s : '\0', delim);
+		if (!(c & state))
+			goto out;
+		if (c & (IN6PTON_DELIM | IN6PTON_COLON_MASK)) {
+			/* process one 16-bit word */
+			if (!(state & IN6PTON_NULL)) {
+				*d++ = (w >> 8) & 0xff;
+				*d++ = w & 0xff;
+			}
+			w = 0;
+			if (c & IN6PTON_DELIM) {
+				/* We've processed last word */
+				break;
+			}
+			/*
+			 * COLON_1 => XDIGIT
+			 * COLON_2 => XDIGIT|DELIM
+			 * COLON_1_2 => COLON_2
+			 */
+			switch (state & IN6PTON_COLON_MASK) {
+			case IN6PTON_COLON_2:
+				dc = d;
+				state = IN6PTON_XDIGIT | IN6PTON_DELIM;
+				if (dc - dbuf >= sizeof(dbuf))
+					state |= IN6PTON_NULL;
+				break;
+			case IN6PTON_COLON_1|IN6PTON_COLON_1_2:
+				state = IN6PTON_XDIGIT | IN6PTON_COLON_2;
+				break;
+			case IN6PTON_COLON_1:
+				state = IN6PTON_XDIGIT;
+				break;
+			case IN6PTON_COLON_1_2:
+				state = IN6PTON_COLON_2;
+				break;
+			default:
+				state = 0;
+			}
+			tok = s + 1;
+			goto cont;
+		}
+
+		if (c & IN6PTON_DOT) {
+			ret = in4_pton(tok ? tok : s, srclen + (int)(s - tok), d, delim, &s);
+			if (ret > 0) {
+				d += 4;
+				break;
+			}
+			goto out;
+		}
+
+		w = (w << 4) | (0xff & c);
+		state = IN6PTON_COLON_1 | IN6PTON_DELIM;
+		if (!(w & 0xf000)) {
+			state |= IN6PTON_XDIGIT;
+		}
+		if (!dc && d + 2 < dbuf + sizeof(dbuf)) {
+			state |= IN6PTON_COLON_1_2;
+			state &= ~IN6PTON_DELIM;
+		}
+		if (d + 2 >= dbuf + sizeof(dbuf)) {
+			state &= ~(IN6PTON_COLON_1|IN6PTON_COLON_1_2);
+		}
+cont:
+		if ((dc && d + 4 < dbuf + sizeof(dbuf)) ||
+		    d + 4 == dbuf + sizeof(dbuf)) {
+			state |= IN6PTON_DOT;
+		}
+		if (d >= dbuf + sizeof(dbuf)) {
+			state &= ~(IN6PTON_XDIGIT|IN6PTON_COLON_MASK);
+		}
+		s++;
+		srclen--;
+	}
+
+	i = 15; d--;
+
+	if (dc) {
+		while(d >= dc)
+			dst[i--] = *d--;
+		while(i >= dc - dbuf)
+			dst[i--] = 0;
+		while(i >= 0)
+			dst[i--] = *d--;
+	} else
+		memcpy(dst, dbuf, sizeof(dbuf));
+
+	ret = 1;
+out:
+	if (end)
+		*end = s;
+	return ret;
+}
+
 #endif /* __BACKPORT_LINUX_INET_H_TO_2_6_26__ */
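
For reference, a minimal caller of the backported in4_pton() above (illustrative only, not part of this revision; example_parse_ipv4 is a made-up name):

static int example_parse_ipv4(const char *str, u8 addr[4])
{
        const char *end;

        /* srclen -1 means strlen(str); delim -1 means no extra delimiter;
         * returns 1 on success and leaves the four octets in addr[] */
        return in4_pton(str, -1, addr, -1, &end);
}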

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/jiffies.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/jiffies.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/jiffies.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,10 @@
+#ifndef _JIFFIES_BACKPORT_H
+#define _JIFFIES_BACKPORT_H
+
+#include_next <linux/jiffies.h>
+
+#define time_in_range(a,b,c) \
+	(time_after_eq(a,b) && \
+	 time_before_eq(a,c))
+
+#endif /* _JIFFIES_BACKPORT_H */
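
A one-line usage sketch of the time_in_range() macro added above (illustrative only; example_within_one_second is a made-up name):

static int example_within_one_second(unsigned long start)
{
        /* non-zero while jiffies lies in [start, start + HZ], wrap-safe */
        return time_in_range(jiffies, start, start + HZ);
}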

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/kernel.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/kernel.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/kernel.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,12 +3,31 @@
 
 #include_next <linux/kernel.h>
 
-#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
+#include <asm/errno.h>
+#include <asm/string.h>
 
-#endif
-#ifndef BACKPORT_KERNEL_H_2_6_19
-#define BACKPORT_KERNEL_H_2_6_19
+#define USHORT_MAX	((u16)(~0U))
 
-#include <linux/log2.h>
+static inline int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
+{
+	char *tail;
+	unsigned long val;
+	size_t len;
 
+	*res = 0;
+	len = strlen(cp);
+	if (len == 0)
+		return -EINVAL;
+
+	val = simple_strtoul(cp, &tail, base);
+	if ((*tail == '\0') ||
+		((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
+		*res = val;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+
 #endif
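
A typical caller of the backported strict_strtoul() above, e.g. from a sysfs store handler (illustrative only; example_parse_ulong is a made-up name):

static int example_parse_ulong(const char *buf, unsigned long *val)
{
        /* base 10; 0 on success, -EINVAL on empty input or on trailing
         * characters other than a single newline */
        return strict_strtoul(buf, 10, val);
}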

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/kobject.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/kobject.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/kobject.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -33,4 +33,5 @@
 int kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype,
                          struct kobject *parent, const char *fmt, ...);
 
+
 #endif /* __BACKPORT_KOBJECT_H_TO_2_6_24__ */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/list.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/list.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/list.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,9 @@
+#ifndef __BACKPORT_LINUX_LIST_H_TO_2_6_24__
+#define __BACKPORT_LINUX_LIST_H_TO_2_6_24__
+#include_next<linux/list.h>
+
+#define list_first_entry(ptr, type, member) \
+	list_entry((ptr)->next, type, member)
+
+
+#endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/log2.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/log2.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/log2.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -64,6 +64,15 @@
 	return 1UL << fls_long(n - 1);
 }
 
+/*
+ * round down to nearest power of two
+ */
+static inline __attribute__((const))
+unsigned long __rounddown_pow_of_two(unsigned long n)
+{
+	return 1UL << (fls_long(n) - 1);
+}
+
 /**
  * ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value
  * @n - parameter
@@ -166,4 +175,20 @@
 	__roundup_pow_of_two(n)			\
  )
 
+/**
+ * rounddown_pow_of_two - round the given value down to nearest power of two
+ * @n - parameter
+ *
+ * round the given value down to the nearest power of two
+ * - the result is undefined when n == 0
+ * - this can be used to initialise global variables from constant data
+ */
+#define rounddown_pow_of_two(n)			\
+(						\
+	__builtin_constant_p(n) ? (		\
+		(n == 1) ? 0 :			\
+		(1UL << ilog2(n))) :		\
+	__rounddown_pow_of_two(n)		\
+ )
+
 #endif /* _LINUX_LOG2_H */
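
A usage sketch of the rounddown_pow_of_two() added above (illustrative only; example_clamp_ring_size is a made-up name):

static unsigned long example_clamp_ring_size(unsigned long requested)
{
        /* e.g. 1500 -> 1024; like roundup_pow_of_two(), undefined for 0 */
        return rounddown_pow_of_two(requested);
}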

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/magic.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/magic.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/magic.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,6 @@
+#ifndef BACKPORT_LINUX_MAGIC_H
+#define BACKPORT_LINUX_MAGIC_H
+
+#define NFS_SUPER_MAGIC		0x6969
+
+#endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/mm.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/mm.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/mm.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -2,26 +2,39 @@
 #define _BACKPORT_LINUX_MM_H_
 
 #include_next <linux/mm.h>
+#include <linux/vmstat.h>
 
 #if defined(__i386__)
 #include <asm/highmem.h>
 #endif
 
-/*
- * Determine if an address is within the vmalloc range
- *
- * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
- * is no special casing required.
- */
-static inline int is_vmalloc_addr(const void *x)
+#define VM_CAN_NONLINEAR 0x08000000     /* Has ->fault & does nonlinear pages */
+
+#define is_vmalloc_addr(x) ((unsigned long)(x) >= VMALLOC_START && (unsigned long)(x) < VMALLOC_END)
+
+struct shrinker {
+	shrinker_t		shrink;
+	struct list_head	list;
+	int			seeks;  /* seeks to recreate an obj */
+	long			nr;     /* objs pending delete */
+};
+
+static inline void task_io_account_cancelled_write(size_t bytes)
 {
-#ifdef CONFIG_MMU
-	unsigned long addr = (unsigned long)x;
+}
 
-	return addr >= VMALLOC_START && addr < VMALLOC_END;
-#else
-	return 0;
-#endif
+static inline void cancel_dirty_page(struct page *page, unsigned int account_size)
+{
+	if (TestClearPageDirty(page)) {
+		struct address_space *mapping = page->mapping;
+		if (mapping && mapping_cap_account_dirty(mapping)) {
+			dec_zone_page_state(page, NR_FILE_DIRTY);
+			dec_bdi_stat(mapping->backing_dev_info,
+					BDI_RECLAIMABLE);
+			if (account_size)
+				task_io_account_cancelled_write(account_size);
+		}
+	}
 }
 
 #endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/mount.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/mount.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/mount.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,11 @@
+#ifndef BACKPORT_LINUX_MOUNT_H
+#define BACKPORT_LINUX_MOUNT_H
+
+#include_next <linux/mount.h>
+#include <linux/fs.h>
+
+extern int mnt_want_write(struct vfsmount *mnt);
+extern void mnt_drop_write(struct vfsmount *mnt);
+extern int init_mnt_writers(void);
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/mpage.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/mpage.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/mpage.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,15 @@
+#ifndef BACKPORT_LINUX_MPAGE_H
+#define BACKPORT_LINUX_MPAGE_H
+
+#include_next <linux/mpage.h>
+#include <linux/pagevec.h>
+
+typedef int (*backport_writepage_t)(struct page *page, struct writeback_control *wbc,
+                                void *data);
+
+extern int backport_write_cache_pages(struct address_space *mapping,
+                      struct writeback_control *wbc, backport_writepage_t writepage,
+                      void *data);
+
+#define write_cache_pages backport_write_cache_pages
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/namei.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/namei.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/namei.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,27 @@
+#ifndef BACKPORT_LINUX_NAMEI_H
+#define BACKPORT_LINUX_NAMEI_H
+
+#include_next <linux/namei.h>
+#include <linux/mount.h>
+
+static inline int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
+		    const char *name, unsigned int flags,
+		    struct nameidata *nd)
+{
+	int retval;
+
+	/* same as do_path_lookup */
+	nd->last_type = LAST_ROOT;
+	nd->flags = flags;
+	nd->depth = 0;
+
+	nd->dentry = dentry;
+	nd->mnt = mnt;
+	mntget(nd->mnt);
+	dget(nd->dentry);
+
+	retval = path_walk(name, nd);
+
+	return retval;
+}
+#endif /* BACKPORT_LINUX_NAMEI_H */

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/net.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/net.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/net.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -4,4 +4,16 @@
 #include_next <linux/net.h>
 #include <linux/random.h>
 
+enum sock_shutdown_cmd {
+	SHUT_RD		= 0,
+	SHUT_WR		= 1,
+	SHUT_RDWR	= 2,
+};
+
+
+static inline int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd flags)
+{
+	return sock->ops->shutdown(sock, flags);
+}
+
 #endif
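
A minimal caller of the backported kernel_sock_shutdown() above (illustrative only; example_half_close is a made-up name):

static int example_half_close(struct socket *sock)
{
        /* stop transmitting on a kernel socket but keep receiving */
        return kernel_sock_shutdown(sock, SHUT_WR);
}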

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/netdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/netdevice.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/netdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -15,6 +15,7 @@
 	(netdev)->ethtool_ops = (struct ethtool_ops *)(ops)
 
 #define NETIF_F_LRO		32768   /* large receive offload */
+#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
 
 #define dev_get_by_name(net, name) dev_get_by_name(name)
 #endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/pagemap.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/pagemap.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/pagemap.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,8 @@
+#ifndef BACKPORT_LINUX_PAGEMAP_H
+#define BACKPORT_LINUX_PAGEMAP_H
+
+#include_next <linux/pagemap.h>
+
+#define __grab_cache_page	grab_cache_page
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/path.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/path.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/path.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,36 @@
+#ifndef _BACKPORT_LINUX_PATH_H
+#define _BACKPORT_LINUX_PATH_H
+
+#include <linux/mount.h>
+#include <linux/namei.h>
+
+struct path {
+	struct vfsmount *mnt;
+	struct dentry *dentry;
+};
+
+static inline void path_put(struct path *path)
+{
+	dput(path->dentry);
+	mntput(path->mnt);
+}
+
+static inline void path_get(struct path *path)
+{
+	mntget(path->mnt);
+	dget(path->dentry);
+}
+
+static inline void backport_path_put(struct nameidata *nd)
+{
+	dput(nd->dentry);
+	mntput(nd->mnt);
+}
+
+static inline void backport_path_get(struct nameidata *nd)
+{
+	mntget(nd->mnt);
+	dget(nd->dentry);
+}
+
+#endif  /* _BACKPORT_LINUX_PATH_H */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/proc_fs.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/proc_fs.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/proc_fs.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,30 @@
+#ifndef BACKPORT_LINUX_PROC_FS_H
+#define BACKPORT_LINUX_PROC_FS_H
+
+#include_next <linux/proc_fs.h>
+
+static inline struct proc_dir_entry *proc_create(const char *name,
+	mode_t mode, struct proc_dir_entry *parent,
+	const struct file_operations *fops)
+{
+	struct proc_dir_entry *res = create_proc_entry(name, mode, parent);
+	if (res)
+		res->proc_fops = fops;
+	return res;
+}
+
+static inline struct proc_dir_entry *proc_create_data(const char *name, mode_t mode,
+				struct proc_dir_entry *parent,
+				const struct file_operations *proc_fops,
+				void *data)
+{
+		struct proc_dir_entry *pde;
+
+		pde = proc_create(name, mode, parent, proc_fops);
+		if (pde)
+			pde->data = data;
+
+		return pde;
+}
+
+#endif
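
A usage sketch of the backported proc_create_data() above (illustrative only; the entry name and the example_* identifiers are made up, and example_fops stands in for a real file_operations defined elsewhere):

static const struct file_operations example_fops;      /* hypothetical */

static struct proc_dir_entry *example_proc_init(void *priv)
{
        /* creates /proc/example and stashes priv in ->data */
        return proc_create_data("example", 0444, NULL, &example_fops, priv);
}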

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/radix-tree.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/radix-tree.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/radix-tree.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,16 @@
+#ifndef BACKPORT_LINUX_RADIX_TREE_H
+#define BACKPORT_LINUX_RADIX_TREE_H
+
+#include_next <linux/radix-tree.h>
+#if 0
+static inline int radix_tree_preload(gfp_t gfp_mask)
+{
+	return 0;
+}
+
+static inline void radix_tree_preload_end(void)
+{
+}
+
+#endif
+#endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/scatterlist.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/scatterlist.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/scatterlist.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,23 +1,14 @@
 #ifndef __BACKPORT_LINUX_SCATTERLIST_H_TO_2_6_23__
 #define __BACKPORT_LINUX_SCATTERLIST_H_TO_2_6_23__
+
 #include_next<linux/scatterlist.h>
+#include <linux/ncrypto.h>
 
-static inline void sg_set_page(struct scatterlist *sg, struct page *page,
-                               unsigned int len, unsigned int offset)
-{
-	sg->page = page;
-	sg->offset = offset;
-	sg->length = len;
-}
-
 static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
 {
 	sg->page = page;
 }
 
-#define sg_page(a) (a)->page
-#define sg_init_table(a, b)
-
 #define for_each_sg(sglist, sg, nr, __i)	\
 	for (__i = 0, sg = (sglist); __i < (nr); __i++, sg++)
 

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/sched.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/sched.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/sched.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,26 @@
+#ifndef LINUX_SCHED_BACKPORT_H
+#define LINUX_SCHED_BACKPORT_H
+
+#include_next <linux/sched.h>
+
+#define TASK_WAKEKILL	   128
+
+#define TASK_KILLABLE	   (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
+
+#define schedule_timeout_killable(_arg) schedule_timeout_interruptible(_arg)
+
+static inline int __fatal_signal_pending(struct task_struct *tsk)
+{
+	return sigismember(&tsk->pending.signal, SIGKILL);
+}
+
+static inline int fatal_signal_pending(struct task_struct *p)
+{
+	return signal_pending(p) && __fatal_signal_pending(p);
+}
+
+static inline pid_t task_pid_nr(struct task_struct *tsk)
+{
+	return tsk->pid;
+}
+#endif
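
A usage sketch of the backported fatal_signal_pending() above (illustrative only; example_poll_until is a made-up name):

static int example_poll_until(int (*done)(void))
{
        while (!done()) {
                /* bail out promptly if the caller was sent SIGKILL */
                if (fatal_signal_pending(current))
                        return -EINTR;
                schedule_timeout_interruptible(HZ / 10);
        }
        return 0;
}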

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/security.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/security.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/security.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,54 @@
+#ifndef BACKPORT_LINUX_SECURITY_H
+#define BACKPORT_LINUX_SECURITY_H
+
+#include_next <linux/security.h>
+
+struct security_mnt_opts {
+	char **mnt_opts;
+	int *mnt_opts_flags;
+	int num_mnt_opts;
+};
+
+static inline void security_init_mnt_opts(struct security_mnt_opts *opts)
+{
+	opts->mnt_opts = NULL;
+	opts->mnt_opts_flags = NULL;
+	opts->num_mnt_opts = 0;
+}
+
+static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
+{
+	int i;
+	if (opts->mnt_opts)
+		for (i = 0; i < opts->num_mnt_opts; i++)
+			kfree(opts->mnt_opts[i]);
+	kfree(opts->mnt_opts);
+	opts->mnt_opts = NULL;
+	kfree(opts->mnt_opts_flags);
+	opts->mnt_opts_flags = NULL;
+	opts->num_mnt_opts = 0;
+}
+
+static inline int security_sb_set_mnt_opts(struct super_block *sb,
+					   struct security_mnt_opts *opts)
+{
+	return 0;
+}
+
+static inline void security_sb_clone_mnt_opts(const struct super_block *oldsb,
+					      struct super_block *newsb)
+{ }
+
+static inline int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts)
+{
+	return 0;
+}
+
+static inline int backport_security_sb_copy_data(void *orig, void *copy)
+{
+	return 0;
+}
+
+#define security_sb_copy_data(a,b) backport_security_sb_copy_data(a,b)
+
+#endif /* BACKPORT_LINUX_SECURITY_H */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/seq_file.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/seq_file.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/seq_file.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,61 @@
+#ifndef BACKPORT_LINUX_SEQ_FILE_H
+#define BACKPORT_LINUX_SEQ_FILE_H
+
+#include_next <linux/seq_file.h>
+#include <linux/fs.h>
+
+static inline struct list_head *seq_list_start(struct list_head *head, loff_t pos)
+{
+	struct list_head *lh;
+
+	list_for_each(lh, head)
+		if (pos-- == 0)
+			return lh;
+
+	return NULL;
+}
+
+static inline struct list_head *seq_list_start_head(struct list_head *head, loff_t pos)
+{
+	if (!pos)
+		return head;
+
+	return seq_list_start(head, pos - 1);
+}
+
+static inline struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos)
+{
+	struct list_head *lh;
+
+	lh = ((struct list_head *)v)->next;
+	++*ppos;
+	return lh == head ? NULL : lh;
+}
+
+static inline void *__seq_open_private(struct file *f, struct seq_operations *ops,
+		int psize)
+{
+	int rc;
+	void *private;
+	struct seq_file *seq;
+
+	private = kzalloc(psize, GFP_KERNEL);
+	if (private == NULL)
+		goto out;
+
+	rc = seq_open(f, ops);
+	if (rc < 0)
+		goto out_free;
+
+	seq = f->private_data;
+	seq->private = private;
+	return private;
+
+out_free:
+	kfree(private);
+out:
+	return NULL;
+}
+
+
+#endif
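
A sketch of how the seq_list_*() helpers above slot into a seq_operations implementation (illustrative only; all example_* names are made up):

static LIST_HEAD(example_list);
static DEFINE_SPINLOCK(example_lock);

static void *example_seq_start(struct seq_file *m, loff_t *pos)
{
        spin_lock(&example_lock);
        return seq_list_start(&example_list, *pos);
}

static void *example_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
        return seq_list_next(v, &example_list, pos);
}

static void example_seq_stop(struct seq_file *m, void *v)
{
        spin_unlock(&example_lock);
}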

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/skbuff.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/skbuff.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/skbuff.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -15,4 +15,9 @@
 #define transport_header h.raw
 #define network_header nh.raw
 
+static inline int skb_csum_unnecessary(const struct sk_buff *skb)
+{
+	return skb->ip_summed & CHECKSUM_UNNECESSARY;
+}
+
 #endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/string.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/string.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/string.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,24 @@
+#ifndef BACKPORT_LINUX_STRING_H
+#define BACKPORT_LINUX_STRING_H
+
+#include_next <linux/string.h>
+
+extern void *__kmalloc(size_t, gfp_t);
+
+static inline char *kstrndup(const char *s, size_t max, gfp_t gfp)
+{
+	size_t len;
+	char *buf;
+
+	if (!s)
+		return NULL;
+
+	len = strnlen(s, max);
+	buf = __kmalloc(len+1, gfp);
+	if (buf) {
+		memcpy(buf, s, len);
+		buf[len] = '\0';
+	}
+	return buf;
+}
+#endif /* BACKPORT_LINUX_STRING_H */
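
A minimal caller of the backported kstrndup() above (illustrative only; example_dup_label is a made-up name):

static char *example_dup_label(const char *src)
{
        /* bounded copy: at most 15 characters plus the terminating NUL;
         * the caller must kfree() the result */
        return kstrndup(src, 15, GFP_KERNEL);
}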

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/swap.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/swap.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/swap.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,28 @@
+#ifndef LINUX_SWAP_BACKPORT_H
+#define LINUX_SWAP_BACKPORT_H
+
+#include_next <linux/swap.h>
+
+static inline unsigned int backport_nr_free_buffer_pages(void)
+{
+	/* Just pick one node, since fallback list is circular */
+	pg_data_t *pgdat = NODE_DATA(numa_node_id());
+	unsigned int sum = 0;
+
+	struct zonelist *zonelist = pgdat->node_zonelists + gfp_zone(GFP_USER);
+	struct zone **zonep = zonelist->zones;
+	struct zone *zone;
+
+	for (zone = *zonep++; zone; zone = *zonep++) {
+		unsigned long size = zone->present_pages;
+		unsigned long high = zone->pages_high;
+		if (size > high)
+			sum += size - high;
+	}
+
+	return sum;
+}
+
+#define nr_free_buffer_pages backport_nr_free_buffer_pages
+
+#endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/sysctl.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/sysctl.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/sysctl.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -8,6 +8,8 @@
 #define CTL_NONE	0
 #define CTL_UNNUMBERED	-2	/* sysctl without a binary number */
 
+#define CTL_SUNRPC	7249        /* sunrpc debug */
+
 /* struct ctl_path describes where in the hierarchy a table is added */
 struct ctl_path {
 	const char *procname;
@@ -71,4 +73,11 @@
 
 #define unregister_sysctl_table(hdr)	fake_unregister_sysctl_table(hdr)
 
+static inline struct ctl_table_header *
+backport_register_sysctl_table(ctl_table *table) {
+	return register_sysctl_table(table, 0);
+}
+
+#define register_sysctl_table backport_register_sysctl_table
+
 #endif /* __BACKPORT_SYSCTL_H_TO_2_6_18__ */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/unaligned/access_ok.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/unaligned/access_ok.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/unaligned/access_ok.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,67 @@
+#ifndef _LINUX_UNALIGNED_ACCESS_OK_H
+#define _LINUX_UNALIGNED_ACCESS_OK_H
+
+#include <linux/kernel.h>
+#include <asm/byteorder.h>
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+	return le16_to_cpup((__le16 *)p);
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+	return le32_to_cpup((__le32 *)p);
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+	return le64_to_cpup((__le64 *)p);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+	return be16_to_cpup((__be16 *)p);
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+	return be32_to_cpup((__be32 *)p);
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+	return be64_to_cpup((__be64 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	*((__le16 *)p) = cpu_to_le16(val);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	*((__le32 *)p) = cpu_to_le32(val);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	*((__le64 *)p) = cpu_to_le64(val);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	*((__be16 *)p) = cpu_to_be16(val);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	*((__be32 *)p) = cpu_to_be32(val);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	*((__be64 *)p) = cpu_to_be64(val);
+}
+
+#endif /* _LINUX_UNALIGNED_ACCESS_OK_H */
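
A usage sketch of the unaligned accessors added above (illustrative only; example_read_le32 is a made-up name):

static u32 example_read_le32(const u8 *buf, unsigned int offset)
{
        /* reads a little-endian 32-bit field at an arbitrary byte offset */
        return get_unaligned_le32(buf + offset);
}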

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/wait.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/wait.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/wait.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,47 @@
+#ifndef BACKPORT_LINUX_WAIT_H
+#define BACKPORT_LINUX_WAIT_H
+
+#include_next <linux/wait.h>
+
+#define __wait_event_killable(wq, condition, ret)		\
+do {								\
+	DEFINE_WAIT(__wait);					\
+								\
+	for (;;) {						\
+		prepare_to_wait(&wq, &__wait, TASK_KILLABLE);	\
+		if (condition)					\
+			break;					\
+		if (!fatal_signal_pending(current)) {		\
+			schedule();				\
+			continue;				\
+		}						\
+		ret = -ERESTARTSYS;				\
+		break;						\
+	}							\
+	finish_wait(&wq, &__wait);				\
+} while (0)
+
+/**
+ * wait_event_killable - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_KILLABLE) until the
+ * @condition evaluates to true or a signal is received.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function will return -ERESTARTSYS if it was interrupted by a
+ * signal and 0 if @condition evaluated to true.
+ */
+#define wait_event_killable(wq, condition)			\
+({								\
+	int __ret = 0;						\
+	if (!(condition))					\
+		__wait_event_killable(wq, condition, __ret);	\
+	__ret;							\
+})
+
+#endif
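
A minimal caller of the wait_event_killable() macro defined above (illustrative only; example_wait_for_flag is a made-up name):

static int example_wait_for_flag(wait_queue_head_t *wq, int *flag)
{
        /* 0 once *flag is non-zero, -ERESTARTSYS if the task is killed */
        return wait_event_killable(*wq, *flag != 0);
}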

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/workqueue.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/workqueue.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/linux/workqueue.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -60,5 +60,13 @@
 #define cancel_delayed_work_sync cancel_delayed_work
 #define cancel_rearming_delayed_workqueue backport_cancel_rearming_delayed_workqueue
 #define schedule_delayed_work backport_schedule_delayed_work
+#define cancel_delayed_work_sync cancel_delayed_work
 
+static inline void backport_cancel_rearming_delayed_work(struct delayed_work *work)
+{
+	cancel_delayed_work_sync(work);
+}
+
+#define cancel_rearming_delayed_work backport_cancel_rearming_delayed_work
+
 #endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/net/ipv6.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/net/ipv6.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/net/ipv6.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,20 @@
+#ifndef BACKPORT_NET_IPV6_H
+#define BACKPORT_NET_IPV6_H
+
+#include_next <net/ipv6.h>
+
+static inline void ipv6_addr_set_v4mapped(const __be32 addr,
+					  struct in6_addr *v4mapped)
+{
+	ipv6_addr_set(v4mapped,
+			0, 0,
+			htonl(0x0000FFFF),
+			addr);
+}
+
+static inline int ipv6_addr_v4mapped(const struct in6_addr *a)
+{
+	return ((a->s6_addr32[0] | a->s6_addr32[1] |
+		(a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0);
+}
+#endif
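
A usage sketch of the backported ipv6_addr_set_v4mapped() above (illustrative only; example_map_loopback is a made-up name):

static void example_map_loopback(struct in6_addr *a)
{
        /* builds the IPv4-mapped address ::ffff:127.0.0.1 */
        ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK), a);
}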

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/net/rtnetlink.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/net/rtnetlink.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/net/rtnetlink.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,6 @@
+#ifndef __BACKPORT_RTNETLINK_TO_2_6_27__
+#define __BACKPORT_RTNETLINK_TO_2_6_27__
+
+#include <linux/rtnetlink.h>
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/net/udp.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/net/udp.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/net/udp.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,9 @@
+#ifndef BACKPORT_NET_UDP_H
+#define BACKPORT_NET_UDP_H
+
+#include_next <net/udp.h>
+
+static inline void UDPX_INC_STATS_BH(struct sock *sk, int field)
+{ }
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/scsi/scsi_device.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/scsi/scsi_device.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/scsi/scsi_device.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,8 @@
+#ifndef SCSI_SCSI_DEVICE_BACKPORT_TO_2_6_26_H
+#define SCSI_SCSI_DEVICE_BACKPORT_TO_2_6_26_H
+
+#include_next <scsi/scsi_device.h>
+
+#define __starget_for_each_device(scsi_target, p, fn) starget_for_each_device(scsi_target, p, fn)
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/scsi/scsi_transport.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/scsi/scsi_transport.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/scsi/scsi_transport.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,7 @@
+#ifndef SCSI_SCSI_TRANSPORT_BACKPORT_TO_2_6_22_H
+#define SCSI_SCSI_TRANSPORT_BACKPORT_TO_2_6_22_H
+
+#include <scsi/scsi_device.h>
+#include_next <scsi/scsi_transport.h>
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/src/namespace.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/src/namespace.c	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/src/namespace.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,95 @@
+#include <linux/spinlock_types.h>
+#include <linux/percpu.h>
+#include <linux/mount.h>
+#include <linux/module.h>
+
+struct mnt_writer {
+	/*
+	 * If holding multiple instances of this lock, they
+	 * must be ordered by cpu number.
+	 */
+	spinlock_t lock;
+	struct lock_class_key lock_class; /* compiles out with !lockdep */
+	unsigned long count;
+	struct vfsmount *mnt;
+} ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU(struct mnt_writer, mnt_writers);
+
+int __init init_mnt_writers(void)
+{
+	int cpu;
+	for_each_possible_cpu(cpu) {
+		struct mnt_writer *writer = &per_cpu(mnt_writers, cpu);
+		spin_lock_init(&writer->lock);
+		lockdep_set_class(&writer->lock, &writer->lock_class);
+		writer->count = 0;
+	}
+	return 0;
+}
+
+static inline void __clear_mnt_count(struct mnt_writer *cpu_writer)
+{
+	if (!cpu_writer->mnt)
+		return;
+	/*
+	 * This is in case anyone ever leaves an invalid,
+	 * old ->mnt and a count of 0.
+	 */
+	if (!cpu_writer->count)
+		return;
+	cpu_writer->count = 0;
+}
+
+static inline void use_cpu_writer_for_mount(struct mnt_writer *cpu_writer,
+					  struct vfsmount *mnt)
+{
+	if (cpu_writer->mnt == mnt)
+		return;
+	__clear_mnt_count(cpu_writer);
+	cpu_writer->mnt = mnt;
+}
+
+int mnt_want_write(struct vfsmount *mnt)
+{
+	int ret = 0;
+	struct mnt_writer *cpu_writer;
+
+	cpu_writer = &get_cpu_var(mnt_writers);
+	spin_lock(&cpu_writer->lock);
+	if (__mnt_is_readonly(mnt)) {
+		ret = -EROFS;
+		goto out;
+	}
+	use_cpu_writer_for_mount(cpu_writer, mnt);
+	cpu_writer->count++;
+out:
+	spin_unlock(&cpu_writer->lock);
+	put_cpu_var(mnt_writers);
+	return ret;
+}
+EXPORT_SYMBOL(mnt_want_write);
+
+void mnt_drop_write(struct vfsmount *mnt)
+{
+	struct mnt_writer *cpu_writer;
+
+	cpu_writer = &get_cpu_var(mnt_writers);
+	spin_lock(&cpu_writer->lock);
+
+	use_cpu_writer_for_mount(cpu_writer, mnt);
+	if (cpu_writer->count > 0) {
+		cpu_writer->count--;
+	}
+
+	spin_unlock(&cpu_writer->lock);
+	/*
+	 * This could be done right after the spinlock
+	 * is taken because the spinlock keeps us on
+	 * the cpu, and disables preemption.  However,
+	 * putting it here bounds the amount that
+	 * __mnt_writers can underflow.  Without it,
+	 * we could theoretically wrap __mnt_writers.
+	 */
+	put_cpu_var(mnt_writers);
+}
+EXPORT_SYMBOL(mnt_drop_write);
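
The usual bracketing around a write to a mount, using the mnt_want_write()/mnt_drop_write() pair defined above (illustrative only; example_write_to_mount is a made-up name):

static int example_write_to_mount(struct vfsmount *mnt)
{
        int err = mnt_want_write(mnt);

        if (err)
                return err;     /* -EROFS on a read-only mount */
        /* ... modify the filesystem here ... */
        mnt_drop_write(mnt);
        return 0;
}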

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/src/writeback.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/src/writeback.c	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.2/include/src/writeback.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,106 @@
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/pagevec.h>
+#include <linux/writeback.h>
+#include <linux/mpage.h>
+#include <linux/module.h>
+
+int write_cache_pages(struct address_space *mapping,
+                      struct writeback_control *wbc, backport_writepage_t writepage,
+                      void *data)
+{
+        struct backing_dev_info *bdi = mapping->backing_dev_info;
+        int ret = 0;
+        int done = 0;
+        struct pagevec pvec;
+        int nr_pages;
+        pgoff_t index;
+        pgoff_t end;            /* Inclusive */
+        int scanned = 0;
+        int range_whole = 0;
+        long nr_to_write = wbc->nr_to_write;
+
+        if (wbc->nonblocking && bdi_write_congested(bdi)) {
+                wbc->encountered_congestion = 1;
+                return 0;
+        }
+
+        pagevec_init(&pvec, 0);
+        if (wbc->range_cyclic) {
+                index = mapping->writeback_index; /* Start from prev offset */
+                end = -1;
+        } else {
+                index = wbc->range_start >> PAGE_CACHE_SHIFT;
+                end = wbc->range_end >> PAGE_CACHE_SHIFT;
+                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+                        range_whole = 1;
+                scanned = 1;
+        }
+retry:
+        while (!done && (index <= end) &&
+               (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+                                              PAGECACHE_TAG_DIRTY,
+                                              min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+                unsigned i;
+
+                scanned = 1;
+                for (i = 0; i < nr_pages; i++) {
+                        struct page *page = pvec.pages[i];
+
+                        /*
+                         * At this point we hold neither mapping->tree_lock nor
+                         * lock on the page itself: the page may be truncated or
+                         * invalidated (changing page->mapping to NULL), or even
+                         * swizzled back from swapper_space to tmpfs file
+                         * mapping
+                         */
+                        lock_page(page);
+
+                        if (unlikely(page->mapping != mapping)) {
+                                unlock_page(page);
+                                continue;
+                        }
+
+                        if (!wbc->range_cyclic && page->index > end) {
+                                done = 1;
+                                unlock_page(page);
+                                continue;
+                        }
+
+                        if (wbc->sync_mode != WB_SYNC_NONE)
+                                wait_on_page_writeback(page);
+
+                        if (PageWriteback(page) ||
+                            !clear_page_dirty_for_io(page)) {
+                                unlock_page(page);
+                                continue;
+                        }
+
+                        ret = (*writepage)(page, wbc, data);
+
+                        if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
+                                unlock_page(page);
+                                ret = 0;
+                        }
+                        if (ret || (--nr_to_write <= 0))
+                                done = 1;
+                        if (wbc->nonblocking && bdi_write_congested(bdi)) {
+                                wbc->encountered_congestion = 1;
+                                done = 1;
+                        }
+                }
+                pagevec_release(&pvec);
+                cond_resched();
+        }
+        if (!scanned && !done) {
+                /*
+                 * We hit the last page and there is more work to be done: wrap
+                 * back to the start of the file
+                 */
+                scanned = 1;
+                index = 0;
+                goto retry;
+        }
+        return ret;
+}
+EXPORT_SYMBOL(write_cache_pages);
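
A sketch of how a filesystem's ->writepages() might drive this backported write_cache_pages() (illustrative only; both example_* functions are made up, and a real example_writepage() would start I/O rather than just unlocking):

static int example_writepage(struct page *page, struct writeback_control *wbc,
                             void *data)
{
        /* write_cache_pages() hands the page over locked; the callback
         * must unlock it (normally after starting writeback) */
        unlock_page(page);
        return 0;
}

static int example_writepages(struct address_space *mapping,
                              struct writeback_control *wbc)
{
        return write_cache_pages(mapping, wbc, example_writepage,
                                 mapping->host);
}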

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/asm/prom.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/asm/prom.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/asm/prom.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,8 @@
+#ifndef ASM_PROM_BACKPORT_TO_2_6_21_H
+#define ASM_PROM_BACKPORT_TO_2_6_21_H
+
+#include_next <asm/prom.h>
+
+#define of_get_property(a, b, c)	get_property((a), (b), (c))
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/asm/scatterlist.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/asm/scatterlist.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/asm/scatterlist.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,5 @@
+#if defined(__ia64__)
+#include <linux/pci.h>
+#endif
+#include <asm/types.h>
+#include_next <asm/scatterlist.h>

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/asm/unaligned.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/asm/unaligned.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/asm/unaligned.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,7 @@
+#ifndef ASM_UNALIGNED_BACKPORT_TO_2_6_27_H
+#define ASM_UNALIGNED_BACKPORT_TO_2_6_27_H
+
+#include <linux/unaligned/access_ok.h>
+#include_next <asm/unaligned.h>
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/asm-generic/atomic.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/asm-generic/atomic.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/asm-generic/atomic.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,40 @@
+#ifndef __BACKPORT_ASM_GENERIC_ATOMIC_H
+#define __BACKPORT_ASM_GENERIC_ATOMIC_H
+
+#include_next <asm-generic/atomic.h>
+
+#if BITS_PER_LONG == 64
+
+static inline long atomic_long_inc_return(atomic_long_t *l)
+{
+	atomic64_t *v = (atomic64_t *)l;
+
+	return (long)atomic64_inc_return(v);
+}
+
+static inline long atomic_long_dec_return(atomic_long_t *l)
+{
+	atomic64_t *v = (atomic64_t *)l;
+
+	return (long)atomic64_dec_return(v);
+}
+
+#else
+
+static inline long atomic_long_inc_return(atomic_long_t *l)
+{
+	atomic_t *v = (atomic_t *)l;
+
+	return (long)atomic_inc_return(v);
+}
+
+static inline long atomic_long_dec_return(atomic_long_t *l)
+{
+	atomic_t *v = (atomic_t *)l;
+
+	return (long)atomic_dec_return(v);
+}
+
+#endif  /*  BITS_PER_LONG == 64  */
+
+#endif  /*  __BACKPORT_ASM_GENERIC_ATOMIC_H  */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/backing-dev.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/backing-dev.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/backing-dev.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,67 @@
+#ifndef BACKPORT_LINUX_BACK_DEV_H
+#define BACKPORT_LINUX_BACK_DEV_H
+
+#include_next <linux/backing-dev.h>
+
+enum bdi_stat_item {
+	BDI_RECLAIMABLE,
+	BDI_WRITEBACK,
+	NR_BDI_STAT_ITEMS
+};
+
+
+static inline void inc_bdi_stat(struct backing_dev_info *bdi,
+		enum bdi_stat_item item)
+{
+	return;
+}
+
+static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
+		enum bdi_stat_item item)
+{
+	return;
+}
+
+static inline void dec_bdi_stat(struct backing_dev_info *bdi,
+		enum bdi_stat_item item)
+{
+	return;
+}
+
+static inline int bdi_init(struct backing_dev_info *bdi)
+{
+	return 0;
+}
+
+static inline void bdi_destroy(struct backing_dev_info *bdi)
+{
+	return;
+}
+
+static inline int bdi_register(struct backing_dev_info *bdi, struct device *parent,
+				const char *fmt, ...)
+{
+	return 0;
+}
+
+static inline int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
+{
+	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
+}
+
+static inline void bdi_unregister(struct backing_dev_info *bdi)
+{
+	return;
+}
+
+static inline void clear_bdi_congested(struct backing_dev_info *bdi, int rw)
+{
+	return;
+}
+
+static inline void set_bdi_congested(struct backing_dev_info *bdi, int rw)
+{
+	return;
+}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/capability.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/capability.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/capability.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,39 @@
+#ifndef BACKPORT_LINUX_CAPABILITY_H
+#define BACKPORT_LINUX_CAPABILITY_H
+
+#include_next <linux/capability.h>
+
+/* Override MAC access.
+   The base kernel enforces no MAC policy.
+   An LSM may enforce a MAC policy, and if it does and it chooses
+   to implement capability based overrides of that policy, this is
+   the capability it should use to do so. */
+
+#define CAP_MAC_OVERRIDE     32
+
+#define CAP_FS_MASK_B0	(CAP_TO_MASK(CAP_CHOWN)			\
+			 | CAP_TO_MASK(CAP_DAC_OVERRIDE)	\
+			 | CAP_TO_MASK(CAP_DAC_READ_SEARCH)	\
+			 | CAP_TO_MASK(CAP_FOWNER)		\
+			 | CAP_TO_MASK(CAP_FSETID))
+
+#define CAP_FS_MASK_B1	(CAP_TO_MASK(CAP_MAC_OVERRIDE))
+
+#define CAP_NFSD_SET	(CAP_FS_MASK_B0|CAP_TO_MASK(CAP_SYS_RESOURCE))
+#define CAP_FS_SET	(CAP_FS_MASK_B0)
+
+static inline kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a,
+					      const kernel_cap_t permitted)
+{
+	const kernel_cap_t __cap_nfsd_set = CAP_NFSD_SET;
+	return cap_combine(a,
+			   cap_intersect(permitted, __cap_nfsd_set));
+}
+
+static inline kernel_cap_t cap_drop_nfsd_set(const kernel_cap_t a)
+{
+	const kernel_cap_t __cap_fs_set = CAP_NFSD_SET;
+	return cap_drop(a, __cap_fs_set);
+}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/compiler.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/compiler.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/compiler.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,8 @@
+#ifndef BACKPORT_LINUX_COMPILER_TO_2_6_22_H
+#define BACKPORT_LINUX_COMPILER_TO_2_6_22_H
+
+#include_next <linux/compiler.h>
+
+#define uninitialized_var(x) x = x
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/completion.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/completion.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/completion.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,8 @@
+#ifndef BACKPORT_LINUX_COMPLETION_H
+#define BACKPORT_LINUX_COMPLETION_H
+
+#include_next <linux/completion.h>
+
+#define wait_for_completion_killable(_args) wait_for_completion_interruptible(_args)
+
+#endif /* BACKPORT_LINUX_COMPLETION_H */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/crypto.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/crypto.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/crypto.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,9 @@
+#ifndef BACKPORT_LINUX_CRYPTO_H
+#define BACKPORT_LINUX_CRYPTO_H
+
+#include_next <linux/crypto.h>
+#include <linux/ncrypto.h>
+
+#define CRYPTO_ALG_ASYNC	NCRYPTO_ALG_ASYNC 
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/dma-attrs.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/dma-attrs.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/dma-attrs.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,47 @@
+#ifndef __BACKPORT_DMA_ATTR_H_TO_2_6_25__
+#define __BACKPORT_DMA_ATTR_H_TO_2_6_25__
+
+/**
+ * an enum dma_attr represents an attribute associated with a DMA
+ * mapping. The semantics of each attribute should be defined in
+ * Documentation/DMA-attributes.txt.
+ */
+enum dma_attr {
+	DMA_ATTR_WRITE_BARRIER,
+	DMA_ATTR_MAX,
+};
+
+#define __DMA_ATTRS_LONGS BITS_TO_LONGS(DMA_ATTR_MAX)
+
+/**
+ * struct dma_attrs - an opaque container for DMA attributes
+ * @flags - bitmask representing a collection of enum dma_attr
+ */
+struct dma_attrs {
+	unsigned long flags[__DMA_ATTRS_LONGS];
+};
+
+#define DEFINE_DMA_ATTRS(x) 					\
+	struct dma_attrs x = {					\
+		.flags = { [0 ... __DMA_ATTRS_LONGS-1] = 0 },	\
+	}
+
+#ifdef CONFIG_HAVE_DMA_ATTRS
+/**
+ * dma_set_attr - set a specific attribute
+ * @attr: attribute to set
+ * @attrs: struct dma_attrs (may be NULL)
+ */
+static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs)
+{
+	if (attrs == NULL)
+		return;
+	BUG_ON(attr >= DMA_ATTR_MAX);
+	__set_bit(attr, attrs->flags);
+}
+#else /* !CONFIG_HAVE_DMA_ATTRS */
+static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs)
+{
+}
+#endif /* CONFIG_HAVE_DMA_ATTRS */
+#endif /* __BACKPORT_DMA_ATTR_H_TO_2_6_25__ */
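
An illustrative caller sketch for the backported DMA-attributes container above
(hypothetical driver code, not part of this patch; dma_map_single_attrs() comes
from the dma-mapping.h backport added below). Without CONFIG_HAVE_DMA_ATTRS the
dma_set_attr() call is a no-op and the attribute is simply ignored:

    #include <linux/dma-attrs.h>
    #include <linux/dma-mapping.h>

    static dma_addr_t example_map_tx_buf(struct device *dev, void *buf, size_t len)
    {
        DEFINE_DMA_ATTRS(attrs);                      /* all attribute bits cleared */

        dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); /* no-op on this kernel */
        return dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, &attrs);
    }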

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/dma-mapping.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/dma-mapping.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/dma-mapping.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,23 @@
+#ifndef __BACKPORT_LINUX_DMA_MAPPING_H_TO_2_6_25__
+#define __BACKPORT_LINUX_DMA_MAPPING_H_TO_2_6_25__
+
+#include_next <linux/dma-mapping.h>
+
+#ifndef CONFIG_HAVE_DMA_ATTRS
+struct dma_attrs;
+
+#define dma_map_single_attrs(dev, cpu_addr, size, dir, attrs) \
+	dma_map_single(dev, cpu_addr, size, dir)
+
+#define dma_unmap_single_attrs(dev, dma_addr, size, dir, attrs) \
+	dma_unmap_single(dev, dma_addr, size, dir)
+
+#define dma_map_sg_attrs(dev, sgl, nents, dir, attrs) \
+	dma_map_sg(dev, sgl, nents, dir)
+
+#define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \
+	dma_unmap_sg(dev, sgl, nents, dir)
+
+#endif /* CONFIG_HAVE_DMA_ATTRS */
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/etherdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/etherdevice.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/etherdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,15 @@
+#ifndef BACKPORT_LINUX_ETHERDEVICE
+#define BACKPORT_LINUX_ETHERDEVICE
+
+#include_next <linux/etherdevice.h>
+
+static inline unsigned short backport_eth_type_trans(struct sk_buff *skb, 
+						     struct net_device *dev)
+{
+	skb->dev = dev;
+	return eth_type_trans(skb, dev);
+}
+
+#define eth_type_trans backport_eth_type_trans
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/file.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/file.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/file.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,12 @@
+#ifndef _BACKPORT_LINUX_FILE_H_
+#define _BACKPORT_LINUX_FILE_H_
+
+#include_next <linux/file.h>
+#include <linux/fs.h>
+
+static inline void drop_file_write_access(struct file *filp)
+{
+	put_write_access(filp->f_dentry->d_inode);
+}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/freezer.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/freezer.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/freezer.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,6 @@
+#ifndef BACKPORT_LINUX_FREEZER_H
+#define BACKPORT_LINUX_FREEZER_H
+
+static inline void set_freezable(void) {}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/fs.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/fs.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/fs.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,62 @@
+#ifndef BACKPORT_LINUX_FS_H
+#define BACKPORT_LINUX_FS_H
+
+#include_next <linux/fs.h>
+#include <linux/mount.h>
+
+#define FILE_LOCK_DEFERRED 1
+
+#define ATTR_KILL_PRIV  (1 << 14)
+
+static inline void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
+{
+	new->fl_owner = fl->fl_owner;
+	new->fl_pid = fl->fl_pid;
+	new->fl_file = NULL;
+	new->fl_flags = fl->fl_flags;
+	new->fl_type = fl->fl_type;
+	new->fl_start = fl->fl_start;
+	new->fl_end = fl->fl_end;
+	new->fl_ops = NULL;
+	new->fl_lmops = NULL;
+}
+
+#define vfs_setlease(a, b, c) setlease(a, b, c)
+
+static inline int __mandatory_lock(struct inode *ino)
+{
+	return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID;
+}
+
+#define mandatory_lock(_args) MANDATORY_LOCK(_args)
+
+static inline int backport_vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
+{
+	return vfs_symlink(dir, dentry, oldname, 0);
+}
+
+#define vfs_symlink(_dir, _dentry, _oldname) backport_vfs_symlink(_dir, _dentry, _oldname)
+
+#ifdef CONFIG_DEBUG_WRITECOUNT
+static inline void file_take_write(struct file *f)
+{
+	WARN_ON(f->f_mnt_write_state != 0);
+	f->f_mnt_write_state = FILE_MNT_WRITE_TAKEN;
+}
+#else
+static inline void file_take_write(struct file *filp) {}
+#endif
+
+static inline int inode_permission(struct inode *inode, int flags)
+{
+	return permission(inode, flags, NULL);
+}
+
+static inline int __mnt_is_readonly(struct vfsmount *mnt)
+{
+	if (mnt->mnt_sb->s_flags & MS_RDONLY)
+		return 1;
+	return 0;
+}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/genalloc.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/genalloc.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/genalloc.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,42 @@
+/*
+ * Basic general purpose allocator for managing special purpose memory
+ * not managed by the regular kmalloc/kfree interface.
+ * Uses for this include on-device special memory, uncached memory,
+ * etc.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2.  See the file COPYING for more details.
+ */
+
+
+/*
+ *  General purpose special memory pool descriptor.
+ */
+struct gen_pool {
+	rwlock_t lock;
+	struct list_head chunks;	/* list of chunks in this pool */
+	int min_alloc_order;		/* minimum allocation order */
+};
+
+/*
+ *  General purpose special memory pool chunk descriptor.
+ */
+struct gen_pool_chunk {
+	spinlock_t lock;
+	struct list_head next_chunk;	/* next chunk in pool */
+	unsigned long start_addr;	/* starting address of memory chunk */
+	unsigned long end_addr;		/* ending address of memory chunk */
+	unsigned long bits[0];		/* bitmap for allocating memory chunk */
+};
+
+extern struct gen_pool *ib_gen_pool_create(int, int);
+extern int ib_gen_pool_add(struct gen_pool *, unsigned long, size_t, int);
+extern void ib_gen_pool_destroy(struct gen_pool *);
+extern unsigned long ib_gen_pool_alloc(struct gen_pool *, size_t);
+extern void ib_gen_pool_free(struct gen_pool *, unsigned long, size_t);
+
+#define gen_pool_create ib_gen_pool_create
+#define gen_pool_add ib_gen_pool_add
+#define gen_pool_destroy ib_gen_pool_destroy
+#define gen_pool_alloc ib_gen_pool_alloc
+#define gen_pool_free ib_gen_pool_free
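
A short usage sketch for the gen_pool interface declared above (illustrative
values, not from this patch; the defines route the calls to the ib_gen_pool_*
backport implementations):

    #include <linux/genalloc.h>
    #include <linux/errno.h>

    /* Manage 64 KB of on-device memory in 256-byte granules. */
    static int example_pool_setup(unsigned long dev_mem_start)
    {
        struct gen_pool *pool;
        unsigned long addr;

        pool = gen_pool_create(8, -1);            /* min_alloc_order 8, any node */
        if (!pool)
            return -ENOMEM;
        if (gen_pool_add(pool, dev_mem_start, 64 * 1024, -1)) {
            gen_pool_destroy(pool);
            return -ENOMEM;
        }
        addr = gen_pool_alloc(pool, 512);         /* returns 0 on failure */
        if (addr)
            gen_pool_free(pool, addr, 512);
        gen_pool_destroy(pool);
        return 0;
    }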

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/if.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/if.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/if.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,10 @@
+#ifndef __BACKPORT_LINUX_IF_H_TO_2_6_18__
+#define __BACKPORT_LINUX_IF_H_TO_2_6_18__
+
+#include_next <linux/if.h>
+
+#if defined(__powerpc64__)
+#define IFF_BONDING	0x20		/* bonding master or slave      */
+#endif
+
+#endif /* __BACKPORT_LINUX_IF_H_TO_2_6_18__ */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/if_ether.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/if_ether.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/if_ether.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,8 @@
+#ifndef __BACKPORT_LINUX_IF_ETHER_H_TO_2_6_21__
+#define __BACKPORT_LINUX_IF_ETHER_H_TO_2_6_21__
+
+#include_next <linux/if_ether.h>
+
+#define ETH_FCS_LEN     4               /* Octets in the FCS             */
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/if_vlan.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/if_vlan.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/if_vlan.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,32 @@
+#ifndef __BACKPORT_LINUX_IF_VLAN_H_TO_2_6_20__
+#define __BACKPORT_LINUX_IF_VLAN_H_TO_2_6_20__
+
+#include_next <linux/if_vlan.h>
+
+static inline struct net_device *vlan_group_get_device(struct vlan_group *vg, int vlan_id)
+{
+	return vg->vlan_devices[vlan_id];
+}
+
+static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id,
+					 struct net_device *dev)
+{
+	vg->vlan_devices[vlan_id] = dev;
+}
+
+#define vlan_dev_info(x) VLAN_DEV_INFO(x)
+
+static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
+{
+	return vlan_dev_info(dev)->vlan_id;
+}
+
+#define vlan_dev_real_dev(netdev) (VLAN_DEV_INFO(netdev)->real_dev)
+#define vlan_dev_vlan_id(netdev) (VLAN_DEV_INFO(netdev)->vlan_id)
+
+static inline int is_vlan_dev(struct net_device *dev)
+{
+	return dev->priv_flags & IFF_802_1Q_VLAN;
+}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/in.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/in.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/in.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,17 @@
+#ifndef __BACKPORT_LINUX_IN_H_TO_2_6_24__
+#define __BACKPORT_LINUX_IN_H_TO_2_6_24__
+
+#include_next <linux/in.h>
+
+
+static inline bool ipv4_is_loopback(__be32 addr)
+{
+	return (addr & htonl(0xff000000)) == htonl(0x7f000000);
+}
+
+static inline bool ipv4_is_zeronet(__be32 addr)
+{
+	return (addr & htonl(0xff000000)) == htonl(0x00000000);
+}
+
+#endif	/* __BACKPORT_LINUX_IN_H_TO_2_6_24__ */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/inet.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/inet.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/inet.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,208 @@
+#ifndef __BACKPORT_LINUX_INET_H_TO_2_6_26__
+#define __BACKPORT_LINUX_INET_H_TO_2_6_26__
+
+#include_next <linux/inet.h>
+
+#define INET_ADDRSTRLEN		(16)
+#define INET6_ADDRSTRLEN	(48)
+
+#define IN6PTON_XDIGIT	  	0x00010000
+#define IN6PTON_DIGIT	   	0x00020000
+#define IN6PTON_COLON_MASK      0x00700000
+#define IN6PTON_COLON_1		0x00100000      /* single : requested */
+#define IN6PTON_COLON_2	 	0x00200000      /* second : requested */
+#define IN6PTON_COLON_1_2       0x00400000      /* :: requested */
+#define IN6PTON_DOT	     	0x00800000      /* . */
+#define IN6PTON_DELIM	   	0x10000000
+#define IN6PTON_NULL	    	0x20000000      /* first/tail */
+#define IN6PTON_UNKNOWN	 	0x40000000
+
+static inline int xdigit2bin(char c, int delim)
+{
+	if (c == delim || c == '\0')
+		return IN6PTON_DELIM;
+	if (c == ':')
+		return IN6PTON_COLON_MASK;
+	if (c == '.')
+		return IN6PTON_DOT;
+	if (c >= '0' && c <= '9')
+		return (IN6PTON_XDIGIT | IN6PTON_DIGIT| (c - '0'));
+	if (c >= 'a' && c <= 'f')
+		return (IN6PTON_XDIGIT | (c - 'a' + 10));
+	if (c >= 'A' && c <= 'F')
+		return (IN6PTON_XDIGIT | (c - 'A' + 10));
+	if (delim == -1)
+		return IN6PTON_DELIM;
+	return IN6PTON_UNKNOWN;
+}
+
+static inline int in4_pton(const char *src, int srclen,
+	     u8 *dst,
+	     int delim, const char **end)
+{
+	const char *s;
+	u8 *d;
+	u8 dbuf[4];
+	int ret = 0;
+	int i;
+	int w = 0;
+
+	if (srclen < 0)
+		srclen = strlen(src);
+	s = src;
+	d = dbuf;
+	i = 0;
+	while(1) {
+		int c;
+		c = xdigit2bin(srclen > 0 ? *s : '\0', delim);
+		if (!(c & (IN6PTON_DIGIT | IN6PTON_DOT | IN6PTON_DELIM | IN6PTON_COLON_MASK))) {
+			goto out;
+		}
+		if (c & (IN6PTON_DOT | IN6PTON_DELIM | IN6PTON_COLON_MASK)) {
+			if (w == 0)
+				goto out;
+			*d++ = w & 0xff;
+			w = 0;
+			i++;
+			if (c & (IN6PTON_DELIM | IN6PTON_COLON_MASK)) {
+				if (i != 4)
+					goto out;
+				break;
+			}
+			goto cont;
+		}
+		w = (w * 10) + c;
+		if ((w & 0xffff) > 255) {
+			goto out;
+		}
+cont:
+		if (i >= 4)
+			goto out;
+		s++;
+		srclen--;
+	}
+	ret = 1;
+	memcpy(dst, dbuf, sizeof(dbuf));
+out:
+	if (end)
+		*end = s;
+	return ret;
+}
+
+static inline int in6_pton(const char *src, int srclen,
+	     u8 *dst,
+	     int delim, const char **end)
+{
+	const char *s, *tok = NULL;
+	u8 *d, *dc = NULL;
+	u8 dbuf[16];
+	int ret = 0;
+	int i;
+	int state = IN6PTON_COLON_1_2 | IN6PTON_XDIGIT | IN6PTON_NULL;
+	int w = 0;
+
+	memset(dbuf, 0, sizeof(dbuf));
+
+	s = src;
+	d = dbuf;
+	if (srclen < 0)
+		srclen = strlen(src);
+
+	while (1) {
+		int c;
+
+		c = xdigit2bin(srclen > 0 ? *s : '\0', delim);
+		if (!(c & state))
+			goto out;
+		if (c & (IN6PTON_DELIM | IN6PTON_COLON_MASK)) {
+			/* process one 16-bit word */
+			if (!(state & IN6PTON_NULL)) {
+				*d++ = (w >> 8) & 0xff;
+				*d++ = w & 0xff;
+			}
+			w = 0;
+			if (c & IN6PTON_DELIM) {
+				/* We've processed last word */
+				break;
+			}
+			/*
+			 * COLON_1 => XDIGIT
+			 * COLON_2 => XDIGIT|DELIM
+			 * COLON_1_2 => COLON_2
+			 */
+			switch (state & IN6PTON_COLON_MASK) {
+			case IN6PTON_COLON_2:
+				dc = d;
+				state = IN6PTON_XDIGIT | IN6PTON_DELIM;
+				if (dc - dbuf >= sizeof(dbuf))
+					state |= IN6PTON_NULL;
+				break;
+			case IN6PTON_COLON_1|IN6PTON_COLON_1_2:
+				state = IN6PTON_XDIGIT | IN6PTON_COLON_2;
+				break;
+			case IN6PTON_COLON_1:
+				state = IN6PTON_XDIGIT;
+				break;
+			case IN6PTON_COLON_1_2:
+				state = IN6PTON_COLON_2;
+				break;
+			default:
+				state = 0;
+			}
+			tok = s + 1;
+			goto cont;
+		}
+
+		if (c & IN6PTON_DOT) {
+			ret = in4_pton(tok ? tok : s, srclen + (int)(s - tok), d, delim, &s);
+			if (ret > 0) {
+				d += 4;
+				break;
+			}
+			goto out;
+		}
+
+		w = (w << 4) | (0xff & c);
+		state = IN6PTON_COLON_1 | IN6PTON_DELIM;
+		if (!(w & 0xf000)) {
+			state |= IN6PTON_XDIGIT;
+		}
+		if (!dc && d + 2 < dbuf + sizeof(dbuf)) {
+			state |= IN6PTON_COLON_1_2;
+			state &= ~IN6PTON_DELIM;
+		}
+		if (d + 2 >= dbuf + sizeof(dbuf)) {
+			state &= ~(IN6PTON_COLON_1|IN6PTON_COLON_1_2);
+		}
+cont:
+		if ((dc && d + 4 < dbuf + sizeof(dbuf)) ||
+		    d + 4 == dbuf + sizeof(dbuf)) {
+			state |= IN6PTON_DOT;
+		}
+		if (d >= dbuf + sizeof(dbuf)) {
+			state &= ~(IN6PTON_XDIGIT|IN6PTON_COLON_MASK);
+		}
+		s++;
+		srclen--;
+	}
+
+	i = 15; d--;
+
+	if (dc) {
+		while(d >= dc)
+			dst[i--] = *d--;
+		while(i >= dc - dbuf)
+			dst[i--] = 0;
+		while(i >= 0)
+			dst[i--] = *d--;
+	} else
+		memcpy(dst, dbuf, sizeof(dbuf));
+
+	ret = 1;
+out:
+	if (end)
+		*end = s;
+	return ret;
+}
+
+#endif /* __BACKPORT_LINUX_INET_H_TO_2_6_26__ */
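
The usual call pattern for the in4_pton() backport above, as a sketch
(hypothetical helper, not from this patch):

    static int example_parse_ipv4(const char *str, u8 *addr /* 4 bytes */)
    {
        const char *end;

        /* srclen < 0 means "use strlen(str)"; delim -1 means "no delimiter";
         * returns 1 on success, 0 on malformed input */
        return in4_pton(str, -1, addr, -1, &end) ? 0 : -EINVAL;
    }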

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/inet_lro.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/inet_lro.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/inet_lro.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,180 @@
+#ifndef __BACKPORT_INET_LRO_H_TO_2_6_23__
+#define __BACKPORT_INET_LRO_H_TO_2_6_23__
+
+/*
+ *  linux/include/linux/inet_lro.h
+ *
+ *  Large Receive Offload (ipv4 / tcp)
+ *
+ *  (C) Copyright IBM Corp. 2007
+ *
+ *  Authors:
+ *       Jan-Bernd Themann <themann at de.ibm.com>
+ *       Christoph Raisch <raisch at de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <net/ip.h>
+#include <net/tcp.h>
+
+/*
+ * LRO statistics
+ */
+
+struct net_lro_stats {
+	unsigned long aggregated;
+	unsigned long flushed;
+	unsigned long no_desc;
+};
+
+/*
+ * LRO descriptor for a tcp session
+ */
+struct net_lro_desc {
+	struct sk_buff *parent;
+	struct sk_buff *last_skb;
+	struct skb_frag_struct *next_frag;
+	struct iphdr *iph;
+	struct tcphdr *tcph;
+	struct vlan_group *vgrp;
+	__wsum  data_csum;
+	__be32 tcp_rcv_tsecr;
+	__be32 tcp_rcv_tsval;
+	__be32 tcp_ack;
+	u32 tcp_next_seq;
+	u32 skb_tot_frags_len;
+	u16 ip_tot_len;
+	u16 tcp_saw_tstamp; 		/* timestamps enabled */
+	__be16 tcp_window;
+	u16 vlan_tag;
+	int pkt_aggr_cnt;		/* counts aggregated packets */
+	int vlan_packet;
+	int mss;
+	int active;
+};
+
+/*
+ * Large Receive Offload (LRO) Manager
+ *
+ * Fields must be set by driver
+ */
+
+struct net_lro_mgr {
+	struct net_device *dev;
+	struct net_lro_stats stats;
+
+	/* LRO features */
+	unsigned long features;
+#define LRO_F_NAPI            1  /* Pass packets to stack via NAPI */
+#define LRO_F_EXTRACT_VLAN_ID 2  /* Set flag if VLAN IDs are extracted
+				    from received packets and eth protocol
+				    is still ETH_P_8021Q */
+
+	u32 ip_summed;      /* Set in non generated SKBs in page mode */
+	u32 ip_summed_aggr; /* Set in aggregated SKBs: CHECKSUM_UNNECESSARY
+			     * or CHECKSUM_NONE */
+
+	int max_desc; /* Max number of LRO descriptors  */
+	int max_aggr; /* Max number of LRO packets to be aggregated */
+
+	int frag_align_pad; /* Padding required to properly align layer 3
+			     * headers in generated skb when using frags */
+
+	struct net_lro_desc *lro_arr; /* Array of LRO descriptors */
+
+	/*
+	 * Optimized driver functions
+	 *
+	 * get_skb_header: returns tcp and ip header for packet in SKB
+	 */
+	int (*get_skb_header)(struct sk_buff *skb, void **ip_hdr,
+			      void **tcpudp_hdr, u64 *hdr_flags, void *priv);
+
+	/* hdr_flags: */
+#define LRO_IPV4 1 /* ip_hdr is IPv4 header */
+#define LRO_TCP  2 /* tcpudp_hdr is TCP header */
+
+	/*
+	 * get_frag_header: returns mac, tcp and ip header for packet in SKB
+	 *
+	 * @hdr_flags: Indicate what kind of LRO has to be done
+	 *             (IPv4/IPv6/TCP/UDP)
+	 */
+	int (*get_frag_header)(struct skb_frag_struct *frag, void **mac_hdr,
+			       void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
+			       void *priv);
+};
+
+/*
+ * Processes a SKB
+ *
+ * @lro_mgr: LRO manager to use
+ * @skb: SKB to aggregate
+ * @priv: Private data that may be used by driver functions
+ *        (for example get_tcp_ip_hdr)
+ */
+
+void lro_receive_skb(struct net_lro_mgr *lro_mgr,
+		     struct sk_buff *skb,
+		     void *priv);
+
+/*
+ * Processes a SKB with VLAN HW acceleration support
+ */
+
+void lro_vlan_hwaccel_receive_skb(struct net_lro_mgr *lro_mgr,
+				  struct sk_buff *skb,
+				  struct vlan_group *vgrp,
+				  u16 vlan_tag,
+				  void *priv);
+
+/*
+ * Processes a fragment list
+ *
+ * This function aggregates fragments and generates SKBs to pass
+ * the packets to the stack.
+ *
+ * @lro_mgr: LRO manager to use
+ * @frags: Fragment to be processed. Must contain entire header in first
+ *         element.
+ * @len: Length of received data
+ * @true_size: Actual size of memory the fragment is consuming
+ * @priv: Private data that may be used by driver functions
+ *        (for example get_tcp_ip_hdr)
+ */
+
+void lro_receive_frags(struct net_lro_mgr *lro_mgr,
+		       struct skb_frag_struct *frags,
+		       int len, int true_size, void *priv, __wsum sum);
+
+void lro_vlan_hwaccel_receive_frags(struct net_lro_mgr *lro_mgr,
+				    struct skb_frag_struct *frags,
+				    int len, int true_size,
+				    struct vlan_group *vgrp,
+				    u16 vlan_tag,
+				    void *priv, __wsum sum);
+
+/*
+ * Forward all aggregated SKBs held by lro_mgr to network stack
+ */
+
+void lro_flush_all(struct net_lro_mgr *lro_mgr);
+
+void lro_flush_pkt(struct net_lro_mgr *lro_mgr,
+		   struct iphdr *iph, struct tcphdr *tcph);
+
+#endif
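
A condensed sketch of how a driver typically wires up this LRO manager (the
struct example_priv, EXAMPLE_LRO_DESCS and example_get_skb_header names are
hypothetical, not from this patch):

    static void example_lro_setup(struct example_priv *priv)
    {
        struct net_lro_mgr *mgr = &priv->lro_mgr;

        mgr->dev            = priv->netdev;
        mgr->features       = LRO_F_NAPI;              /* hand off via NAPI */
        mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
        mgr->max_desc       = EXAMPLE_LRO_DESCS;       /* size of priv->lro_desc[] */
        mgr->max_aggr       = 64;
        mgr->lro_arr        = priv->lro_desc;
        mgr->get_skb_header = example_get_skb_header;  /* fills ip/tcp header ptrs */
    }

    /* In the NAPI poll loop, per received skb:
     *     lro_receive_skb(&priv->lro_mgr, skb, priv);
     * and once the poll is done:
     *     lro_flush_all(&priv->lro_mgr);
     */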

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/inetdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/inetdevice.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/inetdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,8 @@
+#ifndef _BACKPORT_LINUX_INETDEVICE_H
+#define _BACKPORT_LINUX_INETDEVICE_H
+
+#include_next <linux/inetdevice.h>
+
+#define ip_dev_find(net, addr) ip_dev_find(addr)
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/interrupt.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/interrupt.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/interrupt.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,20 @@
+#ifndef BACKPORT_LINUX_INTERRUPT_TO_2_6_18
+#define BACKPORT_LINUX_INTERRUPT_TO_2_6_18
+#include_next <linux/interrupt.h>
+
+typedef irqreturn_t (*backport_irq_handler_t)(int, void *);
+
+static inline int 
+backport_request_irq(unsigned int irq,
+                     irqreturn_t (*handler)(int, void *),
+                     unsigned long flags, const char *dev_name, void *dev_id)
+{
+	return request_irq(irq, 
+		           (irqreturn_t (*)(int, void *, struct pt_regs *))handler, 
+			   flags, dev_name, dev_id);
+}
+
+#define request_irq backport_request_irq
+#define irq_handler_t backport_irq_handler_t
+
+#endif
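
With this shim, handlers written against the two-argument prototype from
2.6.19+ compile unchanged here; a sketch (hypothetical names, not from this
patch):

    static irqreturn_t example_isr(int irq, void *dev_id)
    {
        /* dev_id is the cookie passed to request_irq() below */
        return IRQ_HANDLED;
    }

    static int example_setup_irq(unsigned int irq, void *priv)
    {
        /* request_irq expands to backport_request_irq(), which casts the
         * handler to the old (int, void *, struct pt_regs *) prototype */
        return request_irq(irq, example_isr, IRQF_SHARED, "example0", priv);
    }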

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/jiffies.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/jiffies.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/jiffies.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,10 @@
+#ifndef _JIFFIES_BACKPORT_H
+#define _JIFFIES_BACKPORT_H
+
+#include_next <linux/jiffies.h>
+
+#define time_in_range(a,b,c) \
+	(time_after_eq(a,b) && \
+	 time_before_eq(a,c))
+
+#endif /* _JIFFIES_BACKPORT_H */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/kernel.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/kernel.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/kernel.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,33 @@
+#ifndef BACKPORT_KERNEL_H_2_6_22
+#define BACKPORT_KERNEL_H_2_6_22
+
+#include_next <linux/kernel.h>
+
+#include <asm/errno.h>
+#include <asm/string.h>
+
+#define USHORT_MAX	((u16)(~0U))
+
+static inline int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
+{
+	char *tail;
+	unsigned long val;
+	size_t len;
+
+	*res = 0;
+	len = strlen(cp);
+	if (len == 0)
+		return -EINVAL;
+
+	val = simple_strtoul(cp, &tail, base);
+	if ((*tail == '\0') ||
+		((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
+		*res = val;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+
+#endif
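
Typical use of the backported strict_strtoul() in a store-style handler
(simplified sketch, hypothetical names):

    static ssize_t example_store(const char *buf, size_t count)
    {
        unsigned long val;

        if (strict_strtoul(buf, 10, &val))    /* -EINVAL on trailing garbage */
            return -EINVAL;
        if (val > USHORT_MAX)
            return -ERANGE;
        /* apply val ... */
        return count;
    }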

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/kobject.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/kobject.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/kobject.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,37 @@
+#ifndef __BACKPORT_KOBJECT_H_TO_2_6_24__
+#define __BACKPORT_KOBJECT_H_TO_2_6_24__
+
+#include_next <linux/kobject.h>
+
+
+/**
+ * kobject_create_and_add - create a struct kobject dynamically and register it with sysfs
+ *
+ * @name: the name for the kset
+ * @parent: the parent kobject of this kobject, if any.
+ *
+ * This function creates a kobject structure dynamically and registers it
+ * with sysfs.  When you are finished with this structure, call
+ * kobject_put() and the structure will be dynamically freed when
+ * it is no longer being used.
+ *
+ * If the kobject was not able to be created, NULL will be returned.
+ */
+struct kobject *kobject_create_and_add(const char *name, struct kobject *parent);
+
+/**
+ * kobject_init_and_add - initialize a kobject structure and add it to the kobject hierarchy
+ * @kobj: pointer to the kobject to initialize
+ * @ktype: pointer to the ktype for this kobject.
+ * @parent: pointer to the parent of this kobject.
+ * @fmt: the name of the kobject.
+ *
+ * This function combines the call to kobject_init() and
+ * kobject_add().  The same error handling as after a call to
+ * kobject_add() applies, and the kobject lifetime rules are the same here.
+ */
+int kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype,
+                         struct kobject *parent, const char *fmt, ...);
+
+
+#endif /* __BACKPORT_KOBJECT_H_TO_2_6_24__ */
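
A minimal sketch of the dynamic-kobject pattern these prototypes support
(hypothetical module code, not from this patch; a NULL parent places the
directory at the top of sysfs):

    static struct kobject *example_kobj;

    static int __init example_init(void)
    {
        example_kobj = kobject_create_and_add("example_backport", NULL);
        return example_kobj ? 0 : -ENOMEM;
    }

    static void __exit example_exit(void)
    {
        kobject_put(example_kobj);   /* drop the reference; freed when unused */
    }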

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/list.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/list.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/list.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,9 @@
+#ifndef __BACKPORT_LINUX_LIST_H_TO_2_6_24__
+#define __BACKPORT_LINUX_LIST_H_TO_2_6_24__
+#include_next<linux/list.h>
+
+#define list_first_entry(ptr, type, member) \
+	list_entry((ptr)->next, type, member)
+
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/log2.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/log2.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/log2.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,194 @@
+/* Integer base 2 logarithm calculation
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells at redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_LOG2_H
+#define _LINUX_LOG2_H
+
+#include_next <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+/*
+ * deal with unrepresentable constant logarithms
+ */
+extern __attribute__((const, noreturn))
+int ____ilog2_NaN(void);
+
+/*
+ * non-constant log of base 2 calculators
+ * - the arch may override these in asm/bitops.h if they can be implemented
+ *   more efficiently than using fls() and fls64()
+ * - the arch is not required to handle n==0 if implementing the fallback
+ */
+#ifndef CONFIG_ARCH_HAS_ILOG2_U32
+static inline __attribute__((const))
+int __ilog2_u32(u32 n)
+{
+	return fls(n) - 1;
+}
+#endif
+
+#ifndef CONFIG_ARCH_HAS_ILOG2_U64
+static inline __attribute__((const))
+int __ilog2_u64(u64 n)
+{
+	return fls64(n) - 1;
+}
+#endif
+
+/*
+ *  Determine whether some value is a power of two, where zero is
+ * *not* considered a power of two.
+ */
+
+static inline __attribute__((const))
+bool is_power_of_2(unsigned long n)
+{
+	return (n != 0 && ((n & (n - 1)) == 0));
+}
+
+/*
+ * round up to nearest power of two
+ */
+static inline __attribute__((const))
+unsigned long __roundup_pow_of_two(unsigned long n)
+{
+	return 1UL << fls_long(n - 1);
+}
+
+/*
+ * round down to nearest power of two
+ */
+static inline __attribute__((const))
+unsigned long __rounddown_pow_of_two(unsigned long n)
+{
+	return 1UL << (fls_long(n) - 1);
+}
+
+/**
+ * ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value
+ * @n - parameter
+ *
+ * constant-capable log of base 2 calculation
+ * - this can be used to initialise global variables from constant data, hence
+ *   the massive ternary operator construction
+ *
+ * selects the appropriately-sized optimised version depending on sizeof(n)
+ */
+#define ilog2(n)				\
+(						\
+	__builtin_constant_p(n) ? (		\
+		(n) < 1 ? ____ilog2_NaN() :	\
+		(n) & (1ULL << 63) ? 63 :	\
+		(n) & (1ULL << 62) ? 62 :	\
+		(n) & (1ULL << 61) ? 61 :	\
+		(n) & (1ULL << 60) ? 60 :	\
+		(n) & (1ULL << 59) ? 59 :	\
+		(n) & (1ULL << 58) ? 58 :	\
+		(n) & (1ULL << 57) ? 57 :	\
+		(n) & (1ULL << 56) ? 56 :	\
+		(n) & (1ULL << 55) ? 55 :	\
+		(n) & (1ULL << 54) ? 54 :	\
+		(n) & (1ULL << 53) ? 53 :	\
+		(n) & (1ULL << 52) ? 52 :	\
+		(n) & (1ULL << 51) ? 51 :	\
+		(n) & (1ULL << 50) ? 50 :	\
+		(n) & (1ULL << 49) ? 49 :	\
+		(n) & (1ULL << 48) ? 48 :	\
+		(n) & (1ULL << 47) ? 47 :	\
+		(n) & (1ULL << 46) ? 46 :	\
+		(n) & (1ULL << 45) ? 45 :	\
+		(n) & (1ULL << 44) ? 44 :	\
+		(n) & (1ULL << 43) ? 43 :	\
+		(n) & (1ULL << 42) ? 42 :	\
+		(n) & (1ULL << 41) ? 41 :	\
+		(n) & (1ULL << 40) ? 40 :	\
+		(n) & (1ULL << 39) ? 39 :	\
+		(n) & (1ULL << 38) ? 38 :	\
+		(n) & (1ULL << 37) ? 37 :	\
+		(n) & (1ULL << 36) ? 36 :	\
+		(n) & (1ULL << 35) ? 35 :	\
+		(n) & (1ULL << 34) ? 34 :	\
+		(n) & (1ULL << 33) ? 33 :	\
+		(n) & (1ULL << 32) ? 32 :	\
+		(n) & (1ULL << 31) ? 31 :	\
+		(n) & (1ULL << 30) ? 30 :	\
+		(n) & (1ULL << 29) ? 29 :	\
+		(n) & (1ULL << 28) ? 28 :	\
+		(n) & (1ULL << 27) ? 27 :	\
+		(n) & (1ULL << 26) ? 26 :	\
+		(n) & (1ULL << 25) ? 25 :	\
+		(n) & (1ULL << 24) ? 24 :	\
+		(n) & (1ULL << 23) ? 23 :	\
+		(n) & (1ULL << 22) ? 22 :	\
+		(n) & (1ULL << 21) ? 21 :	\
+		(n) & (1ULL << 20) ? 20 :	\
+		(n) & (1ULL << 19) ? 19 :	\
+		(n) & (1ULL << 18) ? 18 :	\
+		(n) & (1ULL << 17) ? 17 :	\
+		(n) & (1ULL << 16) ? 16 :	\
+		(n) & (1ULL << 15) ? 15 :	\
+		(n) & (1ULL << 14) ? 14 :	\
+		(n) & (1ULL << 13) ? 13 :	\
+		(n) & (1ULL << 12) ? 12 :	\
+		(n) & (1ULL << 11) ? 11 :	\
+		(n) & (1ULL << 10) ? 10 :	\
+		(n) & (1ULL <<  9) ?  9 :	\
+		(n) & (1ULL <<  8) ?  8 :	\
+		(n) & (1ULL <<  7) ?  7 :	\
+		(n) & (1ULL <<  6) ?  6 :	\
+		(n) & (1ULL <<  5) ?  5 :	\
+		(n) & (1ULL <<  4) ?  4 :	\
+		(n) & (1ULL <<  3) ?  3 :	\
+		(n) & (1ULL <<  2) ?  2 :	\
+		(n) & (1ULL <<  1) ?  1 :	\
+		(n) & (1ULL <<  0) ?  0 :	\
+		____ilog2_NaN()			\
+				   ) :		\
+	(sizeof(n) <= 4) ?			\
+	__ilog2_u32(n) :			\
+	__ilog2_u64(n)				\
+ )
+
+/**
+ * roundup_pow_of_two - round the given value up to nearest power of two
+ * @n - parameter
+ *
+ * round the given value up to the nearest power of two
+ * - the result is undefined when n == 0
+ * - this can be used to initialise global variables from constant data
+ */
+#define roundup_pow_of_two(n)			\
+(						\
+	__builtin_constant_p(n) ? (		\
+		(n == 1) ? 0 :			\
+		(1UL << (ilog2((n) - 1) + 1))	\
+				   ) :		\
+	__roundup_pow_of_two(n)			\
+ )
+
+/**
+ * rounddown_pow_of_two - round the given value down to nearest power of two
+ * @n - parameter
+ *
+ * round the given value down to the nearest power of two
+ * - the result is undefined when n == 0
+ * - this can be used to initialise global variables from constant data
+ */
+#define rounddown_pow_of_two(n)			\
+(						\
+	__builtin_constant_p(n) ? (		\
+		(n == 1) ? 0 :			\
+		(1UL << ilog2(n))) :		\
+	__rounddown_pow_of_two(n)		\
+ )
+
+#endif /* _LINUX_LOG2_H */
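
These helpers are typically used to size power-of-two rings; a small sketch
(illustrative values, not from this patch):

    static unsigned int example_ring_entries;

    static void example_ring_size_init(void)
    {
        /* the ring must be a power of two so index wrap is a simple mask */
        example_ring_entries = roundup_pow_of_two(1000);     /* -> 1024 */
        printk(KERN_INFO "example ring: %u entries (order %d)\n",
               example_ring_entries, ilog2(example_ring_entries));
    }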

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/magic.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/magic.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/magic.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,6 @@
+#ifndef BACKPORT_LINUX_MAGIC_H
+#define BACKPORT_LINUX_MAGIC_H
+
+#define NFS_SUPER_MAGIC		0x6969
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/mm.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/mm.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/mm.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,40 @@
+#ifndef _BACKPORT_LINUX_MM_H_
+#define _BACKPORT_LINUX_MM_H_
+
+#include_next <linux/mm.h>
+#include <linux/vmstat.h>
+
+#if defined(__i386__)
+#include <asm/highmem.h>
+#endif
+
+#define VM_CAN_NONLINEAR 0x08000000     /* Has ->fault & does nonlinear pages */
+
+#define is_vmalloc_addr(x) ((unsigned long)(x) >= VMALLOC_START && (unsigned long)(x) < VMALLOC_END)
+
+struct shrinker {
+	shrinker_t		shrink;
+	struct list_head	list;
+	int			seeks;  /* seeks to recreate an obj */
+	long			nr;     /* objs pending delete */
+};
+
+static inline void task_io_account_cancelled_write(size_t bytes)
+{
+}
+
+static inline void cancel_dirty_page(struct page *page, unsigned int account_size)
+{
+	if (TestClearPageDirty(page)) {
+		struct address_space *mapping = page->mapping;
+		if (mapping && mapping_cap_account_dirty(mapping)) {
+			dec_zone_page_state(page, NR_FILE_DIRTY);
+			dec_bdi_stat(mapping->backing_dev_info,
+					BDI_RECLAIMABLE);
+			if (account_size)
+				task_io_account_cancelled_write(account_size);
+		}
+	}
+}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/mount.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/mount.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/mount.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,11 @@
+#ifndef BACKPORT_LINUX_MOUNT_H
+#define BACKPORT_LINUX_MOUNT_H
+
+#include_next <linux/mount.h>
+#include <linux/fs.h>
+
+extern int mnt_want_write(struct vfsmount *mnt);
+extern void mnt_drop_write(struct vfsmount *mnt);
+extern int init_mnt_writers(void);
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/mpage.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/mpage.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/mpage.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,15 @@
+#ifndef BACKPORT_LINUX_MPAGE_H
+#define BACKPORT_LINUX_MPAGE_H
+
+#include_next <linux/mpage.h>
+#include <linux/pagevec.h>
+
+typedef int (*backport_writepage_t)(struct page *page, struct writeback_control *wbc,
+                                void *data);
+
+extern int backport_write_cache_pages(struct address_space *mapping,
+                      struct writeback_control *wbc, backport_writepage_t writepage,
+                      void *data);
+
+#define write_cache_pages backport_write_cache_pages
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/namei.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/namei.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/namei.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,27 @@
+#ifndef BACKPORT_LINUX_NAMEI_H
+#define BACKPORT_LINUX_NAMEI_H
+
+#include_next <linux/namei.h>
+#include <linux/mount.h>
+
+static inline int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
+		    const char *name, unsigned int flags,
+		    struct nameidata *nd)
+{
+	int retval;
+
+	/* same as do_path_lookup */
+	nd->last_type = LAST_ROOT;
+	nd->flags = flags;
+	nd->depth = 0;
+
+	nd->dentry = dentry;
+	nd->mnt = mnt;
+	mntget(nd->mnt);
+	dget(nd->dentry);
+
+	retval = path_walk(name, nd);
+
+	return retval;
+}
+#endif /* BACKPORT_LINUX_NAMEI_H */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/net.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/net.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/net.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,19 @@
+#ifndef BACKPORT_LINUX_NET_H
+#define BACKPORT_LINUX_NET_H
+
+#include_next <linux/net.h>
+#include <linux/random.h>
+
+enum sock_shutdown_cmd {
+	SHUT_RD		= 0,
+	SHUT_WR		= 1,
+	SHUT_RDWR	= 2,
+};
+
+
+static inline int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd flags)
+{
+	return sock->ops->shutdown(sock, flags);
+}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/netdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/netdevice.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/netdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,21 @@
+#ifndef BACKPORT_LINUX_NETDEVICE_TO_2_6_18
+#define BACKPORT_LINUX_NETDEVICE_TO_2_6_18
+
+#include_next <linux/netdevice.h>
+
+static inline int skb_checksum_help_to_2_6_18(struct sk_buff *skb)
+{
+        return skb_checksum_help(skb, 0);
+}
+
+#define skb_checksum_help skb_checksum_help_to_2_6_18
+
+#undef SET_ETHTOOL_OPS
+#define SET_ETHTOOL_OPS(netdev, ops) \
+	(netdev)->ethtool_ops = (struct ethtool_ops *)(ops)
+
+#define NETIF_F_LRO		32768   /* large receive offload */
+#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
+
+#define dev_get_by_name(net, name) dev_get_by_name(name)
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/netlink.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/netlink.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/netlink.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,14 @@
+#ifndef BACKPORT_LINUX_NETLINK_H
+#define BACKPORT_LINUX_NETLINK_H
+
+#include_next <linux/netlink.h>
+
+#define netlink_kernel_create(net, uint, groups, input, mutex, mod) \
+       netlink_kernel_create(uint, groups, input, mod)
+
+static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
+{
+	return (struct nlmsghdr *)skb->data;
+}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/notifier.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/notifier.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/notifier.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,26 @@
+#ifndef __BACKPORT_LINUX_NOTIFIER_H_TO_2_6_26__
+#define __BACKPORT_LINUX_NOTIFIER_H_TO_2_6_26__
+
+#include_next <linux/notifier.h>
+
+#define NETDEV_BONDING_FAILOVER NETDEV_CHANGE
+#define NOTIFY_DONE		0x0000		/* Don't care */
+
+#endif /* __BACKPORT_LINUX_NOTIFIER_H_TO_2_6_26__ */
+
+#ifndef LINUX_NOTIFIER_BACKPORT_TO_2_6_21_H
+#define LINUX_NOTIFIER_BACKPORT_TO_2_6_21_H
+
+/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
+ * operation in progress
+ */
+#define CPU_TASKS_FROZEN       0x0010
+
+#define CPU_ONLINE_FROZEN      (CPU_ONLINE | CPU_TASKS_FROZEN)
+#define CPU_UP_PREPARE_FROZEN  (CPU_UP_PREPARE | CPU_TASKS_FROZEN)
+#define CPU_UP_CANCELED_FROZEN (CPU_UP_CANCELED | CPU_TASKS_FROZEN)
+#define CPU_DOWN_PREPARE_FROZEN        (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
+#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
+#define CPU_DEAD_FROZEN                (CPU_DEAD | CPU_TASKS_FROZEN)
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/pagemap.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/pagemap.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/pagemap.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,8 @@
+#ifndef BACKPORT_LINUX_PAGEMAP_H
+#define BACKPORT_LINUX_PAGEMAP_H
+
+#include_next <linux/pagemap.h>
+
+#define __grab_cache_page	grab_cache_page
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/path.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/path.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/path.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,36 @@
+#ifndef _BACKPORT_LINUX_PATH_H
+#define _BACKPORT_LINUX_PATH_H
+
+#include <linux/mount.h>
+#include <linux/namei.h>
+
+struct path {
+	struct vfsmount *mnt;
+	struct dentry *dentry;
+};
+
+static inline void path_put(struct path *path)
+{
+	dput(path->dentry);
+	mntput(path->mnt);
+}
+
+static inline void path_get(struct path *path)
+{
+	mntget(path->mnt);
+	dget(path->dentry);
+}
+
+static inline void backport_path_put(struct nameidata *nd)
+{
+	dput(nd->dentry);
+	mntput(nd->mnt);
+}
+
+static inline void backport_path_get(struct nameidata *nd)
+{
+	mntget(nd->mnt);
+	dget(nd->dentry);
+}
+
+#endif  /* _BACKPORT_LINUX_PATH_H */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/pci.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/pci.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/pci.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,21 @@
+#ifndef __BACKPORT_LINUX_PCI_TO_2_6_19__
+#define __BACKPORT_LINUX_PCI_TO_2_6_19__
+
+#include_next <linux/pci.h>
+
+/**
+ * PCI_VDEVICE - macro used to describe a specific pci device in short form
+ * @vend: the vendor name
+ * @dev: the 16 bit PCI Device ID
+ *
+ * This macro is used to create a struct pci_device_id that matches a
+ * specific PCI device.  The subvendor, and subdevice fields will be set
+ * to PCI_ANY_ID. The macro allows the next field to follow as the device
+ * private data.
+ */
+
+#define PCI_VDEVICE(vendor, device)            \
+	PCI_VENDOR_ID_##vendor, (device),       \
+	PCI_ANY_ID, PCI_ANY_ID, 0, 0
+
+#endif
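
Typical use of the PCI_VDEVICE() shorthand in a device-ID table (vendor and
device values below are placeholders, not from this patch):

    static struct pci_device_id example_pci_table[] = {
        /* expands to PCI_VENDOR_ID_MELLANOX, 0x6340, PCI_ANY_ID, PCI_ANY_ID, 0, 0 */
        { PCI_VDEVICE(MELLANOX, 0x6340) },
        { 0, }
    };
    MODULE_DEVICE_TABLE(pci, example_pci_table);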

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/proc_fs.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/proc_fs.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/proc_fs.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,20 @@
+#ifndef BACKPORT_LINUX_PROC_FS_H
+#define BACKPORT_LINUX_PROC_FS_H
+
+#include_next <linux/proc_fs.h>
+
+static inline struct proc_dir_entry *proc_create_data(const char *name, mode_t mode,
+				struct proc_dir_entry *parent,
+				const struct file_operations *proc_fops,
+				void *data)
+{
+		struct proc_dir_entry *pde;
+
+		pde = proc_create(name, mode, parent, proc_fops);
+		if (pde)
+			pde->data = data;
+
+		return pde;
+}
+
+#endif
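
Callers can use the same proc_create_data() signature as newer kernels; a
sketch (hypothetical name, fops and private data):

    static int example_proc_init(const struct file_operations *example_fops,
                                 void *priv)
    {
        struct proc_dir_entry *pde;

        /* on this kernel the data pointer is attached after proc_create() */
        pde = proc_create_data("example_stats", 0444, NULL, example_fops, priv);
        return pde ? 0 : -ENOMEM;
    }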

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/random.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/random.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/random.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,8 @@
+#ifndef BACKPORT_LINUX_RANDOM_TO_2_6_18
+#define BACKPORT_LINUX_RANDOM_TO_2_6_18
+#include_next <linux/random.h>
+#include_next <linux/net.h>
+
+#define random32() net_random()
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/rbtree.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/rbtree.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/rbtree.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,10 @@
+#ifndef BACKPORT_LINUX_RBTREE_TO_2_6_18
+#define BACKPORT_LINUX_RBTREE_TO_2_6_18
+#include_next <linux/rbtree.h>
+
+/* Band-aid for buggy rbtree.h */
+#undef RB_EMPTY_NODE
+#define RB_EMPTY_NODE(node)	(rb_parent(node) == node)
+
+#endif
+

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/rculist.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/rculist.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/rculist.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,7 @@
+#ifndef LINUX_RCULIST_BACKPORT_tO_2_6_26_H
+#define LINUX_RCULIST_BACKPORT_tO_2_6_26_H
+
+#include_next <linux/list.h>
+#include_next <linux/rcupdate.h>
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/scatterlist.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/scatterlist.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/scatterlist.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,24 @@
+#ifndef __BACKPORT_LINUX_SCATTERLIST_H_TO_2_6_23__
+#define __BACKPORT_LINUX_SCATTERLIST_H_TO_2_6_23__
+
+#include_next<linux/scatterlist.h>
+#include <linux/ncrypto.h>
+
+static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
+{
+	sg->page = page;
+}
+
+#define for_each_sg(sglist, sg, nr, __i)	\
+	for (__i = 0, sg = (sglist); __i < (nr); __i++, sg++)
+
+static inline struct scatterlist *sg_next(struct scatterlist *sg)
+{
+	if (!sg) {
+		BUG();
+		return NULL;
+	}
+	return sg + 1;
+}
+
+#endif
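
The for_each_sg()/sg_next() backports above assume a flat (non-chained)
scatterlist, which is all that pre-2.6.24 kernels provide; a walking sketch:

    static unsigned int example_sg_total_len(struct scatterlist *sgl, int nents)
    {
        struct scatterlist *sg;
        unsigned int total = 0;
        int i;

        for_each_sg(sgl, sg, nents, i)   /* plain pointer increment here */
            total += sg->length;
        return total;
    }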

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/sched.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/sched.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/sched.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,26 @@
+#ifndef LINUX_SCHED_BACKPORT_H
+#define LINUX_SCHED_BACKPORT_H
+
+#include_next <linux/sched.h>
+
+#define TASK_WAKEKILL	   128
+
+#define TASK_KILLABLE	   (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
+
+#define schedule_timeout_killable(_arg) schedule_timeout_interruptible(_arg)
+
+static inline int __fatal_signal_pending(struct task_struct *tsk)
+{
+	return sigismember(&tsk->pending.signal, SIGKILL);
+}
+
+static inline int fatal_signal_pending(struct task_struct *p)
+{
+	return signal_pending(p) && __fatal_signal_pending(p);
+}
+
+static inline pid_t task_pid_nr(struct task_struct *tsk)
+{
+	return tsk->pid;
+}
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/security.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/security.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/security.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,54 @@
+#ifndef BACKPORT_LINUX_SECURITY_H
+#define BACKPORT_LINUX_SECURITY_H
+
+#include_next <linux/security.h>
+
+struct security_mnt_opts {
+	char **mnt_opts;
+	int *mnt_opts_flags;
+	int num_mnt_opts;
+};
+
+static inline void security_init_mnt_opts(struct security_mnt_opts *opts)
+{
+	opts->mnt_opts = NULL;
+	opts->mnt_opts_flags = NULL;
+	opts->num_mnt_opts = 0;
+}
+
+static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
+{
+	int i;
+	if (opts->mnt_opts)
+		for (i = 0; i < opts->num_mnt_opts; i++)
+			kfree(opts->mnt_opts[i]);
+	kfree(opts->mnt_opts);
+	opts->mnt_opts = NULL;
+	kfree(opts->mnt_opts_flags);
+	opts->mnt_opts_flags = NULL;
+	opts->num_mnt_opts = 0;
+}
+
+static inline int security_sb_set_mnt_opts(struct super_block *sb,
+					   struct security_mnt_opts *opts)
+{
+	return 0;
+}
+
+static inline void security_sb_clone_mnt_opts(const struct super_block *oldsb,
+					      struct super_block *newsb)
+{ }
+
+static inline int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts)
+{
+	return 0;
+}
+
+static inline int backport_security_sb_copy_data(void *orig, void *copy)
+{
+	return 0;
+}
+
+#define security_sb_copy_data(a,b) backport_security_sb_copy_data(a,b)
+
+#endif /* BACKPORT_LINUX_SECURITY_H */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/semaphore.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/semaphore.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/semaphore.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,6 @@
+#ifndef __BACKPORT_LINUX_SEMAPHORE_H_TO_2_6_25__
+#define __BACKPORT_LINUX_SEMAPHORE_H_TO_2_6_25__
+
+#include_next <asm/semaphore.h>
+
+#endif /* __BACKPORT_LINUX_SEMAPHORE_H_TO_2_6_25__ */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/seq_file.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/seq_file.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/seq_file.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,61 @@
+#ifndef BACKPORT_LINUX_SEQ_FILE_H
+#define BACKPORT_LINUX_SEQ_FILE_H
+
+#include_next <linux/seq_file.h>
+#include <linux/fs.h>
+
+static inline struct list_head *seq_list_start(struct list_head *head, loff_t pos)
+{
+	struct list_head *lh;
+
+	list_for_each(lh, head)
+		if (pos-- == 0)
+			return lh;
+
+	return NULL;
+}
+
+static inline struct list_head *seq_list_start_head(struct list_head *head, loff_t pos)
+{
+	if (!pos)
+		return head;
+
+	return seq_list_start(head, pos - 1);
+}
+
+static inline struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos)
+{
+	struct list_head *lh;
+
+	lh = ((struct list_head *)v)->next;
+	++*ppos;
+	return lh == head ? NULL : lh;
+}
+
+static inline void *__seq_open_private(struct file *f, struct seq_operations *ops,
+		int psize)
+{
+	int rc;
+	void *private;
+	struct seq_file *seq;
+
+	private = kzalloc(psize, GFP_KERNEL);
+	if (private == NULL)
+		goto out;
+
+	rc = seq_open(f, ops);
+	if (rc < 0)
+		goto out_free;
+
+	seq = f->private_data;
+	seq->private = private;
+	return private;
+
+out_free:
+	kfree(private);
+out:
+	return NULL;
+}
+
+
+#endif
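
The seq_list_* helpers mirror the upstream ones, so a /proc iterator over a driver list keeps the usual start/next/stop shape. A minimal sketch (my_list, my_lock and the my_seq_* names are hypothetical; assumes <linux/seq_file.h>, <linux/list.h>, <linux/spinlock.h>):

    static LIST_HEAD(my_list);
    static DEFINE_SPINLOCK(my_lock);

    static void *my_seq_start(struct seq_file *s, loff_t *pos)
    {
            spin_lock(&my_lock);
            return seq_list_start(&my_list, *pos);
    }

    static void *my_seq_next(struct seq_file *s, void *v, loff_t *pos)
    {
            return seq_list_next(v, &my_list, pos);
    }

    static void my_seq_stop(struct seq_file *s, void *v)
    {
            spin_unlock(&my_lock);
    }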

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/skbuff.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/skbuff.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/skbuff.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,23 @@
+#ifndef LINUX_SKBUFF_H_BACKPORT
+#define LINUX_SKBUFF_H_BACKPORT
+
+#include_next <linux/skbuff.h>
+
+#define CHECKSUM_PARTIAL CHECKSUM_HW 
+#define CHECKSUM_COMPLETE CHECKSUM_HW 
+
+#endif
+#ifndef __BACKPORT_LINUX_SKBUFF_H_TO_2_6_21__
+#define __BACKPORT_LINUX_SKBUFF_H_TO_2_6_21__
+
+#include_next <linux/skbuff.h>
+
+#define transport_header h.raw
+#define network_header nh.raw
+
+static inline int skb_csum_unnecessary(const struct sk_buff *skb)
+{
+	return skb->ip_summed & CHECKSUM_UNNECESSARY;
+}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/slab.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/slab.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/slab.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,20 @@
+#include_next <linux/slab.h>
+
+#ifndef LINUX_SLAB_BACKPORT_TO_2_6_22_H
+#define LINUX_SLAB_BACKPORT_TO_2_6_22_H
+
+#include_next <linux/slab.h>
+
+static inline
+struct kmem_cache *
+kmem_cache_create_for_2_6_22 (const char *name, size_t size, size_t align,
+			      unsigned long flags,
+			      void (*ctor)(void*, struct kmem_cache *, unsigned long)
+			      )
+{
+	return kmem_cache_create(name, size, align, flags, ctor, NULL);
+}
+
+#define kmem_cache_create kmem_cache_create_for_2_6_22
+
+#endif
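
The wrapper lets callers use the five-argument kmem_cache_create() form (no destructor) on a kernel that still expects six arguments. A minimal sketch with a NULL constructor, since the constructor prototypes differ across these kernels (names hypothetical):

    static struct kmem_cache *my_cache;

    static int __init my_cache_init(void)
    {
            my_cache = kmem_cache_create("my_objs", 128, 0,
                                         SLAB_HWCACHE_ALIGN, NULL);
            return my_cache ? 0 : -ENOMEM;
    }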

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/smp_lock.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/smp_lock.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/smp_lock.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,18 @@
+#ifndef LINUX_SMPLOCK_BACKPORT_TO_2_6_26_H
+#define LINUX_SMPLOCK_BACKPORT_TO_2_6_26_H
+
+#include_next <linux/smp_lock.h>
+
+/*
+ * Various legacy drivers don't really need the BKL in a specific
+ * function, but they *do* need to know that the BKL became available.
+ * This function just avoids wrapping a bunch of lock/unlock pairs
+ * around code which doesn't really need it.
+ */
+static inline void cycle_kernel_lock(void)
+{
+	lock_kernel();
+	unlock_kernel();
+}
+
+#endif
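
A typical consumer is a legacy chardev open() that only needs to know that earlier BKL-protected setup has finished. A minimal sketch (my_open is hypothetical):

    static int my_open(struct inode *inode, struct file *filp)
    {
            cycle_kernel_lock();            /* wait out any BKL-protected init */
            filp->private_data = NULL;      /* device-specific setup goes here */
            return 0;
    }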

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/string.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/string.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/string.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,24 @@
+#ifndef BACKPORT_LINUX_STRING_H
+#define BACKPORT_LINUX_STRING_H
+
+#include_next <linux/string.h>
+
+extern void *__kmalloc(size_t, gfp_t);
+
+static inline char *kstrndup(const char *s, size_t max, gfp_t gfp)
+{
+	size_t len;
+	char *buf;
+
+	if (!s)
+		return NULL;
+
+	len = strnlen(s, max);
+	buf = __kmalloc(len+1, gfp);
+	if (buf) {
+		memcpy(buf, s, len);
+		buf[len] = '\0';
+	}
+	return buf;
+}
+#endif /* BACKPORT_LINUX_STRING_H */
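
kstrndup() copies at most max characters and always NUL-terminates the result. A minimal sketch of keeping a bounded copy of a caller-supplied name (save_name and saved_name are hypothetical):

    static char *saved_name;

    static int save_name(const char *name)
    {
            saved_name = kstrndup(name, 32, GFP_KERNEL);
            return saved_name ? 0 : -ENOMEM;
    }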

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/swap.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/swap.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/swap.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,28 @@
+#ifndef LINUX_SWAP_BACKPORT_H
+#define LINUX_SWAP_BACKPORT_H
+
+#include_next <linux/swap.h>
+
+static inline unsigned int backport_nr_free_buffer_pages(void)
+{
+	/* Just pick one node, since fallback list is circular */
+	pg_data_t *pgdat = NODE_DATA(numa_node_id());
+	unsigned int sum = 0;
+
+	struct zonelist *zonelist = pgdat->node_zonelists + gfp_zone(GFP_USER);
+	struct zone **zonep = zonelist->zones;
+	struct zone *zone;
+
+	for (zone = *zonep++; zone; zone = *zonep++) {
+		unsigned long size = zone->present_pages;
+		unsigned long high = zone->pages_high;
+		if (size > high)
+			sum += size - high;
+	}
+
+	return sum;
+}
+
+#define nr_free_buffer_pages backport_nr_free_buffer_pages
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/sysctl.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/sysctl.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/sysctl.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,83 @@
+#ifndef __BACKPORT_SYSCTL_H_TO_2_6_18__
+#define __BACKPORT_SYSCTL_H_TO_2_6_18__
+
+#include <linux/slab.h>
+#include <linux/err.h>
+#include_next <linux/sysctl.h>
+
+#define CTL_NONE	0
+#define CTL_UNNUMBERED	-2	/* sysctl without a binary number */
+
+#define CTL_SUNRPC	7249        /* sunrpc debug */
+
+/* struct ctl_path describes where in the hierarchy a table is added */
+struct ctl_path {
+	const char *procname;
+	int ctl_name;
+};
+
+#define FAKE_SYSCTL_MAGIC1	((void *) 0xcafebabe)
+
+static inline void __fake_sysctl_table_destroy(struct ctl_table *node)
+{
+	struct ctl_table *next;
+
+	while (node && node[1].extra1 == FAKE_SYSCTL_MAGIC1) {
+		next = node->child;
+		kfree(node);
+		node = next;
+	}
+}
+
+/*
+ * Given a ctl_path and a ctl_table, convert this to the old-fashioned
+ * table hierarchy, linked through table->child.
+ */
+static inline struct ctl_table_header *
+register_sysctl_paths(const struct ctl_path *path, struct ctl_table *table)
+{
+	struct ctl_table_header *result = NULL;
+	struct ctl_table *root = NULL, *tp, **prev = &root;
+
+	for (; path->procname; ++path) {
+		tp = kzalloc(2 * sizeof(struct ctl_table), GFP_KERNEL);
+		if (!tp)
+			goto out;
+
+		tp->ctl_name = path->ctl_name;
+		tp->procname = path->procname;
+		tp->mode = 0555;
+		tp[1].extra1 = FAKE_SYSCTL_MAGIC1;
+		*prev = tp;
+		prev = &tp->child;
+	}
+	*prev = table;
+
+	result = register_sysctl_table(root, 0);
+
+out:
+	if (result == NULL)
+		__fake_sysctl_table_destroy(root);
+
+	return result;
+}
+
+static inline void
+fake_unregister_sysctl_table(struct ctl_table_header *hdr)
+{
+	struct ctl_table *node = hdr->ctl_table;
+
+	unregister_sysctl_table(hdr);
+	__fake_sysctl_table_destroy(node);
+}
+
+#define unregister_sysctl_table(hdr)	fake_unregister_sysctl_table(hdr)
+
+static inline struct ctl_table_header *
+backport_register_sysctl_table(ctl_table *table) {
+	return register_sysctl_table(table, 0);
+}
+
+#define register_sysctl_table backport_register_sysctl_table
+
+#endif /* __BACKPORT_SYSCTL_H_TO_2_6_18__ */
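
The emulation builds the intermediate /proc/sys directories as throw-away ctl_table nodes and tears them down again through the wrapped unregister. A minimal registration sketch (the net/myproto/debug hierarchy and my_* names are hypothetical):

    static int my_debug;

    static struct ctl_path my_path[] = {
            { .procname = "net",     .ctl_name = CTL_NET },
            { .procname = "myproto", .ctl_name = CTL_UNNUMBERED },
            { }
    };

    static struct ctl_table my_table[] = {
            {
                    .ctl_name     = CTL_UNNUMBERED,
                    .procname     = "debug",
                    .data         = &my_debug,
                    .maxlen       = sizeof(int),
                    .mode         = 0644,
                    .proc_handler = proc_dointvec,
            },
            { }
    };

    static struct ctl_table_header *my_hdr;

    /* my_hdr = register_sysctl_paths(my_path, my_table);
     * ...
     * unregister_sysctl_table(my_hdr);    (also frees the fake directory nodes) */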

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/types.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/types.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/types.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,9 @@
+#ifndef BACKPORT_LINUX_TYPES_TO_2_6_19
+#define BACKPORT_LINUX_TYPES_TO_2_6_19
+
+#include_next <linux/types.h>
+
+typedef __u16	__sum16;
+typedef __u32	__wsum;
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/unaligned/access_ok.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/unaligned/access_ok.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/unaligned/access_ok.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,67 @@
+#ifndef _LINUX_UNALIGNED_ACCESS_OK_H
+#define _LINUX_UNALIGNED_ACCESS_OK_H
+
+#include <linux/kernel.h>
+#include <asm/byteorder.h>
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+	return le16_to_cpup((__le16 *)p);
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+	return le32_to_cpup((__le32 *)p);
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+	return le64_to_cpup((__le64 *)p);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+	return be16_to_cpup((__be16 *)p);
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+	return be32_to_cpup((__be32 *)p);
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+	return be64_to_cpup((__be64 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	*((__le16 *)p) = cpu_to_le16(val);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	*((__le32 *)p) = cpu_to_le32(val);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	*((__le64 *)p) = cpu_to_le64(val);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	*((__be16 *)p) = cpu_to_be16(val);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	*((__be32 *)p) = cpu_to_be32(val);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	*((__be64 *)p) = cpu_to_be64(val);
+}
+
+#endif /* _LINUX_UNALIGNED_ACCESS_OK_H */
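
Typical use is pulling fixed-width fields out of a byte buffer at offsets the CPU may not be able to dereference directly. A minimal sketch (struct my_hdr and parse_hdr are hypothetical):

    struct my_hdr {
            u16 version;
            u32 length;
    };

    static void parse_hdr(const u8 *buf, struct my_hdr *hdr)
    {
            hdr->version = get_unaligned_be16(buf);
            hdr->length  = get_unaligned_be32(buf + 2);
    }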

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/wait.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/wait.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/wait.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,47 @@
+#ifndef BACKPORT_LINUX_WAIT_H
+#define BACKPORT_LINUX_WAIT_H
+
+#include_next <linux/wait.h>
+
+#define __wait_event_killable(wq, condition, ret)		\
+do {								\
+	DEFINE_WAIT(__wait);					\
+								\
+	for (;;) {						\
+		prepare_to_wait(&wq, &__wait, TASK_KILLABLE);	\
+		if (condition)					\
+			break;					\
+		if (!fatal_signal_pending(current)) {		\
+			schedule();				\
+			continue;				\
+		}						\
+		ret = -ERESTARTSYS;				\
+		break;						\
+	}							\
+	finish_wait(&wq, &__wait);				\
+} while (0)
+
+/**
+ * wait_event_killable - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_KILLABLE) until the
+ * @condition evaluates to true or a signal is received.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function will return -ERESTARTSYS if it was interrupted by a
+ * signal and 0 if @condition evaluated to true.
+ */
+#define wait_event_killable(wq, condition)			\
+({								\
+	int __ret = 0;						\
+	if (!(condition))					\
+		__wait_event_killable(wq, condition, __ret);	\
+	__ret;							\
+})
+
+#endif
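
Callers use it exactly like the 2.6.25+ primitive: sleep until the condition holds, return -ERESTARTSYS if the task is killed. A minimal sketch (my_wq, done and wait_for_done are hypothetical):

    static DECLARE_WAIT_QUEUE_HEAD(my_wq);
    static int done;

    static int wait_for_done(void)
    {
            /* 0 when 'done' became true, -ERESTARTSYS if SIGKILL arrived */
            return wait_event_killable(my_wq, done);
    }

    /* the producer sets 'done = 1;' and calls wake_up(&my_wq); */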

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/workqueue.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/workqueue.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/linux/workqueue.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,72 @@
+#ifndef BACKPORT_LINUX_WORKQUEUE_TO_2_6_19
+#define BACKPORT_LINUX_WORKQUEUE_TO_2_6_19
+
+#include_next <linux/workqueue.h>
+
+struct delayed_work {
+	struct work_struct work;
+};
+
+static inline void
+backport_INIT_WORK(struct work_struct *work, void *func)
+{
+	INIT_WORK(work, func, work);
+}
+
+static inline int backport_queue_delayed_work(struct workqueue_struct *wq,
+					      struct delayed_work *work,
+					      unsigned long delay)
+{
+	if (likely(!delay))
+		return queue_work(wq, &work->work);
+	else
+		return queue_delayed_work(wq, &work->work, delay);
+}
+
+static inline int 
+backport_cancel_delayed_work(struct delayed_work *work)
+{
+	return cancel_delayed_work(&work->work);
+}
+
+static inline void 
+backport_cancel_rearming_delayed_workqueue(struct workqueue_struct *wq, struct delayed_work *work)
+{
+	cancel_rearming_delayed_workqueue(wq, &work->work);
+}
+
+static inline
+int backport_schedule_delayed_work(struct delayed_work *work, unsigned long delay)
+{
+	if (likely(!delay))
+		return schedule_work(&work->work);
+	else
+		return schedule_delayed_work(&work->work, delay);
+}
+
+#undef INIT_WORK
+#define INIT_WORK(_work, _func) backport_INIT_WORK(_work, _func)
+#define INIT_DELAYED_WORK(_work, _func) INIT_WORK(&(_work)->work, _func)
+#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func) INIT_DELAYED_WORK(_work, _func)
+
+#undef DECLARE_WORK
+#define DECLARE_WORK(n, f) \
+	struct work_struct n = __WORK_INITIALIZER(n, (void (*)(void *))f, &(n))
+#define DECLARE_DELAYED_WORK(n, f) \
+	struct delayed_work n = { .work = __WORK_INITIALIZER(n.work, (void (*)(void *))f, &(n.work)) }
+
+#define queue_delayed_work backport_queue_delayed_work
+#define cancel_delayed_work backport_cancel_delayed_work
+#define cancel_delayed_work_sync cancel_delayed_work
+#define cancel_rearming_delayed_workqueue backport_cancel_rearming_delayed_workqueue
+#define schedule_delayed_work backport_schedule_delayed_work
+#define cancel_delayed_work_sync cancel_delayed_work
+
+static inline void backport_cancel_rearming_delayed_work(struct delayed_work *work)
+{
+	cancel_delayed_work_sync(work);
+}
+
+#define cancel_rearming_delayed_work backport_cancel_rearming_delayed_work
+
+#endif
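
Because INIT_WORK/INIT_DELAYED_WORK are redefined to pass the work item itself as the legacy 'data' argument, driver code written for the 2.6.20+ prototype (handler takes a struct work_struct *) builds unchanged. A minimal sketch (struct my_dev and the my_* names are hypothetical):

    struct my_dev {
            struct delayed_work poll;
    };

    static void my_poll_fn(struct work_struct *work)
    {
            struct my_dev *dev = container_of(work, struct my_dev, poll.work);

            /* ... poll the hardware ... */
            schedule_delayed_work(&dev->poll, HZ);     /* re-arm */
    }

    static void my_start(struct my_dev *dev)
    {
            INIT_DELAYED_WORK(&dev->poll, my_poll_fn);
            schedule_delayed_work(&dev->poll, HZ);
    }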

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/checksum.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/checksum.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/checksum.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,11 @@
+#ifndef __BACKPORT_CHECKSUM_H_TO_2_6_19__
+#define __BACKPORT_CHECKSUM_H_TO_2_6_19__
+
+#include_next <net/checksum.h>
+
+static inline __wsum csum_unfold(__sum16 n)
+{
+	return (__force __wsum)n;
+}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/ip.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/ip.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/ip.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,51 @@
+#ifndef __BACKPORT_NET_IP_H_TO_2_6_23__
+#define __BACKPORT_NET_IP_H_TO_2_6_23__
+
+#include_next<net/ip.h>
+#define inet_get_local_port_range(a, b) { *(a) = sysctl_local_port_range[0]; *(b) = sysctl_local_port_range[1]; }
+
+#endif
+
+
+#ifndef __BACKPORT_IP_H_TO_2_6_24__
+#define __BACKPORT_IP_H_TO_2_6_24__
+
+#include_next <net/ip.h>
+
+static inline void 
+backport_ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
+{
+	__u32 addr;
+	unsigned char scope = broadcast[5] & 0xF;
+
+	buf[0]  = 0;		/* Reserved */
+	buf[1]  = 0xff;		/* Multicast QPN */
+	buf[2]  = 0xff;
+	buf[3]  = 0xff;
+	addr    = ntohl(naddr);
+	buf[4]  = 0xff;
+	buf[5]  = 0x10 | scope;	/* scope from broadcast address */
+	buf[6]  = 0x40;		/* IPv4 signature */
+	buf[7]  = 0x1b;
+	buf[8]  = broadcast[8];		/* P_Key */
+	buf[9]  = broadcast[9];
+	buf[10] = 0;
+	buf[11] = 0;
+	buf[12] = 0;
+	buf[13] = 0;
+	buf[14] = 0;
+	buf[15] = 0;
+	buf[19] = addr & 0xff;
+	addr  >>= 8;
+	buf[18] = addr & 0xff;
+	addr  >>= 8;
+	buf[17] = addr & 0xff;
+	addr  >>= 8;
+	buf[16] = addr & 0x0f;
+}
+
+#undef ip_ib_mc_map
+
+#define ip_ib_mc_map(naddr, broadcast, buf) backport_ip_ib_mc_map(naddr, broadcast, buf)
+
+#endif	/* __BACKPORT_IP_H_TO_2_6_24__ */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/ipv6.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/ipv6.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/ipv6.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,20 @@
+#ifndef BACKPORT_NET_IPV6_H
+#define BACKPORT_NET_IPV6_H
+
+#include_next <net/ipv6.h>
+
+static inline void ipv6_addr_set_v4mapped(const __be32 addr,
+					  struct in6_addr *v4mapped)
+{
+	ipv6_addr_set(v4mapped,
+			0, 0,
+			htonl(0x0000FFFF),
+			addr);
+}
+
+static inline int ipv6_addr_v4mapped(const struct in6_addr *a)
+{
+	return ((a->s6_addr32[0] | a->s6_addr32[1] |
+		(a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0);
+}
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/neighbour.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/neighbour.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/neighbour.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,8 @@
+#ifndef __BACKPORT_NET_NEIGHBOUR_TO_2_6_20__
+#define __BACKPORT_NET_NEIGHBOUR_TO_2_6_20__
+
+#include_next <net/neighbour.h>
+
+#define neigh_cleanup neigh_destructor
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/net_namespace.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/net_namespace.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/net_namespace.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,3 @@
+#ifndef __NET_NAMESPACE_H__
+#define __NET_NAMESPACE_H__
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/route.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/route.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/route.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,13 @@
+#ifndef _BACKPORT_NET_ROUTE_H_
+#define _BACKPORT_NET_ROUTE_H_
+
+#include_next <net/route.h>
+
+#define ip_route_output_flow(net, rp, fl, sk, flags) \
+	ip_route_output_flow(rp, fl, sk, flags)
+
+#define ip_route_output_key(net, rp, fl) ip_route_output_key(rp, fl)
+
+#define inet_addr_type(net, addr) inet_addr_type(addr)
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/rtnetlink.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/rtnetlink.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/rtnetlink.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,6 @@
+#ifndef __BACKPORT_RTNETLINK_TO_2_6_27__
+#define __BACKPORT_RTNETLINK_TO_2_6_27__
+
+#include <linux/rtnetlink.h>
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/udp.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/udp.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/net/udp.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,9 @@
+#ifndef BACKPORT_NET_UDP_H
+#define BACKPORT_NET_UDP_H
+
+#include_next <net/udp.h>
+
+static inline void UDPX_INC_STATS_BH(struct sock *sk, int field)
+{ }
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/scsi/scsi.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/scsi/scsi.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/scsi/scsi.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,7 @@
+#ifndef SCSI_SCSI_H_BACKPORT
+#define SCSI_SCSI_H_BACKPORT
+
+#include_next <scsi/scsi.h>
+
+#define SCSI_MAX_VARLEN_CDB_SIZE 260
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/scsi/scsi_cmnd.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/scsi/scsi_cmnd.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/scsi/scsi_cmnd.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,13 @@
+#ifndef SCSI_SCSI_CMND_BACKPORT_TO_2_6_22_H
+#define SCSI_SCSI_CMND_BACKPORT_TO_2_6_22_H
+
+#include_next <scsi/scsi_cmnd.h>
+
+#define scsi_sg_count(cmd) ((cmd)->use_sg)
+#define scsi_sglist(cmd) ((struct scatterlist *)(cmd)->request_buffer)
+#define scsi_bufflen(cmd) ((cmd)->request_bufflen)
+
+#define scsi_for_each_sg(cmd, sg, nseg, __i)			\
+	for (__i = 0, sg = scsi_sglist(cmd); __i < (nseg); __i++, (sg)++)
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/scsi/scsi_device.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/scsi/scsi_device.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/scsi/scsi_device.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,8 @@
+#ifndef SCSI_SCSI_DEVICE_BACKPORT_TO_2_6_26_H
+#define SCSI_SCSI_DEVICE_BACKPORT_TO_2_6_26_H
+
+#include_next <scsi/scsi_device.h>
+
+#define __starget_for_each_device(scsi_target, p, fn) starget_for_each_device(scsi_target, p, fn)
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/scsi/scsi_transport.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/scsi/scsi_transport.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/scsi/scsi_transport.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,7 @@
+#ifndef SCSI_SCSI_TRANSPORT_BACKPORT_TO_2_6_22_H
+#define SCSI_SCSI_TRANSPORT_BACKPORT_TO_2_6_22_H
+
+#include <scsi/scsi_device.h>
+#include_next <scsi/scsi_transport.h>
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/src/genalloc.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/src/genalloc.c	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/src/genalloc.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,198 @@
+/*
+ * Basic general purpose allocator for managing special purpose memory
+ * not managed by the regular kmalloc/kfree interface.
+ * Uses for this includes on-device special memory, uncached memory
+ * etc.
+ *
+ * Copyright 2005 (C) Jes Sorensen <jes at trained-monkey.org>
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2.  See the file COPYING for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/genalloc.h>
+
+
+/**
+ * gen_pool_create - create a new special memory pool
+ * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
+ * @nid: node id of the node the pool structure should be allocated on, or -1
+ *
+ * Create a new special memory pool that can be used to manage special purpose
+ * memory not managed by the regular kmalloc/kfree interface.
+ */
+struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
+{
+	struct gen_pool *pool;
+
+	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
+	if (pool != NULL) {
+		rwlock_init(&pool->lock);
+		INIT_LIST_HEAD(&pool->chunks);
+		pool->min_alloc_order = min_alloc_order;
+	}
+	return pool;
+}
+EXPORT_SYMBOL(gen_pool_create);
+
+/**
+ * gen_pool_add - add a new chunk of special memory to the pool
+ * @pool: pool to add new memory chunk to
+ * @addr: starting address of memory chunk to add to pool
+ * @size: size in bytes of the memory chunk to add to pool
+ * @nid: node id of the node the chunk structure and bitmap should be
+ *       allocated on, or -1
+ *
+ * Add a new chunk of special memory to the specified pool.
+ */
+int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
+		 int nid)
+{
+	struct gen_pool_chunk *chunk;
+	int nbits = size >> pool->min_alloc_order;
+	int nbytes = sizeof(struct gen_pool_chunk) +
+				(nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
+
+	chunk = kmalloc_node(nbytes, GFP_KERNEL, nid);
+	if (unlikely(chunk == NULL))
+		return -1;
+
+	memset(chunk, 0, nbytes);
+	spin_lock_init(&chunk->lock);
+	chunk->start_addr = addr;
+	chunk->end_addr = addr + size;
+
+	write_lock(&pool->lock);
+	list_add(&chunk->next_chunk, &pool->chunks);
+	write_unlock(&pool->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(gen_pool_add);
+
+/**
+ * gen_pool_destroy - destroy a special memory pool
+ * @pool: pool to destroy
+ *
+ * Destroy the specified special memory pool. Verifies that there are no
+ * outstanding allocations.
+ */
+void gen_pool_destroy(struct gen_pool *pool)
+{
+	struct list_head *_chunk, *_next_chunk;
+	struct gen_pool_chunk *chunk;
+	int order = pool->min_alloc_order;
+	int bit, end_bit;
+
+
+	write_lock(&pool->lock);
+	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
+		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+		list_del(&chunk->next_chunk);
+
+		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
+		bit = find_next_bit(chunk->bits, end_bit, 0);
+		BUG_ON(bit < end_bit);
+
+		kfree(chunk);
+	}
+	kfree(pool);
+	return;
+}
+EXPORT_SYMBOL(gen_pool_destroy);
+
+/**
+ * gen_pool_alloc - allocate special memory from the pool
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ *
+ * Allocate the requested number of bytes from the specified pool.
+ * Uses a first-fit algorithm.
+ */
+unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
+{
+	struct list_head *_chunk;
+	struct gen_pool_chunk *chunk;
+	unsigned long addr, flags;
+	int order = pool->min_alloc_order;
+	int nbits, bit, start_bit, end_bit;
+
+	if (size == 0)
+		return 0;
+
+	nbits = (size + (1UL << order) - 1) >> order;
+
+	read_lock(&pool->lock);
+	list_for_each(_chunk, &pool->chunks) {
+		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+
+		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
+		end_bit -= nbits + 1;
+
+		spin_lock_irqsave(&chunk->lock, flags);
+		bit = -1;
+		while (bit + 1 < end_bit) {
+			bit = find_next_zero_bit(chunk->bits, end_bit, bit + 1);
+			if (bit >= end_bit)
+				break;
+
+			start_bit = bit;
+			if (nbits > 1) {
+				bit = find_next_bit(chunk->bits, bit + nbits,
+							bit + 1);
+				if (bit - start_bit < nbits)
+					continue;
+			}
+
+			addr = chunk->start_addr +
+					    ((unsigned long)start_bit << order);
+			while (nbits--)
+				__set_bit(start_bit++, &chunk->bits);
+			spin_unlock_irqrestore(&chunk->lock, flags);
+			read_unlock(&pool->lock);
+			return addr;
+		}
+		spin_unlock_irqrestore(&chunk->lock, flags);
+	}
+	read_unlock(&pool->lock);
+	return 0;
+}
+EXPORT_SYMBOL(gen_pool_alloc);
+
+/**
+ * gen_pool_free - free allocated special memory back to the pool
+ * @pool: pool to free to
+ * @addr: starting address of memory to free back to pool
+ * @size: size in bytes of memory to free
+ *
+ * Free previously allocated special memory back to the specified pool.
+ */
+void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
+{
+	struct list_head *_chunk;
+	struct gen_pool_chunk *chunk;
+	unsigned long flags;
+	int order = pool->min_alloc_order;
+	int bit, nbits;
+
+	nbits = (size + (1UL << order) - 1) >> order;
+
+	read_lock(&pool->lock);
+	list_for_each(_chunk, &pool->chunks) {
+		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+
+		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
+			BUG_ON(addr + size > chunk->end_addr);
+			spin_lock_irqsave(&chunk->lock, flags);
+			bit = (addr - chunk->start_addr) >> order;
+			while (nbits--)
+				__clear_bit(bit++, &chunk->bits);
+			spin_unlock_irqrestore(&chunk->lock, flags);
+			break;
+		}
+	}
+	BUG_ON(nbits > 0);
+	read_unlock(&pool->lock);
+}
+EXPORT_SYMBOL(gen_pool_free);
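
For reference, the intended call sequence for the allocator above, with a hypothetical 64 KB region of device-local memory managed in 256-byte granules:

    static struct gen_pool *my_pool;

    static int my_pool_setup(unsigned long dev_mem_start)   /* bus/virt cookie */
    {
            my_pool = gen_pool_create(8, -1);        /* 2^8 = 256-byte units */
            if (!my_pool)
                    return -ENOMEM;

            if (gen_pool_add(my_pool, dev_mem_start, 64 * 1024, -1)) {
                    gen_pool_destroy(my_pool);
                    return -ENOMEM;
            }
            return 0;
    }

    /* later:
     *   unsigned long buf = gen_pool_alloc(my_pool, 1024);
     *   ...
     *   gen_pool_free(my_pool, buf, 1024);
     */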

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/src/namespace.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/src/namespace.c	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/src/namespace.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,95 @@
+#include <linux/spinlock_types.h>
+#include <linux/percpu.h>
+#include <linux/mount.h>
+#include <linux/module.h>
+
+struct mnt_writer {
+	/*
+	 * If holding multiple instances of this lock, they
+	 * must be ordered by cpu number.
+	 */
+	spinlock_t lock;
+	struct lock_class_key lock_class; /* compiles out with !lockdep */
+	unsigned long count;
+	struct vfsmount *mnt;
+} ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU(struct mnt_writer, mnt_writers);
+
+int __init init_mnt_writers(void)
+{
+	int cpu;
+	for_each_possible_cpu(cpu) {
+		struct mnt_writer *writer = &per_cpu(mnt_writers, cpu);
+		spin_lock_init(&writer->lock);
+		lockdep_set_class(&writer->lock, &writer->lock_class);
+		writer->count = 0;
+	}
+	return 0;
+}
+
+static inline void __clear_mnt_count(struct mnt_writer *cpu_writer)
+{
+	if (!cpu_writer->mnt)
+		return;
+	/*
+	 * This is in case anyone ever leaves an invalid,
+	 * old ->mnt and a count of 0.
+	 */
+	if (!cpu_writer->count)
+		return;
+	cpu_writer->count = 0;
+}
+
+static inline void use_cpu_writer_for_mount(struct mnt_writer *cpu_writer,
+					  struct vfsmount *mnt)
+{
+	if (cpu_writer->mnt == mnt)
+		return;
+	__clear_mnt_count(cpu_writer);
+	cpu_writer->mnt = mnt;
+}
+
+int mnt_want_write(struct vfsmount *mnt)
+{
+	int ret = 0;
+	struct mnt_writer *cpu_writer;
+
+	cpu_writer = &get_cpu_var(mnt_writers);
+	spin_lock(&cpu_writer->lock);
+	if (__mnt_is_readonly(mnt)) {
+		ret = -EROFS;
+		goto out;
+	}
+	use_cpu_writer_for_mount(cpu_writer, mnt);
+	cpu_writer->count++;
+out:
+	spin_unlock(&cpu_writer->lock);
+	put_cpu_var(mnt_writers);
+	return ret;
+}
+EXPORT_SYMBOL(mnt_want_write);
+
+void mnt_drop_write(struct vfsmount *mnt)
+{
+	struct mnt_writer *cpu_writer;
+
+	cpu_writer = &get_cpu_var(mnt_writers);
+	spin_lock(&cpu_writer->lock);
+
+	use_cpu_writer_for_mount(cpu_writer, mnt);
+	if (cpu_writer->count > 0) {
+		cpu_writer->count--;
+	}
+
+	spin_unlock(&cpu_writer->lock);
+	/*
+	 * This could be done right after the spinlock
+	 * is taken because the spinlock keeps us on
+	 * the cpu, and disables preemption.  However,
+	 * putting it here bounds the amount that
+	 * __mnt_writers can underflow.  Without it,
+	 * we could theoretically wrap __mnt_writers.
+	 */
+	put_cpu_var(mnt_writers);
+}
+EXPORT_SYMBOL(mnt_drop_write);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/src/writeback.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/src/writeback.c	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18-EL5.3/include/src/writeback.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,106 @@
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/pagevec.h>
+#include <linux/writeback.h>
+#include <linux/mpage.h>
+#include <linux/module.h>
+
+int backport_write_cache_pages(struct address_space *mapping,
+                      struct writeback_control *wbc, backport_writepage_t writepage,
+                      void *data)
+{
+        struct backing_dev_info *bdi = mapping->backing_dev_info;
+        int ret = 0;
+        int done = 0;
+        struct pagevec pvec;
+        int nr_pages;
+        pgoff_t index;
+        pgoff_t end;            /* Inclusive */
+        int scanned = 0;
+        int range_whole = 0;
+        long nr_to_write = wbc->nr_to_write;
+
+        if (wbc->nonblocking && bdi_write_congested(bdi)) {
+                wbc->encountered_congestion = 1;
+                return 0;
+        }
+
+        pagevec_init(&pvec, 0);
+        if (wbc->range_cyclic) {
+                index = mapping->writeback_index; /* Start from prev offset */
+                end = -1;
+        } else {
+                index = wbc->range_start >> PAGE_CACHE_SHIFT;
+                end = wbc->range_end >> PAGE_CACHE_SHIFT;
+                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+                        range_whole = 1;
+                scanned = 1;
+        }
+retry:
+        while (!done && (index <= end) &&
+               (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+                                              PAGECACHE_TAG_DIRTY,
+                                              min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+                unsigned i;
+
+                scanned = 1;
+                for (i = 0; i < nr_pages; i++) {
+                        struct page *page = pvec.pages[i];
+
+                        /*
+                         * At this point we hold neither mapping->tree_lock nor
+                         * lock on the page itself: the page may be truncated or
+                         * invalidated (changing page->mapping to NULL), or even
+                         * swizzled back from swapper_space to tmpfs file
+                         * mapping
+                         */
+                        lock_page(page);
+
+                        if (unlikely(page->mapping != mapping)) {
+                                unlock_page(page);
+                                continue;
+                        }
+
+                        if (!wbc->range_cyclic && page->index > end) {
+                                done = 1;
+                                unlock_page(page);
+                                continue;
+                        }
+
+                        if (wbc->sync_mode != WB_SYNC_NONE)
+                                wait_on_page_writeback(page);
+
+                        if (PageWriteback(page) ||
+                            !clear_page_dirty_for_io(page)) {
+                                unlock_page(page);
+                                continue;
+                        }
+
+                        ret = (*writepage)(page, wbc, data);
+
+                        if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
+                                unlock_page(page);
+                                ret = 0;
+                        }
+                        if (ret || (--nr_to_write <= 0))
+                                done = 1;
+                        if (wbc->nonblocking && bdi_write_congested(bdi)) {
+                                wbc->encountered_congestion = 1;
+                                done = 1;
+                        }
+                }
+                pagevec_release(&pvec);
+                cond_resched();
+        }
+        if (!scanned && !done) {
+                /*
+                 * We hit the last page and there is more work to be done: wrap
+                 * back to the start of the file
+                 */
+                scanned = 1;
+                index = 0;
+                goto retry;
+        }
+        return ret;
+}
+EXPORT_SYMBOL(backport_write_cache_pages);
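
The helper mirrors write_cache_pages(): it walks the dirty pages of a mapping and hands each locked page to the callback, which must unlock it. A minimal ->writepages() sketch (the my_* names are hypothetical, the per-page I/O is elided, and the callback signature is inferred from the call site above):

    static int my_writepage(struct page *page, struct writeback_control *wbc,
                            void *data)
    {
            /* issue I/O for 'page' against the mapping passed in 'data' ... */
            unlock_page(page);
            return 0;
    }

    static int my_writepages(struct address_space *mapping,
                             struct writeback_control *wbc)
    {
            return backport_write_cache_pages(mapping, wbc, my_writepage,
                                              mapping);
    }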

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18_FC6/include/linux/list.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18_FC6/include/linux/list.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18_FC6/include/linux/list.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,9 @@
+#ifndef __BACKPORT_LINUX_LIST_H_TO_2_6_24__
+#define __BACKPORT_LINUX_LIST_H_TO_2_6_24__
+#include_next<linux/list.h>
+
+#define list_first_entry(ptr, type, member) \
+	list_entry((ptr)->next, type, member)
+
+
+#endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18_FC6/include/linux/log2.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18_FC6/include/linux/log2.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18_FC6/include/linux/log2.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -64,6 +64,15 @@
 	return 1UL << fls_long(n - 1);
 }
 
+/*
+ * round down to nearest power of two
+ */
+static inline __attribute__((const))
+unsigned long __rounddown_pow_of_two(unsigned long n)
+{
+	return 1UL << (fls_long(n) - 1);
+}
+
 /**
  * ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value
  * @n - parameter
@@ -166,4 +175,20 @@
 	__roundup_pow_of_two(n)			\
  )
 
+/**
+ * rounddown_pow_of_two - round the given value down to nearest power of two
+ * @n - parameter
+ *
+ * round the given value down to the nearest power of two
+ * - the result is undefined when n == 0
+ * - this can be used to initialise global variables from constant data
+ */
+#define rounddown_pow_of_two(n)			\
+(						\
+	__builtin_constant_p(n) ? (		\
+		(n == 1) ? 0 :			\
+		(1UL << ilog2(n))) :		\
+	__rounddown_pow_of_two(n)		\
+ )
+
 #endif /* _LINUX_LOG2_H */
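
For a non-power-of-two argument the two macros bracket it from both sides, e.g. roundup_pow_of_two(1000) == 1024 while rounddown_pow_of_two(1000) == 512. A minimal sizing sketch (my_ring_entries is hypothetical):

    static unsigned long my_ring_entries(unsigned long requested)
    {
            /* clamp a requested ring size to something the hardware indexes
             * with a power-of-two mask, e.g. 1000 -> 512 */
            return rounddown_pow_of_two(requested);
    }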

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18_FC6/include/linux/netdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18_FC6/include/linux/netdevice.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18_FC6/include/linux/netdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -15,6 +15,7 @@
 	(netdev)->ethtool_ops = (struct ethtool_ops *)(ops)
 
 #define NETIF_F_LRO		32768   /* large receive offload */
+#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
 
 #define dev_get_by_name(net, name) dev_get_by_name(name)
 #endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18_suse10_2/include/linux/netdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18_suse10_2/include/linux/netdevice.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.18_suse10_2/include/linux/netdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -15,6 +15,7 @@
 	(netdev)->ethtool_ops = (struct ethtool_ops *)(ops)
 
 #define NETIF_F_LRO		32768   /* large receive offload */
+#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
 
 #define dev_get_by_name(net, name) dev_get_by_name(name)
 #endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.19/include/linux/netdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.19/include/linux/netdevice.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.19/include/linux/netdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -6,5 +6,6 @@
 #define dev_get_by_name(net, name) dev_get_by_name(name)
 
 #define NETIF_F_LRO		32768   /* large receive offload */
+#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
 
 #endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.20/include/linux/netdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.20/include/linux/netdevice.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.20/include/linux/netdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -6,5 +6,6 @@
 #define dev_get_by_name(net, name) dev_get_by_name(name)
 
 #define NETIF_F_LRO		32768   /* large receive offload */
+#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
 
 #endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.21/include/linux/netdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.21/include/linux/netdevice.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.21/include/linux/netdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -6,5 +6,6 @@
 #define dev_get_by_name(net, name) dev_get_by_name(name)
 
 #define NETIF_F_LRO		32768   /* large receive offload */
+#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
 
 #endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/backing-dev.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/backing-dev.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/backing-dev.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,57 @@
+#ifndef BACKPORT_LINUX_BACK_DEV_H
+#define BACKPORT_LINUX_BACK_DEV_H
+
+#include_next <linux/backing-dev.h>
+
+enum bdi_stat_item {
+	BDI_RECLAIMABLE,
+	BDI_WRITEBACK,
+	NR_BDI_STAT_ITEMS
+};
+
+
+static inline void inc_bdi_stat(struct backing_dev_info *bdi,
+		enum bdi_stat_item item)
+{
+	return;
+}
+
+static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
+		enum bdi_stat_item item)
+{
+	return;
+}
+
+static inline void dec_bdi_stat(struct backing_dev_info *bdi,
+		enum bdi_stat_item item)
+{
+	return;
+}
+
+static inline int bdi_init(struct backing_dev_info *bdi)
+{
+	return 0;
+}
+
+static inline void bdi_destroy(struct backing_dev_info *bdi)
+{
+	return;
+}
+
+static inline int bdi_register(struct backing_dev_info *bdi, struct device *parent,
+				const char *fmt, ...)
+{
+	return 0;
+}
+
+static inline int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
+{
+	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
+}
+
+static inline void bdi_unregister(struct backing_dev_info *bdi)
+{
+	return;
+}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/capability.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/capability.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/capability.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,39 @@
+#ifndef BACKPORT_LINUX_CAPABILITY_H
+#define BACKPORT_LINUX_CAPABILITY_H
+
+#include_next <linux/capability.h>
+
+/* Override MAC access.
+   The base kernel enforces no MAC policy.
+   An LSM may enforce a MAC policy, and if it does and it chooses
+   to implement capability based overrides of that policy, this is
+   the capability it should use to do so. */
+
+#define CAP_MAC_OVERRIDE     32
+
+#define CAP_FS_MASK_B0	(CAP_TO_MASK(CAP_CHOWN)			\
+			 | CAP_TO_MASK(CAP_DAC_OVERRIDE)	\
+			 | CAP_TO_MASK(CAP_DAC_READ_SEARCH)	\
+			 | CAP_TO_MASK(CAP_FOWNER)		\
+			 | CAP_TO_MASK(CAP_FSETID))
+
+#define CAP_FS_MASK_B1	(CAP_TO_MASK(CAP_MAC_OVERRIDE))
+
+#define CAP_NFSD_SET	(CAP_FS_MASK_B0|CAP_TO_MASK(CAP_SYS_RESOURCE))
+#define CAP_FS_SET	(CAP_FS_MASK_B0)
+
+static inline kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a,
+					      const kernel_cap_t permitted)
+{
+	const kernel_cap_t __cap_nfsd_set = CAP_NFSD_SET;
+	return cap_combine(a,
+			   cap_intersect(permitted, __cap_nfsd_set));
+}
+
+static inline kernel_cap_t cap_drop_nfsd_set(const kernel_cap_t a)
+{
+	const kernel_cap_t __cap_fs_set = CAP_NFSD_SET;
+	return cap_drop(a, __cap_fs_set);
+}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/completion.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/completion.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/completion.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,8 @@
+#ifndef BACKPORT_LINUX_COMPLETION_H
+#define BACKPORT_LINUX_COMPLETION_H
+
+#include_next <linux/completion.h>
+
+#define wait_for_completion_killable(_args) wait_for_completion_interruptible(_args)
+
+#endif /* BACKPORT_LINUX_COMPLETION_H */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/err.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/err.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/err.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,19 @@
+#ifndef BACKPORT_LINUX_ERR_H
+#define BACKPORT_LINUX_ERR_H
+
+#include_next <linux/err.h>
+
+/**
+ * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
+ * @ptr: The pointer to cast.
+ *
+ * Explicitly cast an error-valued pointer to another pointer type in such a
+ * way as to make it clear that's what's going on.
+ */
+static inline void *ERR_CAST(const void *ptr)
+{
+	/* cast away the const */
+	return (void *) ptr;
+}
+
+#endif /* BACKPORT_LINUX_ERR_H */
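
ERR_CAST() keeps the encoded errno while changing the pointer type, avoiding ad-hoc casts at call sites that merely propagate a failure. A minimal sketch (struct foo/bar and get_bar are hypothetical; the success path is elided):

    struct bar;
    struct foo;
    struct bar *get_bar(void);

    static struct foo *get_foo(void)
    {
            struct bar *b = get_bar();

            if (IS_ERR(b))
                    return ERR_CAST(b);     /* propagate the errno-valued pointer */

            return ERR_PTR(-ENOSYS);        /* real bar-to-foo conversion elided */
    }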

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/file.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/file.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/file.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,12 @@
+#ifndef _BACKPORT_LINUX_FILE_H_
+#define _BACKPORT_LINUX_FILE_H_
+
+#include_next <linux/file.h>
+#include <linux/fs.h>
+
+static inline void drop_file_write_access(struct file *filp)
+{
+	put_write_access(filp->f_path.dentry->d_inode);
+}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/freezer.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/freezer.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/freezer.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,8 @@
+#ifndef BACKPORT_LINUX_FREEZER_H
+#define BACKPORT_LINUX_FREEZER_H
+
+#include_next <linux/freezer.h>
+
+static inline void set_freezable(void) {}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/fs.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/fs.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/fs.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,61 @@
+#ifndef BACKPORT_LINUX_FS_H
+#define BACKPORT_LINUX_FS_H
+
+#include_next <linux/fs.h>
+
+#define FILE_LOCK_DEFERRED 1
+
+#define ATTR_KILL_PRIV  (1 << 14)
+
+static inline void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
+{
+	new->fl_owner = fl->fl_owner;
+	new->fl_pid = fl->fl_pid;
+	new->fl_file = NULL;
+	new->fl_flags = fl->fl_flags;
+	new->fl_type = fl->fl_type;
+	new->fl_start = fl->fl_start;
+	new->fl_end = fl->fl_end;
+	new->fl_ops = NULL;
+	new->fl_lmops = NULL;
+}
+
+#define vfs_setlease(a, b, c) setlease(a, b, c)
+
+static inline int __mandatory_lock(struct inode *ino)
+{
+	return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID;
+}
+
+#define mandatory_lock(_args) MANDATORY_LOCK(_args)
+
+static inline int backport_vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
+{
+	return vfs_symlink(dir, dentry, oldname, 0);
+}
+
+#define vfs_symlink(_dir, _dentry, _oldname) backport_vfs_symlink(_dir, _dentry, _oldname)
+
+#ifdef CONFIG_DEBUG_WRITECOUNT
+static inline void file_take_write(struct file *f)
+{
+	WARN_ON(f->f_mnt_write_state != 0);
+	f->f_mnt_write_state = FILE_MNT_WRITE_TAKEN;
+}
+#else
+static inline void file_take_write(struct file *filp) {}
+#endif
+
+static inline int inode_permission(struct inode *inode, int flags)
+{
+	return permission(inode, flags, NULL);
+}
+
+static inline int __mnt_is_readonly(struct vfsmount *mnt)
+{
+	if (mnt->mnt_sb->s_flags & MS_RDONLY)
+		return 1;
+	return 0;
+}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/highmem.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/highmem.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/highmem.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,36 @@
+#ifndef LINUX_HIGHMEM_H
+#define LINUX_HIGHMEM_H
+
+#include_next <linux/highmem.h>
+
+static inline void zero_user_segments(struct page *page,
+	unsigned start1, unsigned end1,
+	unsigned start2, unsigned end2)
+{
+	void *kaddr = kmap_atomic(page, KM_USER0);
+
+	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);
+
+	if (end1 > start1)
+		memset(kaddr + start1, 0, end1 - start1);
+
+	if (end2 > start2)
+		memset(kaddr + start2, 0, end2 - start2);
+
+	kunmap_atomic(kaddr, KM_USER0);
+	flush_dcache_page(page);
+}
+
+static inline void zero_user_segment(struct page *page,
+	unsigned start, unsigned end)
+{
+	zero_user_segments(page, start, end, 0, 0);
+}
+
+static inline void zero_user(struct page *page,
+	unsigned start, unsigned size)
+{
+	zero_user_segments(page, start, start + size, 0, 0);
+}
+
+#endif

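For reference, a minimal caller-side sketch of the zero_user_segment() helper added above (illustrative only, not part of this commit; "page" and "valid" are assumed to come from the caller):

	/* Illustrative only: zero the tail of a partially written page. */
	unsigned int valid = 100;			/* bytes actually written (assumed) */

	zero_user_segment(page, valid, PAGE_SIZE);	/* clears [valid, PAGE_SIZE) and flushes the dcache */
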
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/jiffies.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/jiffies.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/jiffies.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,10 @@
+#ifndef _JIFFIES_BACKPORT_H
+#define _JIFFIES_BACKPORT_H
+
+#include_next <linux/jiffies.h>
+
+#define time_in_range(a,b,c) \
+	(time_after_eq(a,b) && \
+	 time_before_eq(a,c))
+
+#endif /* _JIFFIES_BACKPORT_H */

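The time_in_range() macro above simply composes the existing time_after_eq()/time_before_eq() helpers. A hedged usage sketch (not part of this commit; the one-second window is assumed):

	/* Illustrative only: check whether "jiffies" falls inside [start, start + HZ]. */
	unsigned long start = jiffies;
	unsigned long deadline = start + HZ;		/* assumed one-second window */

	if (time_in_range(jiffies, start, deadline))
		printk(KERN_DEBUG "still inside the window\n");
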
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/kernel.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/kernel.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/kernel.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,33 @@
+#ifndef BACKPORT_KERNEL_H_2_6_22
+#define BACKPORT_KERNEL_H_2_6_22
+
+#include_next <linux/kernel.h>
+
+#include <asm/errno.h>
+#include <asm/string.h>
+
+#define USHORT_MAX	((u16)(~0U))
+
+static inline int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
+{
+	char *tail;
+	unsigned long val;
+	size_t len;
+
+	*res = 0;
+	len = strlen(cp);
+	if (len == 0)
+		return -EINVAL;
+
+	val = simple_strtoul(cp, &tail, base);
+	if ((*tail == '\0') ||
+		((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
+		*res = val;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+
+#endif

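The strict_strtoul() emulation above wraps simple_strtoul() and accepts only fully numeric strings, optionally terminated by a single trailing newline (the common sysfs/module-param case). A minimal sketch of a caller, illustrative only:

	/* Illustrative only: parse a sysfs-style input such as "42\n". */
	unsigned long val;

	if (strict_strtoul("42\n", 10, &val))		/* 0 on success, -EINVAL otherwise */
		printk(KERN_ERR "invalid number\n");
	else
		printk(KERN_INFO "parsed %lu\n", val);
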
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/mm.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/mm.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/mm.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -7,6 +7,15 @@
 #include <asm/highmem.h>
 #endif
 
+#define VM_CAN_NONLINEAR 0x08000000     /* Has ->fault & does nonlinear pages */
+
 #define is_vmalloc_addr(x) ((unsigned long)(x) >= VMALLOC_START && (unsigned long)(x) < VMALLOC_END)
 
+struct shrinker {
+	shrinker_t		shrink;
+	struct list_head	list;
+	int			seeks;  /* seeks to recreate an obj */
+	long			nr;     /* objs pending delete */
+};
+
 #endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/mount.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/mount.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/mount.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,11 @@
+#ifndef BACKPORT_LINUX_MOUNT_H
+#define BACKPORT_LINUX_MOUNT_H
+
+#include_next <linux/mount.h>
+#include <linux/fs.h>
+
+extern int mnt_want_write(struct vfsmount *mnt);
+extern void mnt_drop_write(struct vfsmount *mnt);
+extern int init_mnt_writers(void);
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/namei.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/namei.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/namei.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,30 @@
+#ifndef BACKPORT_LINUX_NAMEI_H
+#define BACKPORT_LINUX_NAMEI_H
+
+#include_next <linux/namei.h>
+#include <linux/audit.h>
+#include <linux/path.h>
+
+static inline int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
+		    const char *name, unsigned int flags,
+		    struct nameidata *nd)
+{
+	int retval;
+
+	/* same as do_path_lookup */
+	nd->last_type = LAST_ROOT;
+	nd->flags = flags;
+	nd->depth = 0;
+
+	nd->dentry = dentry;
+	nd->mnt = mnt;
+	backport_path_get(nd);
+
+	retval = path_walk(name, nd);
+	if (unlikely(!retval && !audit_dummy_context() && nd->dentry &&
+				nd->dentry->d_inode))
+		audit_inode(name, nd->dentry->d_inode);
+
+	return retval;
+}
+#endif /* BACKPORT_LINUX_NAMEI_H */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/net.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/net.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/net.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,18 @@
+#ifndef BACKPORT_LINUX_NET_H
+#define BACKPORT_LINUX_NET_H
+
+#include_next <linux/net.h>
+
+enum sock_shutdown_cmd {
+	SHUT_RD		= 0,
+	SHUT_WR		= 1,
+	SHUT_RDWR	= 2,
+};
+
+
+static inline int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd flags)
+{
+	return sock->ops->shutdown(sock, flags);
+}
+
+#endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/netdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/netdevice.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/netdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -6,5 +6,6 @@
 #define dev_get_by_name(net, name) dev_get_by_name(name)
 
 #define NETIF_F_LRO		32768   /* large receive offload */
+#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
 
 #endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/pagemap.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/pagemap.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/pagemap.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,8 @@
+#ifndef BACKPORT_LINUX_PAGEMAP_H
+#define BACKPORT_LINUX_PAGEMAP_H
+
+#include_next <linux/pagemap.h>
+
+#define __grab_cache_page	grab_cache_page
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/path.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/path.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/path.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,30 @@
+#ifndef _BACKPORT_LINUX_PATH_H
+#define _BACKPORT_LINUX_PATH_H
+
+#include <linux/mount.h>
+
+static inline void path_put(struct path *path)
+{
+	dput(path->dentry);
+	mntput(path->mnt);
+}
+
+static inline void path_get(struct path *path)
+{
+	mntget(path->mnt);
+	dget(path->dentry);
+}
+
+static inline void backport_path_put(struct nameidata *nd)
+{
+	dput(nd->dentry);
+	mntput(nd->mnt);
+}
+
+static inline void backport_path_get(struct nameidata *nd)
+{
+	mntget(nd->mnt);
+	dget(nd->dentry);
+}
+
+#endif  /* _BACKPORT_LINUX_PATH_H */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/proc_fs.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/proc_fs.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/proc_fs.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,30 @@
+#ifndef BACKPORT_LINUX_PROC_FS_H
+#define BACKPORT_LINUX_PROC_FS_H
+
+#include_next <linux/proc_fs.h>
+
+static inline struct proc_dir_entry *proc_create(const char *name,
+	mode_t mode, struct proc_dir_entry *parent,
+	const struct file_operations *fops)
+{
+	struct proc_dir_entry *res = create_proc_entry(name, mode, parent);
+	if (res)
+		res->proc_fops = fops;
+	return res;
+}
+
+static inline struct proc_dir_entry *proc_create_data(const char *name, mode_t mode,
+				struct proc_dir_entry *parent,
+				const struct file_operations *proc_fops,
+				void *data)
+{
+		struct proc_dir_entry *pde;
+
+		pde = proc_create(name, mode, parent, proc_fops);
+		if (pde)
+			pde->data = data;
+
+		return pde;
+}
+
+#endif

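proc_create() and proc_create_data() above are emulated on top of create_proc_entry() by filling in proc_fops and data after the entry is created. A hedged sketch of how a caller could use the backport ("fops" and "priv" are assumed to be supplied elsewhere):

	/* Illustrative only: register a procfs entry with private data attached. */
	static int my_proc_init(const struct file_operations *fops, void *priv)
	{
		struct proc_dir_entry *pde;

		pde = proc_create_data("my_entry", 0444, NULL, fops, priv);
		return pde ? 0 : -ENOMEM;
	}
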
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/radix-tree.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/radix-tree.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/radix-tree.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,19 @@
+#ifndef BACKPORT_LINUX_RADIX_TREE_H
+#define BACKPORT_LINUX_RADIX_TREE_H
+
+#include_next <linux/radix-tree.h>
+
+static inline int backport_radix_tree_preload(gfp_t gfp_mask)
+{
+	return 0;
+}
+
+#define radix_tree_preload backport_radix_tree_preload
+
+static inline void backport_radix_tree_preload_end(void)
+{
+}
+
+#define radix_tree_preload_end backport_radix_tree_preload_end
+
+#endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/scatterlist.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/scatterlist.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/scatterlist.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -15,6 +15,10 @@
 	sg->page = page;
 }
 
+static inline void sg_mark_end(struct scatterlist *sg)
+{
+}
+
 #define sg_page(a) (a)->page
 #define sg_init_table(a, b)
 

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/sched.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/sched.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/sched.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,26 @@
+#ifndef LINUX_SCHED_BACKPORT_H
+#define LINUX_SCHED_BACKPORT_H
+
+#include_next <linux/sched.h>
+
+#define TASK_WAKEKILL	   128
+
+#define TASK_KILLABLE	   (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
+
+#define schedule_timeout_killable(_arg) schedule_timeout_interruptible(_arg)
+
+static inline int __fatal_signal_pending(struct task_struct *tsk)
+{
+	return sigismember(&tsk->pending.signal, SIGKILL);
+}
+
+static inline int fatal_signal_pending(struct task_struct *p)
+{
+	return signal_pending(p) && __fatal_signal_pending(p);
+}
+
+static inline pid_t task_pid_nr(struct task_struct *tsk)
+{
+	return tsk->pid;
+}
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/security.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/security.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/security.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,55 @@
+#ifndef BACKPORT_LINUX_SECURITY_H
+#define BACKPORT_LINUX_SECURITY_H
+
+#include_next <linux/security.h>
+
+struct security_mnt_opts {
+	char **mnt_opts;
+	int *mnt_opts_flags;
+	int num_mnt_opts;
+};
+
+static inline void security_init_mnt_opts(struct security_mnt_opts *opts)
+{
+	opts->mnt_opts = NULL;
+	opts->mnt_opts_flags = NULL;
+	opts->num_mnt_opts = 0;
+}
+
+static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
+{
+	int i;
+	if (opts->mnt_opts)
+		for (i = 0; i < opts->num_mnt_opts; i++)
+			kfree(opts->mnt_opts[i]);
+	kfree(opts->mnt_opts);
+	opts->mnt_opts = NULL;
+	kfree(opts->mnt_opts_flags);
+	opts->mnt_opts_flags = NULL;
+	opts->num_mnt_opts = 0;
+}
+
+static inline int security_sb_set_mnt_opts(struct super_block *sb,
+					   struct security_mnt_opts *opts)
+{
+	return 0;
+}
+
+static inline void security_sb_clone_mnt_opts(const struct super_block *oldsb,
+					      struct super_block *newsb)
+{ }
+
+static inline int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts)
+{
+	return 0;
+}
+
+static inline int backport_security_sb_copy_data(void *orig, void *copy)
+{
+	return 0;
+	//return security_sb_copy_data(NULL, orig, copy);
+}
+
+#define security_sb_copy_data(a,b) backport_security_sb_copy_data(a,b)
+
+#endif /* BACKPORT_LINUX_SECURITY_H */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/seq_file.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/seq_file.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/seq_file.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,67 @@
+#ifndef BACKPORT_LINUX_SEQ_FILE_H
+#define BACKPORT_LINUX_SEQ_FILE_H
+
+#include_next <linux/seq_file.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+
+static inline struct list_head *seq_list_start(struct list_head *head, loff_t pos)
+{
+	struct list_head *lh;
+
+	list_for_each(lh, head)
+		if (pos-- == 0)
+			return lh;
+
+	return NULL;
+}
+
+static inline struct list_head *seq_list_start_head(struct list_head *head, loff_t pos)
+{
+	if (!pos)
+		return head;
+
+	return seq_list_start(head, pos - 1);
+}
+
+static inline struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos)
+{
+	struct list_head *lh;
+
+	lh = ((struct list_head *)v)->next;
+	++*ppos;
+	return lh == head ? NULL : lh;
+}
+
+static inline void *__seq_open_private(struct file *f, const struct seq_operations *ops,
+		int psize)
+{
+	int rc;
+	void *private;
+	struct seq_file *seq;
+
+	private = kzalloc(psize, GFP_KERNEL);
+	if (private == NULL)
+		goto out;
+
+	rc = seq_open(f, ops);
+	if (rc < 0)
+		goto out_free;
+
+	seq = f->private_data;
+	seq->private = private;
+	return private;
+
+out_free:
+	kfree(private);
+out:
+	return NULL;
+}
+
+static inline int seq_open_private(struct file *filp, const struct seq_operations *ops,
+		int psize)
+{
+	return __seq_open_private(filp, ops, psize) ? 0 : -ENOMEM;
+}
+
+#endif

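seq_open_private() above allocates a zeroed private buffer of psize bytes and installs it as the seq_file's ->private before returning. A minimal sketch of the usual open() pattern built on it (struct my_iter and my_seq_ops are assumed names, not part of this commit):

	/* Illustrative only. */
	static struct seq_operations my_seq_ops;	/* assumed: .start/.next/.stop/.show set elsewhere */

	struct my_iter {				/* assumed per-open iterator state */
		struct list_head *cur;
	};

	static int my_open(struct inode *inode, struct file *file)
	{
		/* Allocates a zeroed struct my_iter and attaches it to the seq_file, or fails with -ENOMEM. */
		return seq_open_private(file, &my_seq_ops, sizeof(struct my_iter));
	}
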
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/string.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/string.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/string.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,24 @@
+#ifndef BACKPORT_LINUX_STRING_H
+#define BACKPORT_LINUX_STRING_H
+
+#include_next <linux/string.h>
+
+extern void *__kmalloc(size_t, gfp_t);
+
+static inline char *kstrndup(const char *s, size_t max, gfp_t gfp)
+{
+	size_t len;
+	char *buf;
+
+	if (!s)
+		return NULL;
+
+	len = strnlen(s, max);
+	buf = __kmalloc(len+1, gfp);
+	if (buf) {
+		memcpy(buf, s, len);
+		buf[len] = '\0';
+	}
+	return buf;
+}
+#endif /* BACKPORT_LINUX_STRING_H */

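kstrndup() above copies at most "max" bytes of the source into a freshly allocated, NUL-terminated buffer and returns NULL on a NULL source or allocation failure. Illustrative usage only (the 16-byte limit is assumed):

	/* Illustrative only: duplicate at most 16 bytes of a caller-supplied string. */
	static char *copy_short_name(const char *name)
	{
		return kstrndup(name, 16, GFP_KERNEL);	/* NULL if name is NULL or allocation fails */
	}
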
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/swap.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/swap.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/swap.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,28 @@
+#ifndef LINUX_SWAP_BACKPORT_H
+#define LINUX_SWAP_BACKPORT_H
+
+#include_next <linux/swap.h>
+
+static inline unsigned int backport_nr_free_buffer_pages(void)
+{
+	/* Just pick one node, since fallback list is circular */
+	pg_data_t *pgdat = NODE_DATA(numa_node_id());
+	unsigned int sum = 0;
+
+	struct zonelist *zonelist = pgdat->node_zonelists + gfp_zone(GFP_USER);
+	struct zone **zonep = zonelist->zones;
+	struct zone *zone;
+
+	for (zone = *zonep++; zone; zone = *zonep++) {
+		unsigned long size = zone->present_pages;
+		unsigned long high = zone->pages_high;
+		if (size > high)
+			sum += size - high;
+	}
+
+	return sum;
+}
+
+#define nr_free_buffer_pages backport_nr_free_buffer_pages
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/wait.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/wait.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/wait.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,47 @@
+#ifndef BACKPORT_LINUX_WAIT_H
+#define BACKPORT_LINUX_WAIT_H
+
+#include_next <linux/wait.h>
+
+#define __wait_event_killable(wq, condition, ret)		\
+do {								\
+	DEFINE_WAIT(__wait);					\
+								\
+	for (;;) {						\
+		prepare_to_wait(&wq, &__wait, TASK_KILLABLE);	\
+		if (condition)					\
+			break;					\
+		if (!fatal_signal_pending(current)) {		\
+			schedule();				\
+			continue;				\
+		}						\
+		ret = -ERESTARTSYS;				\
+		break;						\
+	}							\
+	finish_wait(&wq, &__wait);				\
+} while (0)
+
+/**
+ * wait_event_killable - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_KILLABLE) until the
+ * @condition evaluates to true or a signal is received.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function will return -ERESTARTSYS if it was interrupted by a
+ * signal and 0 if @condition evaluated to true.
+ */
+#define wait_event_killable(wq, condition)			\
+({								\
+	int __ret = 0;						\
+	if (!(condition))					\
+		__wait_event_killable(wq, condition, __ret);	\
+	__ret;							\
+})
+
+#endif

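wait_event_killable() above sleeps in TASK_KILLABLE and returns -ERESTARTSYS only when a fatal signal is pending. A minimal caller sketch, assuming a waitqueue "wq" and a flag "done" that are declared and woken elsewhere:

	/* Illustrative only. */
	int err;

	err = wait_event_killable(wq, done != 0);
	if (err)		/* -ERESTARTSYS: a fatal signal arrived before "done" became true */
		return err;
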
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/workqueue.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/workqueue.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/linux/workqueue.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,7 +3,6 @@
 
 #include_next <linux/workqueue.h>
 
-#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func) INIT_DELAYED_WORK(_work, _func)
 #define cancel_delayed_work_sync cancel_delayed_work
 
 #endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/net/ipv6.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/net/ipv6.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/net/ipv6.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,20 @@
+#ifndef BACKPORT_NET_IPV6_H
+#define BACKPORT_NET_IPV6_H
+
+#include_next <net/ipv6.h>
+
+static inline void ipv6_addr_set_v4mapped(const __be32 addr,
+					  struct in6_addr *v4mapped)
+{
+	ipv6_addr_set(v4mapped,
+			0, 0,
+			htonl(0x0000FFFF),
+			addr);
+}
+
+static inline int ipv6_addr_v4mapped(const struct in6_addr *a)
+{
+	return ((a->s6_addr32[0] | a->s6_addr32[1] |
+		(a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0);
+}
+#endif

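ipv6_addr_set_v4mapped() above builds the ::ffff:a.b.c.d mapped form and ipv6_addr_v4mapped() tests for it. A hedged usage sketch, not part of this commit:

	/* Illustrative only. */
	struct in6_addr mapped;

	ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK), &mapped);	/* ::ffff:127.0.0.1 */
	if (ipv6_addr_v4mapped(&mapped))
		printk(KERN_DEBUG "IPv4-mapped IPv6 address\n");
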
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/net/udp.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/net/udp.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/net/udp.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,9 @@
+#ifndef BACKPORT_NET_UDP_H
+#define BACKPORT_NET_UDP_H
+
+#include_next <net/udp.h>
+
+static inline void UDPX_INC_STATS_BH(struct sock *sk, int field)
+{ }
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/src/namespace.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/src/namespace.c	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22/include/src/namespace.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,95 @@
+#include <linux/spinlock_types.h>
+#include <linux/percpu.h>
+#include <linux/mount.h>
+#include <linux/module.h>
+
+struct mnt_writer {
+	/*
+	 * If holding multiple instances of this lock, they
+	 * must be ordered by cpu number.
+	 */
+	spinlock_t lock;
+	struct lock_class_key lock_class; /* compiles out with !lockdep */
+	unsigned long count;
+	struct vfsmount *mnt;
+} ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU(struct mnt_writer, mnt_writers);
+
+int __init init_mnt_writers(void)
+{
+	int cpu;
+	for_each_possible_cpu(cpu) {
+		struct mnt_writer *writer = &per_cpu(mnt_writers, cpu);
+		spin_lock_init(&writer->lock);
+		lockdep_set_class(&writer->lock, &writer->lock_class);
+		writer->count = 0;
+	}
+	return 0;
+}
+
+static inline void __clear_mnt_count(struct mnt_writer *cpu_writer)
+{
+	if (!cpu_writer->mnt)
+		return;
+	/*
+	 * This is in case anyone ever leaves an invalid,
+	 * old ->mnt and a count of 0.
+	 */
+	if (!cpu_writer->count)
+		return;
+	cpu_writer->count = 0;
+}
+
+static inline void use_cpu_writer_for_mount(struct mnt_writer *cpu_writer,
+					  struct vfsmount *mnt)
+{
+	if (cpu_writer->mnt == mnt)
+		return;
+	__clear_mnt_count(cpu_writer);
+	cpu_writer->mnt = mnt;
+}
+
+int mnt_want_write(struct vfsmount *mnt)
+{
+	int ret = 0;
+	struct mnt_writer *cpu_writer;
+
+	cpu_writer = &get_cpu_var(mnt_writers);
+	spin_lock(&cpu_writer->lock);
+	if (__mnt_is_readonly(mnt)) {
+		ret = -EROFS;
+		goto out;
+	}
+	use_cpu_writer_for_mount(cpu_writer, mnt);
+	cpu_writer->count++;
+out:
+	spin_unlock(&cpu_writer->lock);
+	put_cpu_var(mnt_writers);
+	return ret;
+}
+EXPORT_SYMBOL(mnt_want_write);
+
+void mnt_drop_write(struct vfsmount *mnt)
+{
+	struct mnt_writer *cpu_writer;
+
+	cpu_writer = &get_cpu_var(mnt_writers);
+	spin_lock(&cpu_writer->lock);
+
+	use_cpu_writer_for_mount(cpu_writer, mnt);
+	if (cpu_writer->count > 0) {
+		cpu_writer->count--;
+	}
+
+	spin_unlock(&cpu_writer->lock);
+	/*
+	 * This could be done right after the spinlock
+	 * is taken because the spinlock keeps us on
+	 * the cpu, and disables preemption.  However,
+	 * putting it here bounds the amount that
+	 * __mnt_writers can underflow.  Without it,
+	 * we could theoretically wrap __mnt_writers.
+	 */
+	put_cpu_var(mnt_writers);
+}
+EXPORT_SYMBOL(mnt_drop_write);

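The mnt_want_write()/mnt_drop_write() pair above brackets any write-side operation on a mount so that read-only mounts are rejected with -EROFS; the 2.6.22 mount.h backport also exposes init_mnt_writers() to set up the per-cpu locks once. A hedged sketch of the usual calling pattern ("filp" is an assumed struct file pointer):

	/* Illustrative only. */
	int err;

	err = mnt_want_write(filp->f_path.mnt);
	if (err)
		return err;			/* typically -EROFS on a read-only mount */
	/* ... modify the file or its inode here ... */
	mnt_drop_write(filp->f_path.mnt);
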
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22_suse10_3/include/linux/netdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22_suse10_3/include/linux/netdevice.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.22_suse10_3/include/linux/netdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -6,5 +6,6 @@
 #define dev_get_by_name(net, name) dev_get_by_name(name)
 
 #define NETIF_F_LRO		32768   /* large receive offload */
+#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
 
 #endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.24/include/asm/unaligned.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.24/include/asm/unaligned.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.24/include/asm/unaligned.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,7 @@
+#ifndef ASM_UNALIGNED_BACKPORT_TO_2_6_27_H
+#define ASM_UNALIGNED_BACKPORT_TO_2_6_27_H
+
+#include <linux/unaligned/access_ok.h>
+#include_next <asm/unaligned.h>
+
+#endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.24/include/linux/if_vlan.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.24/include/linux/if_vlan.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.24/include/linux/if_vlan.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,11 +3,8 @@
 
 #include_next <linux/if_vlan.h>
 
-#define vlan_dev_info(x) VLAN_DEV_INFO(x)
+#define VLAN_DEV_INFO(x) ((struct vlan_dev_info *)(x->priv))
+#define vlan_dev_real_dev(netdev) (VLAN_DEV_INFO(netdev)->real_dev)
+#define vlan_dev_vlan_id(netdev) (VLAN_DEV_INFO(netdev)->vlan_id)
 
-static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
-{
-	return vlan_dev_info(dev)->vlan_id;
-}
-
-#endif
+#endif /* __BACKPORT_LINUX_IF_VLAN_H_TO_2_6_24__ */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.24/include/linux/unaligned/access_ok.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.24/include/linux/unaligned/access_ok.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.24/include/linux/unaligned/access_ok.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,67 @@
+#ifndef _LINUX_UNALIGNED_ACCESS_OK_H
+#define _LINUX_UNALIGNED_ACCESS_OK_H
+
+#include <linux/kernel.h>
+#include <asm/byteorder.h>
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+	return le16_to_cpup((__le16 *)p);
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+	return le32_to_cpup((__le32 *)p);
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+	return le64_to_cpup((__le64 *)p);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+	return be16_to_cpup((__be16 *)p);
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+	return be32_to_cpup((__be32 *)p);
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+	return be64_to_cpup((__be64 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	*((__le16 *)p) = cpu_to_le16(val);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	*((__le32 *)p) = cpu_to_le32(val);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	*((__le64 *)p) = cpu_to_le64(val);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	*((__be16 *)p) = cpu_to_be16(val);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	*((__be32 *)p) = cpu_to_be32(val);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	*((__be64 *)p) = cpu_to_be64(val);
+}
+
+#endif /* _LINUX_UNALIGNED_ACCESS_OK_H */

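The get_unaligned_*/put_unaligned_* helpers above simply cast and byte-swap, which is why they live under unaligned/access_ok.h: they assume the architecture tolerates unaligned loads and stores. Illustrative usage only, not part of this commit:

	/* Illustrative only: read/write fields at deliberately unaligned offsets in a raw buffer. */
	u8 buf[8] = { 0 };				/* assumed wire-format scratch buffer */
	u32 len;

	put_unaligned_le32(0x12345678, buf + 2);	/* little-endian store at an odd offset */
	len = get_unaligned_le32(buf + 2);		/* reads back 0x12345678 */
	put_unaligned_be16((u16)len, buf);		/* big-endian store of 0x5678 at offset 0 */
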
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/asm/unaligned.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/asm/unaligned.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/asm/unaligned.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,7 @@
+#ifndef ASM_UNALIGNED_BACKPORT_TO_2_6_27_H
+#define ASM_UNALIGNED_BACKPORT_TO_2_6_27_H
+
+#include <linux/unaligned/access_ok.h>
+#include_next <asm/unaligned.h>
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/backing-dev.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/backing-dev.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/backing-dev.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,22 @@
+#ifndef BACKPORT_LINUX_BACK_DEV_H
+#define BACKPORT_LINUX_BACK_DEV_H
+
+#include_next <linux/backing-dev.h>
+
+static inline int bdi_register(struct backing_dev_info *bdi, struct device *parent,
+				const char *fmt, ...)
+{
+	return 0;
+}
+
+static inline int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
+{
+	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
+}
+
+static inline void bdi_unregister(struct backing_dev_info *bdi)
+{
+	return;
+}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/file.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/file.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/file.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,12 @@
+#ifndef BACKPORT_LINUX_FILE_H
+#define BACKPORT_LINUX_FILE_H
+
+#include_next <linux/file.h>
+#include <linux/fs.h>
+
+static inline void drop_file_write_access(struct file *filp)
+{
+	put_write_access(filp->f_path.dentry->d_inode);
+}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/fs.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/fs.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/fs.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,34 @@
+#ifndef BACKPORT_LINUX_FS_H
+#define BACKPORT_LINUX_FS_H
+
+#include_next <linux/fs.h>
+
+#define __locks_copy_lock locks_copy_lock
+#define FILE_LOCK_DEFERRED -EINPROGRESS
+
+
+static inline int inode_permission(struct inode *inode, int flags)
+{
+	return permission(inode, flags, NULL);
+}
+
+static inline int backport_vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
+{
+	return vfs_symlink(dir, dentry, oldname, 0);
+}
+
+#ifdef CONFIG_DEBUG_WRITECOUNT
+static inline void file_take_write(struct file *f)
+{
+	WARN_ON(f->f_mnt_write_state != 0);
+	f->f_mnt_write_state = FILE_MNT_WRITE_TAKEN;
+}
+#else
+static inline void file_take_write(struct file *filp) {}
+#endif
+
+
+#define vfs_symlink(_dir, _dentry, _oldname) backport_vfs_symlink(_dir, _dentry, _oldname)
+
+
+#endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/if_vlan.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/if_vlan.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/if_vlan.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,11 +1,10 @@
-#ifndef __BACKPORT_LINUX_IF_VLAN_H_TO_2_6_26__
-#define __BACKPORT_LINUX_IF_VLAN_H_TO_2_6_26__
+#ifndef __BACKPORT_LINUX_IF_VLAN_H_TO_2_6_25__
+#define __BACKPORT_LINUX_IF_VLAN_H_TO_2_6_25__
 
 #include_next <linux/if_vlan.h>
 
-static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
-{
-	return vlan_dev_info(dev)->vlan_id;
-}
+#define VLAN_DEV_INFO(x) ((struct vlan_dev_info *)(x->priv))
+#define vlan_dev_real_dev(netdev) (VLAN_DEV_INFO(netdev)->real_dev)
+#define vlan_dev_vlan_id(netdev) (VLAN_DEV_INFO(netdev)->vlan_id)
 
-#endif /* __BACKPORT_LINUX_IF_VLAN_H_TO_2_6_26__ */
+#endif /* __BACKPORT_LINUX_IF_VLAN_H_TO_2_6_25__ */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/kernel.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/kernel.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/kernel.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,8 @@
+#ifndef BACKPORT_KERNEL_H_2_6_25
+#define BACKPORT_KERNEL_H_2_6_25
+
+#include_next <linux/kernel.h>
+
+#define USHORT_MAX     ((u16)(~0U))
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/mount.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/mount.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/mount.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,17 @@
+#ifndef BACKPORT_LINUX_MOUNT_H
+#define BACKPORT_LINUX_MOUNT_H
+
+#include_next <linux/mount.h>
+#include <linux/fs.h>
+
+static inline int __mnt_is_readonly(struct vfsmount *mnt)
+{
+	if (mnt->mnt_sb->s_flags & MS_RDONLY)
+		return 1;
+	return 0;
+}
+
+extern int mnt_want_write(struct vfsmount *mnt);
+extern void mnt_drop_write(struct vfsmount *mnt);
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/proc_fs.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/proc_fs.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/proc_fs.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,20 @@
+#ifndef BACKPORT_LINUX_PROC_FS_H
+#define BACKPORT_LINUX_PROC_FS_H
+
+#include_next <linux/proc_fs.h>
+
+static inline struct proc_dir_entry *proc_create_data(const char *name, mode_t mode,
+				struct proc_dir_entry *parent,
+				const struct file_operations *proc_fops,
+				void *data)
+{
+		struct proc_dir_entry *pde;
+
+		pde = proc_create(name, mode, parent, proc_fops);
+		if (pde)
+			pde->data = data;
+
+		return pde;
+}
+
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/unaligned/access_ok.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/unaligned/access_ok.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/linux/unaligned/access_ok.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,67 @@
+#ifndef _LINUX_UNALIGNED_ACCESS_OK_H
+#define _LINUX_UNALIGNED_ACCESS_OK_H
+
+#include <linux/kernel.h>
+#include <asm/byteorder.h>
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+	return le16_to_cpup((__le16 *)p);
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+	return le32_to_cpup((__le32 *)p);
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+	return le64_to_cpup((__le64 *)p);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+	return be16_to_cpup((__be16 *)p);
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+	return be32_to_cpup((__be32 *)p);
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+	return be64_to_cpup((__be64 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	*((__le16 *)p) = cpu_to_le16(val);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	*((__le32 *)p) = cpu_to_le32(val);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	*((__le64 *)p) = cpu_to_le64(val);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	*((__be16 *)p) = cpu_to_be16(val);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	*((__be32 *)p) = cpu_to_be32(val);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	*((__be64 *)p) = cpu_to_be64(val);
+}
+
+#endif /* _LINUX_UNALIGNED_ACCESS_OK_H */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/net/ipv6.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/net/ipv6.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/net/ipv6.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,14 @@
+#ifndef BACKPORT_LINUX_IPV6_H
+#define BACKPORT_LINUX_IPV6_H
+
+#include_next <net/ipv6.h>
+
+static inline void ipv6_addr_set_v4mapped(const __be32 addr,
+					  struct in6_addr *v4mapped)
+{
+	ipv6_addr_set(v4mapped,
+			0, 0,
+			htonl(0x0000FFFF),
+			addr);
+}
+#endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/src/namespace.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/src/namespace.c	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.25/include/src/namespace.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,83 @@
+#include <linux/spinlock_types.h>
+#include <linux/percpu.h>
+#include <linux/mount.h>
+#include <linux/module.h>
+
+struct mnt_writer {
+	/*
+	 * If holding multiple instances of this lock, they
+	 * must be ordered by cpu number.
+	 */
+	spinlock_t lock;
+	struct lock_class_key lock_class; /* compiles out with !lockdep */
+	unsigned long count;
+	struct vfsmount *mnt;
+} ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU(struct mnt_writer, mnt_writers);
+
+static inline void __clear_mnt_count(struct mnt_writer *cpu_writer)
+{
+	if (!cpu_writer->mnt)
+		return;
+	/*
+	 * This is in case anyone ever leaves an invalid,
+	 * old ->mnt and a count of 0.
+	 */
+	if (!cpu_writer->count)
+		return;
+	cpu_writer->count = 0;
+}
+
+static inline void use_cpu_writer_for_mount(struct mnt_writer *cpu_writer,
+					  struct vfsmount *mnt)
+{
+	if (cpu_writer->mnt == mnt)
+		return;
+	__clear_mnt_count(cpu_writer);
+	cpu_writer->mnt = mnt;
+}
+
+int mnt_want_write(struct vfsmount *mnt)
+{
+	int ret = 0;
+	struct mnt_writer *cpu_writer;
+
+	cpu_writer = &get_cpu_var(mnt_writers);
+	spin_lock(&cpu_writer->lock);
+	if (__mnt_is_readonly(mnt)) {
+		ret = -EROFS;
+		goto out;
+	}
+	use_cpu_writer_for_mount(cpu_writer, mnt);
+	cpu_writer->count++;
+out:
+	spin_unlock(&cpu_writer->lock);
+	put_cpu_var(mnt_writers);
+	return ret;
+}
+EXPORT_SYMBOL(mnt_want_write);
+
+void mnt_drop_write(struct vfsmount *mnt)
+{
+	struct mnt_writer *cpu_writer;
+
+	cpu_writer = &get_cpu_var(mnt_writers);
+	spin_lock(&cpu_writer->lock);
+
+	use_cpu_writer_for_mount(cpu_writer, mnt);
+	if (cpu_writer->count > 0) {
+		cpu_writer->count--;
+	}
+
+	spin_unlock(&cpu_writer->lock);
+	/*
+	 * This could be done right after the spinlock
+	 * is taken because the spinlock keeps us on
+	 * the cpu, and disables preemption.  However,
+	 * putting it here bounds the amount that
+	 * __mnt_writers can underflow.  Without it,
+	 * we could theoretically wrap __mnt_writers.
+	 */
+	put_cpu_var(mnt_writers);
+}
+EXPORT_SYMBOL(mnt_drop_write);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.26/include/linux/if_vlan.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.26/include/linux/if_vlan.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.26/include/linux/if_vlan.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,9 +3,8 @@
 
 #include_next <linux/if_vlan.h>
 
-static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
-{
-	return vlan_dev_info(dev)->vlan_id;
-}
+#define VLAN_DEV_INFO(x) ((struct vlan_dev_info *)(x->priv))
+#define vlan_dev_real_dev(netdev) (VLAN_DEV_INFO(netdev)->real_dev)
+#define vlan_dev_vlan_id(netdev) (VLAN_DEV_INFO(netdev)->vlan_id)
 
 #endif /* __BACKPORT_LINUX_IF_VLAN_H_TO_2_6_26__ */

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U4/include/linux/inetdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U4/include/linux/inetdevice.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U4/include/linux/inetdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -9,16 +9,24 @@
 static inline struct net_device *xxx_ip_dev_find(u32 addr)
 {
 	struct net_device *dev;
-	u32 ip;
+	struct in_ifaddr **ifap;
+	struct in_ifaddr *ifa;
+	struct in_device *in_dev;
 
 	read_lock(&dev_base_lock);
-	for (dev = dev_base; dev; dev = dev->next) {
-		ip = inet_select_addr(dev, 0, RT_SCOPE_HOST);
-		if (ip == addr) {
-			dev_hold(dev);
-			break;
+	for (dev = dev_base; dev; dev = dev->next)
+		if ((in_dev = in_dev_get(dev))) {
+			for (ifap = &in_dev->ifa_list; (ifa = *ifap);
+			     ifap = &ifa->ifa_next) {
+				if (addr == ifa->ifa_address) {
+					dev_hold(dev);
+					in_dev_put(in_dev);
+					goto found;
+				}
+			}
+			in_dev_put(in_dev);
 		}
-	}
+found:
 	read_unlock(&dev_base_lock);
 
 	return dev;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U4/include/linux/netdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U4/include/linux/netdevice.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U4/include/linux/netdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -45,6 +45,7 @@
 
 #define NETIF_F_TSO6    0
 #define NETIF_F_LRO		32768   /* large receive offload */
+#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
 
 #define dev_get_by_name(net, name) dev_get_by_name(name)
 

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U5/include/linux/inetdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U5/include/linux/inetdevice.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U5/include/linux/inetdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -9,16 +9,24 @@
 static inline struct net_device *xxx_ip_dev_find(u32 addr)
 {
 	struct net_device *dev;
-	u32 ip;
+	struct in_ifaddr **ifap;
+	struct in_ifaddr *ifa;
+	struct in_device *in_dev;
 
 	read_lock(&dev_base_lock);
-	for (dev = dev_base; dev; dev = dev->next) {
-		ip = inet_select_addr(dev, 0, RT_SCOPE_HOST);
-		if (ip == addr) {
-			dev_hold(dev);
-			break;
+	for (dev = dev_base; dev; dev = dev->next)
+		if ((in_dev = in_dev_get(dev))) {
+			for (ifap = &in_dev->ifa_list; (ifa = *ifap);
+			     ifap = &ifa->ifa_next) {
+				if (addr == ifa->ifa_address) {
+					dev_hold(dev);
+					in_dev_put(in_dev);
+					goto found;
+				}
+			}
+			in_dev_put(in_dev);
 		}
-	}
+found:
 	read_unlock(&dev_base_lock);
 
 	return dev;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U5/include/linux/netdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U5/include/linux/netdevice.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U5/include/linux/netdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -45,6 +45,7 @@
 
 #define NETIF_F_TSO6    0
 #define NETIF_F_LRO		32768   /* large receive offload */
+#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
 
 #define dev_get_by_name(net, name) dev_get_by_name(name)
 

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U6/include/linux/inetdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U6/include/linux/inetdevice.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U6/include/linux/inetdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -9,16 +9,24 @@
 static inline struct net_device *xxx_ip_dev_find(u32 addr)
 {
 	struct net_device *dev;
-	u32 ip;
+	struct in_ifaddr **ifap;
+	struct in_ifaddr *ifa;
+	struct in_device *in_dev;
 
 	read_lock(&dev_base_lock);
-	for (dev = dev_base; dev; dev = dev->next) {
-		ip = inet_select_addr(dev, 0, RT_SCOPE_HOST);
-		if (ip == addr) {
-			dev_hold(dev);
-			break;
+	for (dev = dev_base; dev; dev = dev->next)
+		if ((in_dev = in_dev_get(dev))) {
+			for (ifap = &in_dev->ifa_list; (ifa = *ifap);
+			     ifap = &ifa->ifa_next) {
+				if (addr == ifa->ifa_address) {
+					dev_hold(dev);
+					in_dev_put(in_dev);
+					goto found;
+				}
+			}
+			in_dev_put(in_dev);
 		}
-	}
+found:
 	read_unlock(&dev_base_lock);
 
 	return dev;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U6/include/linux/netdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U6/include/linux/netdevice.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U6/include/linux/netdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -45,6 +45,7 @@
 
 #define NETIF_F_TSO6    0
 #define NETIF_F_LRO		32768   /* large receive offload */
+#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
 
 #define dev_get_by_name(net, name) dev_get_by_name(name)
 

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U7/include/linux/inetdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U7/include/linux/inetdevice.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U7/include/linux/inetdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -9,16 +9,24 @@
 static inline struct net_device *xxx_ip_dev_find(u32 addr)
 {
 	struct net_device *dev;
-	u32 ip;
+	struct in_ifaddr **ifap;
+	struct in_ifaddr *ifa;
+	struct in_device *in_dev;
 
 	read_lock(&dev_base_lock);
-	for (dev = dev_base; dev; dev = dev->next) {
-		ip = inet_select_addr(dev, 0, RT_SCOPE_HOST);
-		if (ip == addr) {
-			dev_hold(dev);
-			break;
+	for (dev = dev_base; dev; dev = dev->next)
+		if ((in_dev = in_dev_get(dev))) {
+			for (ifap = &in_dev->ifa_list; (ifa = *ifap);
+			     ifap = &ifa->ifa_next) {
+				if (addr == ifa->ifa_address) {
+					dev_hold(dev);
+					in_dev_put(in_dev);
+					goto found;
+				}
+			}
+			in_dev_put(in_dev);
 		}
-	}
+found:
 	read_unlock(&dev_base_lock);
 
 	return dev;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U7/include/linux/netdevice.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U7/include/linux/netdevice.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_addons/backport/2.6.9_U7/include/linux/netdevice.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -45,6 +45,7 @@
 
 #define NETIF_F_TSO6    0
 #define NETIF_F_LRO		32768   /* large receive offload */
+#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
 
 #define dev_get_by_name(net, name) dev_get_by_name(name)
 

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/core_2_z010_sysfs_to_2.6.18.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/core_2_z010_sysfs_to_2.6.18.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/core_2_z010_sysfs_to_2.6.18.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,32 @@
+--- a/drivers/infiniband/core/device.c	2009-02-24 14:00:50.387541000 +0200
++++ b/drivers/infiniband/core/device.c	2009-02-24 14:00:54.891362000 +0200
+@@ -199,7 +199,7 @@ void ib_dealloc_device(struct ib_device 
+ 
+ 	BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);
+ 
+-	kobject_put(&device->dev.kobj);
++	ib_device_unregister_sysfs(device);
+ }
+ EXPORT_SYMBOL(ib_dealloc_device);
+ 
+@@ -356,8 +356,6 @@ void ib_unregister_device(struct ib_devi
+ 
+ 	mutex_unlock(&device_mutex);
+ 
+-	ib_device_unregister_sysfs(device);
+-
+ 	spin_lock_irqsave(&device->client_data_lock, flags);
+ 	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ 		kfree(context);
+--- a/drivers/infiniband/core/sysfs.c	2009-02-24 13:58:42.509754000 +0200
++++ b/drivers/infiniband/core/sysfs.c	2009-02-24 14:00:54.894368000 +0200
+@@ -863,9 +863,6 @@ void ib_device_unregister_sysfs(struct i
+ 	struct kobject *p, *t;
+ 	struct ib_port *port;
+ 
+-	/* Hold kobject until ib_dealloc_device() */
+-	kobject_get(&device->dev.kobj);
+-
+ 	list_for_each_entry_safe(p, t, &device->port_list, entry) {
+ 		list_del(&p->entry);
+ 		port = container_of(p, struct ib_port, kobj);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/core_z0010_sysfs_race.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/core_z0010_sysfs_race.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/core_z0010_sysfs_race.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,496 @@
+ib_core: avoid race condition between sysfs access and low-level module unload. (V2)
+
+In newer kernels, although a low-level module will not be unloaded (code)
+while its sysfs interface is being accessed, it is possible for the module to
+free all its resources (data) during such access.  This almost always causes
+a kernel Oops.
+
+To avoid this, we protect the device reg_state with a mutex, and perform
+all sysfs operations (show, store) atomically within this mutex.
+
+V2: fix thinko bug in sysfs_state_show changes(found by Ralph Campbell).
+
+Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
+Signed-off-by: Ralph Campbell <ralph.campbell at qlogic.com>
+
+Index: ofed_kernel/drivers/infiniband/core/device.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/device.c	2009-02-22 15:36:34.531430000 +0200
++++ ofed_kernel/drivers/infiniband/core/device.c	2009-02-22 15:38:28.650322000 +0200
+@@ -178,9 +178,14 @@ static int end_port(struct ib_device *de
+  */
+ struct ib_device *ib_alloc_device(size_t size)
+ {
++	struct ib_device *ibdev;
++
+ 	BUG_ON(size < sizeof (struct ib_device));
+ 
+-	return kzalloc(size, GFP_KERNEL);
++	ibdev = kzalloc(size, GFP_KERNEL);
++	if (ibdev)
++		mutex_init(&ibdev->sysfs_mutex);
++	return ibdev;
+ }
+ EXPORT_SYMBOL(ib_alloc_device);
+ 
+@@ -313,9 +318,10 @@ int ib_register_device(struct ib_device 
+ 		goto out;
+ 	}
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	list_add_tail(&device->core_list, &device_list);
+-
+ 	device->reg_state = IB_DEV_REGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ 
+ 	{
+ 		struct ib_client *client;
+@@ -361,7 +367,9 @@ void ib_unregister_device(struct ib_devi
+ 		kfree(context);
+ 	spin_unlock_irqrestore(&device->client_data_lock, flags);
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	device->reg_state = IB_DEV_UNREGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ }
+ EXPORT_SYMBOL(ib_unregister_device);
+ 
+Index: ofed_kernel/drivers/infiniband/core/sysfs.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/sysfs.c	2009-02-22 15:36:35.914354000 +0200
++++ ofed_kernel/drivers/infiniband/core/sysfs.c	2009-02-22 16:12:37.485629000 +0200
+@@ -94,7 +94,7 @@ static ssize_t state_show(struct ib_port
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+ 	static const char *state_name[] = {
+ 		[IB_PORT_NOP]		= "NOP",
+@@ -105,26 +105,33 @@ static ssize_t state_show(struct ib_port
+ 		[IB_PORT_ACTIVE_DEFER]	= "ACTIVE_DEFER"
+ 	};
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%d: %s\n", attr.state,
+-		       attr.state >= 0 && attr.state < ARRAY_SIZE(state_name) ?
+-		       state_name[attr.state] : "UNKNOWN");
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d: %s\n", attr.state,
++				      attr.state >= 0 &&
++				      attr.state < ARRAY_SIZE(state_name) ?
++				      state_name[attr.state] : "UNKNOWN");
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "0x%x\n", attr.lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_mask_count_show(struct ib_port *p,
+@@ -132,52 +139,64 @@ static ssize_t lid_mask_count_show(struc
+ 				   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.lmc);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.lmc);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%x\n", attr.sm_lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.sm_lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused,
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.sm_sl);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.sm_sl);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused,
+ 			     char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
+@@ -186,24 +205,33 @@ static ssize_t rate_show(struct ib_port 
+ 	struct ib_port_attr attr;
+ 	char *speed = "";
+ 	int rate;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	switch (attr.active_speed) {
+-	case 2: speed = " DDR"; break;
+-	case 4: speed = " QDR"; break;
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.active_speed) {
++			case 2: speed = " DDR"; break;
++			case 4: speed = " QDR"; break;
++			}
++
++			rate = 25 * ib_width_enum_to_int(attr.active_width) *
++				attr.active_speed;
++			if (rate < 0) {
++				ret = -EINVAL;
++				goto out;
++			}
++
++			ret = sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
++				      rate / 10, rate % 10 ? ".5" : "",
++				      ib_width_enum_to_int(attr.active_width),
++				      speed);
++		}
+ 	}
+-
+-	rate = 25 * ib_width_enum_to_int(attr.active_width) * attr.active_speed;
+-	if (rate < 0)
+-		return -EINVAL;
+-
+-	return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
+-		       rate / 10, rate % 10 ? ".5" : "",
+-		       ib_width_enum_to_int(attr.active_width), speed);
++out:
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
+@@ -211,22 +239,42 @@ static ssize_t phys_state_show(struct ib
+ {
+ 	struct ib_port_attr attr;
+ 
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	switch (attr.phys_state) {
+-	case 1:  return sprintf(buf, "1: Sleep\n");
+-	case 2:  return sprintf(buf, "2: Polling\n");
+-	case 3:  return sprintf(buf, "3: Disabled\n");
+-	case 4:  return sprintf(buf, "4: PortConfigurationTraining\n");
+-	case 5:  return sprintf(buf, "5: LinkUp\n");
+-	case 6:  return sprintf(buf, "6: LinkErrorRecovery\n");
+-	case 7:  return sprintf(buf, "7: Phy Test\n");
+-	default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.phys_state) {
++			case 1:
++				ret = sprintf(buf, "1: Sleep\n");
++				break;
++			case 2:
++				ret = sprintf(buf, "2: Polling\n");
++				break;
++			case 3:
++				ret = sprintf(buf, "3: Disabled\n");
++				break;
++			case 4:
++				ret = sprintf(buf, "4: PortConfigurationTraining\n");
++				break;
++			case 5:
++				ret = sprintf(buf, "5: LinkUp\n");
++				break;
++			case 6:
++				ret = sprintf(buf, "6: LinkErrorRecovery\n");
++				break;
++			case 7:
++				ret = sprintf(buf, "7: Phy Test\n");
++				break;
++			default:
++				ret = sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++				break;
++			}
++		}
+ 	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static PORT_ATTR_RO(state);
+@@ -256,21 +304,24 @@ static ssize_t show_port_gid(struct ib_p
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	union ib_gid gid;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) gid.raw)[0]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[1]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[2]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[3]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[4]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[5]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[6]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[7]));
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) gid.raw)[0]),
++				      be16_to_cpu(((__be16 *) gid.raw)[1]),
++				      be16_to_cpu(((__be16 *) gid.raw)[2]),
++				      be16_to_cpu(((__be16 *) gid.raw)[3]),
++				      be16_to_cpu(((__be16 *) gid.raw)[4]),
++				      be16_to_cpu(((__be16 *) gid.raw)[5]),
++				      be16_to_cpu(((__be16 *) gid.raw)[6]),
++				      be16_to_cpu(((__be16 *) gid.raw)[7]));
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
+@@ -279,13 +330,16 @@ static ssize_t show_port_pkey(struct ib_
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	u16 pkey;
+-	ssize_t ret;
+-
+-	ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%04x\n", pkey);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
++		if (!ret)
++			ret = sprintf(buf, "0x%04x\n", pkey);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ #define PORT_PMA_ATTR(_name, _counter, _width, _offset)			\
+@@ -308,6 +346,12 @@ static ssize_t show_pma_counter(struct i
+ 	if (!p->ibdev->process_mad)
+ 		return sprintf(buf, "N/A (no PMA)\n");
+ 
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (!ibdev_is_alive(p->ibdev)) {
++		ret = -ENODEV;
++		goto out;
++	}
++
+ 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
+ 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ 	if (!in_mad || !out_mad) {
+@@ -354,7 +414,7 @@ static ssize_t show_pma_counter(struct i
+ out:
+ 	kfree(in_mad);
+ 	kfree(out_mad);
+-
++	mutex_unlock(&p->ibdev->sysfs_mutex);
+ 	return ret;
+ }
+ 
+@@ -594,20 +654,20 @@ static ssize_t show_sys_image_guid(struc
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_attr attr;
+-	ssize_t ret;
+-
+-	if (!ibdev_is_alive(dev))
+-		return -ENODEV;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_device(dev, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_query_device(dev, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_node_guid(struct device *device,
+@@ -639,17 +699,20 @@ static ssize_t set_node_desc(struct devi
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_modify desc = {};
+-	int ret;
++	int ret = -ENODEV;
+ 
+ 	if (!dev->modify_device)
+ 		return -EIO;
+ 
+ 	memcpy(desc.node_desc, buf, min_t(int, count, 64));
+-	ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
+-	if (ret)
+-		return ret;
+-
+-	return count;
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
++		if (!ret)
++			ret = count;
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
+@@ -677,14 +740,18 @@ static ssize_t show_protocol_stat(const 
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	union rdma_protocol_stats stats;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = dev->get_protocol_stats(dev, &stats);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%llu\n",
+-		       (unsigned long long) ((u64 *) &stats)[offset]);
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = dev->get_protocol_stats(dev, &stats);
++		if (!ret)
++			ret = sprintf(buf, "%llu\n",
++				      (unsigned long long)
++				      ((u64 *) &stats)[offset]);
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ /* generate a read-only iwarp statistics attribute */
+Index: ofed_kernel/include/rdma/ib_verbs.h
+===================================================================
+--- ofed_kernel.orig/include/rdma/ib_verbs.h	2009-02-22 15:36:40.252210000 +0200
++++ ofed_kernel/include/rdma/ib_verbs.h	2009-02-22 15:38:28.678322000 +0200
+@@ -1205,6 +1205,7 @@ struct ib_device {
+ 		IB_DEV_REGISTERED,
+ 		IB_DEV_UNREGISTERED
+ 	}                            reg_state;
++	struct mutex		     sysfs_mutex;
+ 
+ 	u64			     uverbs_cmd_mask;
+ 	int			     uverbs_abi_ver;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/cxgb3_0002_undo_250.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/cxgb3_0002_undo_250.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/cxgb3_0002_undo_250.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,8 @@
-commit 204e2f98c2d13f869b8541f3c57c7314f75cab11
-Author: Divy Le Ray <divy at chelsio.com>
-Date:   Tue May 6 19:26:01 2008 -0700
-
-    cxgb3 - fix EEH
-    
-    Reset the chip when the PCI link goes down.
-    Preserve the napi structure when a sge qset's resources are freed.
-    Replay only HW initialization when the chip comes out of reset.
-    
-    Signed-off-by: Divy Le ray <divy at chelsio.com>
-    Signed-off-by: Jeff Garzik <jgarzik at redhat.com>
-
----
- drivers/net/cxgb3/common.h     |    1 -
- drivers/net/cxgb3/cxgb3_main.c |   10 ++++------
- drivers/net/cxgb3/regs.h       |    8 --------
- drivers/net/cxgb3/sge.c        |   31 ++-----------------------------
- drivers/net/cxgb3/t3_hw.c      |   28 ----------------------------
- 5 files changed, 6 insertions(+), 72 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/common.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/common.h
-+++ ofed_kernel/drivers/net/cxgb3/common.h
-@@ -700,7 +700,6 @@ void mac_prep(struct cmac *mac, struct a
+diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
+index d5e9bf7..dc2c79d 100644
+--- a/drivers/net/cxgb3/common.h
++++ b/drivers/net/cxgb3/common.h
+@@ -726,7 +726,6 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
  void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
  int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
  		    int reset);
@@ -31,11 +10,11 @@
  void t3_led_ready(struct adapter *adapter);
  void t3_fatal_err(struct adapter *adapter);
  void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -2449,6 +2449,9 @@ static pci_ers_result_t t3_io_error_dete
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 8de820e..48fbda6 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -2485,6 +2485,9 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
  	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
  		offload_close(&adapter->tdev);
  
@@ -45,7 +24,7 @@
  	adapter->flags &= ~FULL_INIT_DONE;
  
  	pci_disable_device(pdev);
-@@ -2473,12 +2476,8 @@ static pci_ers_result_t t3_io_slot_reset
+@@ -2509,12 +2512,8 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
  		goto err;
  	}
  	pci_set_master(pdev);
@@ -59,7 +38,7 @@
  		goto err;
  
  	return PCI_ERS_RESULT_RECOVERED;
-@@ -2630,7 +2629,6 @@ static int __devinit init_one(struct pci
+@@ -2666,7 +2665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
  	}
  
  	pci_set_master(pdev);
@@ -67,10 +46,10 @@
  
  	mmio_start = pci_resource_start(pdev, 0);
  	mmio_len = pci_resource_len(pdev, 0);
-Index: ofed_kernel/drivers/net/cxgb3/regs.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/regs.h
-+++ ofed_kernel/drivers/net/cxgb3/regs.h
+diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
+index a035d5c..086cd02 100644
+--- a/drivers/net/cxgb3/regs.h
++++ b/drivers/net/cxgb3/regs.h
 @@ -444,14 +444,6 @@
  
  #define A_PCIE_CFG 0x88
@@ -86,11 +65,11 @@
  #define S_PCIE_CLIDECEN    16
  #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
  #define F_PCIE_CLIDECEN    V_PCIE_CLIDECEN(1U)
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index 1b0861d..912f816 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
  }
  
  /**
@@ -124,7 +103,7 @@
   *	free_qset - free the resources of an SGE queue set
   *	@adapter: the adapter owning the queue set
   *	@q: the queue set
-@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter 
+@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -133,20 +112,11 @@
  }
  
  /**
-@@ -1429,7 +1402,7 @@ static void restart_ctrlq(unsigned long 
-  */
- int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
- {
--	int ret;
-+	int ret;
- 	local_bh_disable();
- 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
- 	local_bh_enable();
-Index: ofed_kernel/drivers/net/cxgb3/t3_hw.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/t3_hw.c
-+++ ofed_kernel/drivers/net/cxgb3/t3_hw.c
-@@ -3265,7 +3265,6 @@ static void config_pcie(struct adapter *
+diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
+index bf5c076..533fc74 100644
+--- a/drivers/net/cxgb3/t3_hw.c
++++ b/drivers/net/cxgb3/t3_hw.c
+@@ -3303,7 +3303,6 @@ static void config_pcie(struct adapter *adap)
  
  	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
  	t3_set_reg_field(adap, A_PCIE_CFG, 0,
@@ -154,15 +124,14 @@
  			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
  }
  
-@@ -3657,30 +3656,3 @@ void t3_led_ready(struct adapter *adapte
- 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+@@ -3706,31 +3705,3 @@ void t3_led_ready(struct adapter *adapter)
  			 F_GPIO0_OUT_VAL);
  }
--
+ 
 -int t3_replay_prep_adapter(struct adapter *adapter)
 -{
 -	const struct adapter_info *ai = adapter->params.info;
--	unsigned int i, j = 0;
+-	unsigned int i, j = -1;
 -	int ret;
 -
 -	early_hw_init(adapter, ai);
@@ -171,15 +140,17 @@
 -		return ret;
 -
 -	for_each_port(adapter, i) {
+-		const struct port_type_info *pti;
 -		struct port_info *p = adap2pinfo(adapter, i);
--		while (!adapter->params.vpd.port_type[j])
--			++j;
 -
--		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
--					ai->mdio_ops);
+-		while (!adapter->params.vpd.port_type[++j])
+-			;
 -
+-		pti = &port_types[adapter->params.vpd.port_type[j]];
+-		ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
+-		if (ret)
+-			return ret;
 -		p->phy.ops->power_down(&p->phy, 1);
--		++j;
 -	}
 -
 -return 0;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/cxgb3_0010_napi.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/cxgb3_0010_napi.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/cxgb3_0010_napi.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,21 @@
----
- drivers/net/cxgb3/adapter.h    |   22 +++--
- drivers/net/cxgb3/cxgb3_main.c |   94 ++++++++++++++++------
- drivers/net/cxgb3/sge.c        |  170 ++++++++++++++++++++++++-----------------
- 3 files changed, 185 insertions(+), 101 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/adapter.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/adapter.h
-+++ ofed_kernel/drivers/net/cxgb3/adapter.h
-@@ -47,13 +47,11 @@
- #include <asm/io.h>
+diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
+index 3b33ecb..21dad82 100644
+--- a/drivers/net/cxgb3/adapter.h
++++ b/drivers/net/cxgb3/adapter.h
+@@ -48,12 +48,10 @@
  
  struct vlan_group;
--struct adapter;
+ struct adapter;
 -struct sge_qset;
  
-+struct adapter;
  struct port_info {
  	struct adapter *adapter;
  	struct vlan_group *vlan_grp;
 -	struct sge_qset *qs;
- 	const struct port_type_info *port_type;
  	u8 port_id;
  	u8 rx_csum_offload;
-@@ -184,8 +182,6 @@ enum {				/* per port SGE statistics */
+ 	u8 nqsets;
+@@ -183,8 +181,6 @@ enum {				/* per port SGE statistics */
  #define T3_MAX_LRO_MAX_PKTS 64
  
  struct sge_qset {		/* an SGE queue set */
@@ -32,7 +24,7 @@
  	struct sge_rspq rspq;
  	struct sge_fl fl[SGE_RXQ_PER_SET];
  	struct sge_txq txq[SGE_TXQ_PER_SET];
-@@ -196,7 +192,7 @@ struct sge_qset {		/* an SGE queue set *
+@@ -195,7 +191,7 @@ struct sge_qset {		/* an SGE queue set */
  	int lro_enabled;
  	int lro_frag_len;
  	void *lro_va;
@@ -41,7 +33,7 @@
  	unsigned long txq_stopped;	/* which Tx queues are stopped */
  	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
  	unsigned long port_stats[SGE_PSTAT_MAX];
-@@ -241,6 +237,12 @@ struct adapter {
+@@ -240,6 +236,12 @@ struct adapter {
  	struct delayed_work adap_check_task;
  	struct work_struct ext_intr_handler_task;
  
@@ -54,7 +46,7 @@
  	struct dentry *debugfs_root;
  
  	struct mutex mdio_lock;
-@@ -267,6 +269,12 @@ static inline struct port_info *adap2pin
+@@ -266,6 +268,12 @@ static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
  	return netdev_priv(adap->port[idx]);
  }
  
@@ -67,7 +59,7 @@
  #define OFFLOAD_DEVMAP_BIT 15
  
  #define tdev2adap(d) container_of(d, struct adapter, tdev)
-@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, str
+@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -76,11 +68,11 @@
  int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
  		unsigned char *data);
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -410,17 +410,49 @@ static void setup_rss(struct adapter *ad
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 0f4c694..342d441 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -435,17 +435,49 @@ static void setup_rss(struct adapter *adap)
  		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
  }
  
@@ -98,9 +90,7 @@
 -	int i;
 +	int i, j, dummy_idx = 0;
 +	struct net_device *nd;
- 
--	for (i = 0; i < SGE_QSETS; i++) {
--		struct sge_qset *qs = &adap->sge.qs[i];
++
 +	for_each_port(adap, i) {
 +		struct net_device *dev = adap->port[i];
 +		const struct port_info *pi = netdev_priv(dev);
@@ -112,7 +102,9 @@
 +				nd = alloc_netdev(sizeof(*p), "", ether_setup);
 +				if (!nd)
 +					goto free_all;
-+
+ 
+-	for (i = 0; i < SGE_QSETS; i++) {
+-		struct sge_qset *qs = &adap->sge.qs[i];
 +				p = netdev_priv(nd);
 +				p->adapter = adap;
 +				nd->weight = 64;
@@ -137,7 +129,7 @@
  }
  
  /*
-@@ -431,18 +463,20 @@ static void init_napi(struct adapter *ad
+@@ -456,18 +488,20 @@ static void init_napi(struct adapter *adap)
  static void quiesce_rx(struct adapter *adap)
  {
  	int i;
@@ -168,7 +160,7 @@
  }
  
  /**
-@@ -455,7 +489,7 @@ static void enable_all_napi(struct adapt
+@@ -480,7 +514,7 @@ static void enable_all_napi(struct adapter *adap)
   */
  static int setup_sge_qsets(struct adapter *adap)
  {
@@ -177,7 +169,7 @@
  	unsigned int ntxq = SGE_TXQ_PER_SET;
  
  	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
-@@ -463,14 +497,15 @@ static int setup_sge_qsets(struct adapte
+@@ -488,14 +522,15 @@ static int setup_sge_qsets(struct adapter *adap)
  
  	for_each_port(adap, i) {
  		struct net_device *dev = adap->port[i];
@@ -196,7 +188,7 @@
  			if (err) {
  				t3_free_sge_resources(adap);
  				return err;
-@@ -884,6 +919,10 @@ static int cxgb_up(struct adapter *adap)
+@@ -909,6 +944,10 @@ static int cxgb_up(struct adapter *adap)
  				goto out;
  		}
  
@@ -207,7 +199,7 @@
  		err = t3_init_hw(adap, 0);
  		if (err)
  			goto out;
-@@ -896,7 +935,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -921,7 +960,6 @@ static int cxgb_up(struct adapter *adap)
  			goto out;
  
  		setup_rss(adap);
@@ -215,7 +207,7 @@
  		adap->flags |= FULL_INIT_DONE;
  	}
  
-@@ -924,7 +962,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -949,7 +987,6 @@ static int cxgb_up(struct adapter *adap)
  				      adap->name, adap)))
  		goto irq_err;
  
@@ -223,7 +215,7 @@
  	t3_sge_start(adap);
  	t3_intr_enable(adap);
  
-@@ -1061,10 +1098,8 @@ static int cxgb_open(struct net_device *
+@@ -1086,10 +1123,8 @@ static int cxgb_open(struct net_device *dev)
  	int other_ports = adapter->open_device_map & PORT_MASK;
  	int err;
  
@@ -235,7 +227,7 @@
  
  	set_bit(pi->port_id, &adapter->open_device_map);
  	if (is_offload(adapter) && !ofld_disable) {
-@@ -2700,6 +2735,7 @@ static int __devinit init_one(struct pci
+@@ -2736,6 +2771,7 @@ static int __devinit init_one(struct pci_dev *pdev,
  #ifdef CONFIG_NET_POLL_CONTROLLER
  		netdev->poll_controller = cxgb_netpoll;
  #endif
@@ -243,7 +235,7 @@
  
  		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
  	}
-@@ -2800,6 +2836,12 @@ static void __devexit remove_one(struct 
+@@ -2836,6 +2872,12 @@ static void __devexit remove_one(struct pci_dev *pdev)
  		t3_free_sge_resources(adapter);
  		cxgb_disable_msi(adapter);
  
@@ -256,11 +248,11 @@
  		for_each_port(adapter, i)
  			if (adapter->port[i])
  				free_netdev(adapter->port[i]);
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter 
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index f6bc6fe..3bbf626 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -270,7 +262,7 @@
  	memset(q, 0, sizeof(*q));
  }
  
-@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, str
+@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
  	unsigned int ndesc, pidx, credits, gen, compl;
  	const struct port_info *pi = netdev_priv(dev);
  	struct adapter *adap = pi->adapter;
@@ -279,7 +271,7 @@
  	struct sge_txq *q = &qs->txq[TXQ_ETH];
  
  	/*
-@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long 
+@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long data)
  	struct sk_buff *skb;
  	struct sge_qset *qs = (struct sge_qset *)data;
  	struct sge_txq *q = &qs->txq[TXQ_CTRL];
@@ -295,7 +287,7 @@
  
  		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
  
-@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long 
+@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long data)
  
  	spin_unlock(&q->lock);
  	wmb();
@@ -304,7 +296,7 @@
  		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
  }
  
-@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struc
+@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
  	else {
  		struct sge_qset *qs = rspq_to_qset(q);
  
@@ -314,7 +306,7 @@
  		q->rx_head = skb;
  	}
  	q->rx_tail = skb;
-@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundl
+@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev,
   *	receive handler.  Batches need to be of modest size as we do prefetches
   *	on the packets in each.
   */
@@ -357,7 +349,7 @@
  			prefetch(head->data);
  			skbs[ngathered] = head;
  			head = head->next;
-@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct 
+@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct *napi, int budget)
  		}
  		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
  	}
@@ -370,7 +362,7 @@
  }
  
  /**
-@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const
+@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const struct rsp_desc *r)
  
  /**
   *	napi_rx_handler - the NAPI handler for Rx processing
@@ -449,7 +441,7 @@
  }
  
  /**
-@@ -2448,7 +2462,8 @@ static inline int handle_responses(struc
+@@ -2448,7 +2462,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
  		return 0;
  	}
@@ -459,7 +451,7 @@
  	return 1;
  }
  
-@@ -2459,7 +2474,8 @@ static inline int handle_responses(struc
+@@ -2459,7 +2474,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -469,7 +461,7 @@
  	struct sge_rspq *q = &qs->rspq;
  
  	spin_lock(&q->lock);
-@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, vo
+@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -484,7 +476,7 @@
  		q->unhandled_irqs++;
  	spin_unlock(&q->lock);
  	return IRQ_HANDLED;
-@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, 
+@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, void *cookie)
  	return IRQ_HANDLED;
  }
  
@@ -502,7 +494,7 @@
  		return 1;
  	}
  	return 0;
-@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int 
+@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
  
  	spin_lock(&q->lock);
  
@@ -515,7 +507,7 @@
  	if (!new_packets && t3_slow_intr_handler(adap) == 0)
  		q->unhandled_irqs++;
  
-@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, voi
+@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, void *cookie)
  static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  {
  	u32 map;
@@ -527,7 +519,7 @@
  
  	t3_write_reg(adap, A_PL_CLI, 0);
  	map = t3_read_reg(adap, A_SG_DATA_INTR);
-@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq
+@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  	if (unlikely(map & F_ERRINTR))
  		t3_slow_intr_handler(adap);
  
@@ -550,7 +542,7 @@
  
  	spin_unlock(&q0->lock);
  	return IRQ_HANDLED;
-@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long d
+@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long data)
  {
  	spinlock_t *lock;
  	struct sge_qset *qs = (struct sge_qset *)data;
@@ -560,7 +552,7 @@
  
  	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
  		reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
-@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long d
+@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long data)
  		spin_unlock(&qs->txq[TXQ_OFLD].lock);
  	}
  	lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
@@ -572,7 +564,7 @@
  			u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
  
  			if (qs->fl[0].credits < qs->fl[0].size)
-@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long d
+@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long data)
   */
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
  {
@@ -586,7 +578,7 @@
  }
  
  /**
-@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_
+@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
   */
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -595,7 +587,7 @@
  {
  	int i, avail, ret = -ENOMEM;
  	struct sge_qset *q = &adapter->sge.qs[id];
-@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *ad
+@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  	}
  
  	spin_unlock_irq(&adapter->sge.reg_lock);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,23 +3,32 @@
  drivers/infiniband/hw/ipath/ipath_user_sdma.c |    6 +++---
  2 files changed, 4 insertions(+), 4 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:37:35.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:41:22.000000000 -0800
 @@ -698,7 +698,7 @@ retry:
  
  	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
  			      tx->map_len, DMA_TO_DEVICE);
--	if (dma_mapping_error(&dd->pcidev->dev, addr)) {
-+	if (dma_mapping_error(addr)) {
- 		ret = -EIO;
- 		goto unlock;
- 	}
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+-	if (dma_mapping_error(&dd->pcidev->dev, addr))
++	if (dma_mapping_error(addr))
+ 		goto ioerr;
+ 
+ 	dwoffset = tx->map_len >> 2;
+@@ -739,7 +739,7 @@ retry:
+ 		dw = (len + 3) >> 2;
+ 		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
+ 				      DMA_TO_DEVICE);
+-		if (dma_mapping_error(&dd->pcidev->dev, addr))
++		if (dma_mapping_error(addr))
+ 			goto unmap;
+ 		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
+ 		/* SDmaUseLargeBuf has to be set in every descriptor */
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:18.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:54.000000000 -0800
 @@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(cons
  
  	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,

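The refreshed hunks keep the same substitution as before: the one-argument dma_mapping_error() used by the older kernels this backport targets. A rough sketch of the two call forms (pdev, buf and len are placeholder names, and the BACKPORT_PRE_2_6_27 guard is hypothetical, assuming the device-aware form arrived around 2.6.27):

    dma_addr_t addr = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);

    #ifdef BACKPORT_PRE_2_6_27
            /* one-argument form on the kernels this patch targets */
            if (dma_mapping_error(addr))
                    goto err;
    #else
            /* device-aware form in current mainline */
            if (dma_mapping_error(&pdev->dev, addr))
                    goto err;
    #endif
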
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/ipoib_0100_to_2.6.21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/ipoib_0100_to_2.6.21.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/ipoib_0100_to_2.6.21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -12,11 +12,11 @@
  drivers/infiniband/ulp/ipoib/ipoib_multicast.c |   10 ++--
  5 files changed, 61 insertions(+), 64 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:56:08.000000000 +0200
-@@ -279,8 +279,6 @@ struct ipoib_dev_priv {
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:46:20.000000000 +0300
+@@ -277,8 +277,6 @@ struct ipoib_dev_priv {
  
  	struct net_device *dev;
  
@@ -25,7 +25,7 @@
  	unsigned long flags;
  
  	struct mutex vlan_mutex;
-@@ -338,6 +336,8 @@ struct ipoib_dev_priv {
+@@ -336,6 +334,8 @@ struct ipoib_dev_priv {
  
  	struct ib_event_handler event_handler;
  
@@ -34,7 +34,7 @@
  	struct net_device *parent;
  	struct list_head child_intfs;
  	struct list_head list;
-@@ -425,7 +425,7 @@ extern struct workqueue_struct *ipoib_wo
+@@ -423,7 +423,7 @@ extern struct workqueue_struct *ipoib_wo
  
  /* functions */
  
@@ -43,10 +43,10 @@
  void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
  void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:46:20.000000000 +0300
 @@ -593,7 +593,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		ipoib_dbg(priv, "cm recv error "
  			   "(status=%d, wrid=%d vend_err %x)\n",
@@ -56,7 +56,7 @@
  		if (has_srq)
  			goto repost;
  		else {
-@@ -658,7 +658,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
+@@ -646,7 +646,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		 * this packet and reuse the old buffer.
  		 */
  		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
@@ -65,7 +65,7 @@
  		goto repost;
  	}
  
-@@ -676,8 +676,8 @@ copied:
+@@ -664,8 +664,8 @@ copied:
  	skb_pull(skb, IPOIB_ENCAP_LEN);
  
  	dev->last_rx = jiffies;
@@ -76,7 +76,7 @@
  
  	skb->dev = dev;
  	/* XXX get correct PACKET_ type here */
-@@ -726,8 +726,8 @@ void ipoib_cm_send(struct net_device *de
+@@ -714,8 +714,8 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(skb->len > tx->mtu)) {
  		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
  			   skb->len, tx->mtu);
@@ -87,7 +87,7 @@
  		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
  		return;
  	}
-@@ -746,7 +746,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -734,7 +734,7 @@ void ipoib_cm_send(struct net_device *de
  	tx_req->skb = skb;
  	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
  	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
@@ -96,7 +96,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -756,7 +756,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -744,7 +744,7 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
  			       addr, skb->len))) {
  		ipoib_warn(priv, "post_send failed\n");
@@ -105,7 +105,7 @@
  		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
  		dev_kfree_skb_any(skb);
  	} else {
-@@ -793,8 +793,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
+@@ -781,8 +781,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
  	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
  
  	/* FIXME: is this right? Shouldn't we only increment on success? */
@@ -116,10 +116,10 @@
  
  	dev_kfree_skb_any(tx_req->skb);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:43:16.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:49:04.000000000 +0300
 @@ -261,7 +261,7 @@ static void ipoib_ib_handle_rx_wc(struct
  	 * this packet and reuse the old buffer.
  	 */
@@ -263,60 +263,41 @@
  		--priv->tx_outstanding;
  		ipoib_dma_unmap_tx(priv->ca, tx_req);
  		dev_kfree_skb_any(skb);
-@@ -809,6 +812,7 @@ int ipoib_ib_dev_stop(struct net_device 
+@@ -708,8 +711,7 @@ int ipoib_ib_dev_open(struct net_device 
+ 	priv->poll_timer.function = ipoib_ib_tx_timer_func;
+ 	priv->poll_timer.data = (unsigned long)dev;
+ 
+-	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_enable(&priv->napi);
++	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+ 
+ 	return 0;
+ }
+@@ -828,8 +830,8 @@ int ipoib_ib_dev_stop(struct net_device 
+ 	struct ipoib_tx_buf *tx_req;
  	int i;
  
- 	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+-	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_disable(&priv->napi);
++	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 +	netif_poll_disable(dev);
  
  	ipoib_cm_dev_stop(dev);
  
-@@ -893,6 +897,7 @@ timeout:
- 		msleep(1);
- 	}
+@@ -903,6 +905,7 @@ timeout:
  
+ 	ipoib_ah_dev_cleanup(dev);
+ 
 +	netif_poll_enable(dev);
  	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
  
  	return 0;
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:55:35.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:58:18.000000000 +0200
-@@ -106,20 +106,16 @@ int ipoib_open(struct net_device *dev)
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:44:02.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:46:20.000000000 +0300
+@@ -585,7 +585,7 @@ static void neigh_add_path(struct sk_buf
  
- 	ipoib_dbg(priv, "bringing up interface\n");
- 
--	if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
--		napi_enable(&priv->napi);
-+	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- 
- 	if (ipoib_pkey_dev_delay_open(dev))
- 		return 0;
- 
--	if (ipoib_ib_dev_open(dev)) {
--		napi_disable(&priv->napi);
--		return -EINVAL;
--	}
-+	if (ipoib_ib_dev_open(dev))
-+  		return -EINVAL;
- 
- 	if (ipoib_ib_dev_up(dev)) {
- 		ipoib_ib_dev_stop(dev, 1);
--		napi_disable(&priv->napi);
- 		return -EINVAL;
- 	}
- 
-@@ -152,7 +148,6 @@ static int ipoib_stop(struct net_device 
- 	ipoib_dbg(priv, "stopping interface\n");
- 
- 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
--	napi_disable(&priv->napi);
- 
- 	netif_stop_queue(dev);
- 
-@@ -604,7 +599,7 @@ static void neigh_add_path(struct sk_buf
- 
  	neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
  	if (!neigh) {
 -		++dev->stats.tx_dropped;
@@ -324,7 +305,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -667,7 +662,7 @@ err_list:
+@@ -646,7 +646,7 @@ err_list:
  err_path:
  	ipoib_neigh_free(dev, neigh);
  err_drop:
@@ -333,7 +314,7 @@
  	dev_kfree_skb_any(skb);
  
  	spin_unlock_irqrestore(&priv->lock, flags);
-@@ -714,7 +709,7 @@ static void unicast_arp_send(struct sk_b
+@@ -697,7 +697,7 @@ static void unicast_arp_send(struct sk_b
  			} else
  				__path_add(dev, path);
  		} else {
@@ -342,7 +323,7 @@
  			dev_kfree_skb_any(skb);
  		}
  
-@@ -733,7 +728,7 @@ static void unicast_arp_send(struct sk_b
+@@ -716,7 +716,7 @@ static void unicast_arp_send(struct sk_b
  		skb_push(skb, sizeof *phdr);
  		__skb_queue_tail(&path->queue, skb);
  	} else {
@@ -351,7 +332,7 @@
  		dev_kfree_skb_any(skb);
  	}
  
-@@ -790,7 +785,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -773,7 +773,7 @@ static int ipoib_start_xmit(struct sk_bu
  			__skb_queue_tail(&neigh->queue, skb);
  			spin_unlock_irqrestore(&priv->lock, flags);
  		} else {
@@ -360,7 +341,7 @@
  			dev_kfree_skb_any(skb);
  		}
  	} else {
-@@ -816,7 +811,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -799,7 +799,7 @@ static int ipoib_start_xmit(struct sk_bu
  					   IPOIB_QPN(phdr->hwaddr),
  					   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
  				dev_kfree_skb_any(skb);
@@ -369,7 +350,7 @@
  				return NETDEV_TX_OK;
  			}
  
-@@ -842,7 +837,7 @@ static void ipoib_timeout(struct net_dev
+@@ -825,7 +825,7 @@ static void ipoib_timeout(struct net_dev
  static int ipoib_hard_header(struct sk_buff *skb,
  			     struct net_device *dev,
  			     unsigned short type,
@@ -378,7 +359,7 @@
  {
  	struct ipoib_header *header;
  
-@@ -931,9 +926,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
+@@ -914,9 +914,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
  void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
  {
  	struct sk_buff *skb;
@@ -390,7 +371,7 @@
  		dev_kfree_skb_any(skb);
  	}
  	if (ipoib_cm_get(neigh))
-@@ -1008,10 +1004,6 @@ void ipoib_dev_cleanup(struct net_device
+@@ -991,10 +992,6 @@ void ipoib_dev_cleanup(struct net_device
  	priv->tx_ring = NULL;
  }
  
@@ -401,7 +382,7 @@
  static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
  		       void **tcph, u64 *hdr_flags, void *priv)
  {
-@@ -1069,13 +1061,13 @@ static void ipoib_setup(struct net_devic
+@@ -1052,13 +1049,13 @@ static void ipoib_setup(struct net_devic
  	dev->change_mtu		 = ipoib_change_mtu;
  	dev->hard_start_xmit	 = ipoib_start_xmit;
  	dev->tx_timeout		 = ipoib_timeout;
@@ -418,10 +399,10 @@
  
  	dev->watchdog_timeo	 = HZ;
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:46:20.000000000 +0300
 @@ -100,7 +100,7 @@ static void ipoib_mcast_free(struct ipoi
  	}
  
@@ -448,7 +429,7 @@
  			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
  		}
  		netif_tx_unlock_bh(dev);
-@@ -667,7 +668,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -677,7 +678,7 @@ void ipoib_mcast_send(struct net_device 
  	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)		||
  	    !priv->broadcast					||
  	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
@@ -457,7 +438,7 @@
  		dev_kfree_skb_any(skb);
  		goto unlock;
  	}
-@@ -682,7 +683,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -692,7 +693,7 @@ void ipoib_mcast_send(struct net_device 
  		if (!mcast) {
  			ipoib_warn(priv, "unable to allocate memory for "
  				   "multicast structure\n");
@@ -466,7 +447,7 @@
  			dev_kfree_skb_any(skb);
  			goto out;
  		}
-@@ -697,7 +698,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -707,7 +708,7 @@ void ipoib_mcast_send(struct net_device 
  		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
  			skb_queue_tail(&mcast->pkt_queue, skb);
  		else {

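Like cxgb3_0010_napi.patch above, these ipoib hunks translate the driver from the 2.6.24+ NAPI objects back to the older per-netdevice polling interface. A sketch of the two registration styles (my_priv, setup_new_napi and setup_old_napi are made-up names; the real conversions are the hunks shown above):

    /* 2.6.24+ style used by the unmodified driver */
    static int new_poll(struct napi_struct *napi, int budget);

    static void setup_new_napi(struct net_device *dev, struct my_priv *priv)
    {
            netif_napi_add(dev, &priv->napi, new_poll, 64);   /* weight 64 */
            napi_enable(&priv->napi);
    }

    /* pre-2.6.24 style that these backport patches restore */
    static int old_poll(struct net_device *dev, int *budget);

    static void setup_old_napi(struct net_device *dev)
    {
            dev->poll   = old_poll;
            dev->weight = 64;
            netif_poll_enable(dev);
    }
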
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,26 @@
+---
+ drivers/infiniband/ulp/ipoib/ipoib_fs.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+===================================================================
+--- ofa_kernel-1.4.orig/drivers/infiniband/ulp/ipoib/ipoib_fs.c
++++ ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+@@ -134,7 +134,7 @@ static int ipoib_mcg_open(struct inode *
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_mcg_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_mcg_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -240,7 +240,7 @@ static int ipoib_path_open(struct inode 
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_path_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_path_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/iw_nes_050_to_2_6_24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/iw_nes_050_to_2_6_24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/iw_nes_050_to_2_6_24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:23:23.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -1118,19 +1118,19 @@ static ssize_t nes_show_wqm_quanta(struc
  static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
  					const char *buf, size_t count)

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/iw_nes_100_to_2_6_23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/iw_nes_100_to_2_6_23.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/iw_nes_100_to_2_6_23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/Kconfig nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig
---- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:38:45.000000000 -0500
 @@ -2,7 +2,6 @@ config INFINIBAND_NES
  	tristate "NetEffect RNIC Driver"
  	depends on PCI && INET && INFINIBAND
@@ -10,8 +10,8 @@
  	  This is a low-level driver for NetEffect RDMA enabled
  	  Network Interface Cards (RNIC).
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -360,10 +360,11 @@ struct ib_qp *nes_get_qp(struct ib_devic
   */
  static void nes_print_macaddr(struct net_device *netdev)
@@ -29,9 +29,9 @@
  
  /**
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_cm.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2008-11-07 15:26:56.000000000 -0600
-@@ -998,7 +998,6 @@ static int nes_addr_resolve_neigh(struct
+--- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1086,7 +1086,6 @@ static int nes_addr_resolve_neigh(struct
  	struct flowi fl;
  	struct neighbour *neigh;
  	int rc = -1;
@@ -39,7 +39,7 @@
  
  	memset(&fl, 0, sizeof fl);
  	fl.nl_u.ip4_u.daddr = htonl(dst_ip);
-@@ -1012,8 +1011,11 @@ static int nes_addr_resolve_neigh(struct
+@@ -1100,8 +1099,11 @@ static int nes_addr_resolve_neigh(struct
  	if (neigh) {
  		if (neigh->nud_state & NUD_VALID) {
  			nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -53,7 +53,7 @@
  			nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
  					     dst_ip, NES_ARP_ADD);
  			rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
-@@ -1042,7 +1044,6 @@ static struct nes_cm_node *make_cm_node(
+@@ -1130,7 +1132,6 @@ static struct nes_cm_node *make_cm_node(
  	int arpindex = 0;
  	struct nes_device *nesdev;
  	struct nes_adapter *nesadapter;
@@ -61,7 +61,7 @@
  
  	/* create an hte and cm_node for this instance */
  	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
-@@ -1111,8 +1112,11 @@ static struct nes_cm_node *make_cm_node(
+@@ -1197,8 +1198,11 @@ static struct nes_cm_node *make_cm_node(
  
  	/* copy the mac addr to node context */
  	memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
@@ -76,8 +76,8 @@
  	add_hte_node(cm_core, cm_node);
  	atomic_inc(&cm_nodes_created);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2008-11-07 15:38:18.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:38:45.000000000 -0500
 @@ -38,14 +38,9 @@
  #include <linux/ip.h>
  #include <linux/tcp.h>
@@ -90,10 +90,10 @@
 -module_param(nes_lro_max_aggr, uint, 0444);
 -MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
 -
- static u32 crit_err_count;
- u32 int_mod_timer_init;
- u32 int_mod_cq_depth_256;
-@@ -1540,25 +1535,6 @@ static void nes_rq_wqes_timeout(unsigned
+ static int wide_ppm_offset;
+ module_param(wide_ppm_offset, int, 0644);
+ MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm");
+@@ -1531,25 +1526,6 @@ static void nes_rq_wqes_timeout(unsigned
  }
  
  
@@ -119,7 +119,7 @@
  /**
   * nes_init_nic_qp
   */
-@@ -1783,14 +1759,6 @@ int nes_init_nic_qp(struct nes_device *n
+@@ -1773,14 +1749,6 @@ int nes_init_nic_qp(struct nes_device *n
  			jumbomode = 1;
  		nes_nic_init_timer_defaults(nesdev, jumbomode);
  	}
@@ -134,7 +134,7 @@
  	return 0;
  }
  
-@@ -2541,7 +2509,7 @@ static void nes_nic_napi_ce_handler(stru
+@@ -2556,7 +2524,7 @@ static void nes_nic_napi_ce_handler(stru
  {
  	struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
  
@@ -143,7 +143,7 @@
  }
  
  
-@@ -2576,13 +2544,10 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2591,13 +2559,10 @@ void nes_nic_ce_handler(struct nes_devic
  	u16 pkt_type;
  	u16 rqes_processed = 0;
  	u8 sq_cqes = 0;
@@ -157,7 +157,7 @@
  	do {
  		if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
  				NES_NIC_CQE_VALID) {
-@@ -2714,17 +2679,9 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2729,17 +2694,9 @@ void nes_nic_ce_handler(struct nes_devic
  							>> 16);
  					nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
  							nesvnic->netdev->name, vlan_tag);
@@ -178,7 +178,7 @@
  
  skip_rx_indicate0:
  				nesvnic->netdev->last_rx = jiffies;
-@@ -2755,8 +2712,6 @@ skip_rx_indicate0:
+@@ -2770,8 +2727,6 @@ skip_rx_indicate0:
  
  	} while (1);
  
@@ -188,18 +188,18 @@
  		barrier();
  		/* restart the queue if it had been stopped */
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.h nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:38:45.000000000 -0500
 @@ -33,8 +33,6 @@
  #ifndef __NES_HW_H
  #define __NES_HW_H
  
 -#include <linux/inet_lro.h>
 -
+ #define NES_PHY_TYPE_CX4       1
  #define NES_PHY_TYPE_1G        2
  #define NES_PHY_TYPE_IRIS      3
- #define NES_PHY_TYPE_ARGUS     4
-@@ -991,8 +989,6 @@ struct nes_hw_tune_timer {
+@@ -1005,8 +1003,6 @@ struct nes_hw_tune_timer {
  #define NES_TIMER_ENABLE_LIMIT      4
  #define NES_MAX_LINK_INTERRUPTS     128
  #define NES_MAX_LINK_CHECK          200
@@ -208,7 +208,7 @@
  
  struct nes_adapter {
  	u64              fw_ver;
-@@ -1168,7 +1164,6 @@ struct nes_vnic {
+@@ -1194,7 +1190,6 @@ struct nes_vnic {
  	u32               msg_enable;
  	/* u32 tx_avail; */
  	__be32            local_ipaddr;
@@ -216,7 +216,7 @@
  	spinlock_t           tx_lock;	/* could use netdev tx lock? */
  	struct timer_list    rq_wqes_timer;
  	u32                  nic_mem_size;
-@@ -1196,9 +1191,6 @@ struct nes_vnic {
+@@ -1222,9 +1217,6 @@ struct nes_vnic {
  	u8  of_device_registered;
  	u8  rdma_enabled;
  	u8  rx_checksum_disabled;
@@ -227,8 +227,8 @@
  
  struct nes_ib_device {
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
 @@ -96,35 +96,38 @@ static int nics_per_function = 1;
  /**
   * nes_netdev_poll
@@ -292,7 +292,7 @@
  	netif_stop_queue(netdev);
  	list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
  		first_nesvnic = container_of(list_pos, struct nes_vnic, list);
-@@ -797,14 +798,16 @@ static int nes_netdev_set_mac_address(st
+@@ -775,14 +776,16 @@ static int nes_netdev_set_mac_address(st
  	int i;
  	u32 macaddr_low;
  	u16 macaddr_high;
@@ -312,7 +312,7 @@
  	macaddr_high  = ((u16)netdev->dev_addr[0]) << 8;
  	macaddr_high += (u16)netdev->dev_addr[1];
  	macaddr_low   = ((u32)netdev->dev_addr[2]) << 24;
-@@ -909,11 +912,11 @@ static void nes_netdev_set_multicast_lis
+@@ -887,11 +890,11 @@ static void nes_netdev_set_multicast_lis
  			if (mc_index >= max_pft_entries_avaiable)
  				break;
  			if (multicast_addr) {
@@ -329,7 +329,7 @@
  				macaddr_high  = ((u16)multicast_addr->dmi_addr[0]) << 8;
  				macaddr_high += (u16)multicast_addr->dmi_addr[1];
  				macaddr_low   = ((u32)multicast_addr->dmi_addr[2]) << 24;
-@@ -1063,9 +1066,6 @@ static const char nes_ethtool_stringset[
+@@ -1040,9 +1043,6 @@ static const char nes_ethtool_stringset[
  	"CQ Depth 32",
  	"CQ Depth 128",
  	"CQ Depth 256",
@@ -339,17 +339,17 @@
  };
  
  #define NES_ETHTOOL_STAT_COUNT  ARRAY_SIZE(nes_ethtool_stringset)
-@@ -1263,9 +1263,6 @@ static void nes_netdev_get_ethtool_stats
- 	target_stat_values[52] = int_mod_cq_depth_32;
- 	target_stat_values[53] = int_mod_cq_depth_128;
- 	target_stat_values[54] = int_mod_cq_depth_256;
--	target_stat_values[55] = nesvnic->lro_mgr.stats.aggregated;
--	target_stat_values[56] = nesvnic->lro_mgr.stats.flushed;
--	target_stat_values[57] = nesvnic->lro_mgr.stats.no_desc;
+@@ -1240,9 +1240,6 @@ static void nes_netdev_get_ethtool_stats
+ 	target_stat_values[++index] = int_mod_cq_depth_32;
+ 	target_stat_values[++index] = int_mod_cq_depth_128;
+ 	target_stat_values[++index] = int_mod_cq_depth_256;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
  
  }
  
-@@ -1542,8 +1539,6 @@ static struct ethtool_ops nes_ethtool_op
+@@ -1525,8 +1522,6 @@ static struct ethtool_ops nes_ethtool_op
  	.set_sg = ethtool_op_set_sg,
  	.get_tso = ethtool_op_get_tso,
  	.set_tso = ethtool_op_set_tso,
@@ -358,7 +358,7 @@
  };
  
  
-@@ -1615,7 +1610,8 @@ struct net_device *nes_netdev_init(struc
+@@ -1598,7 +1593,8 @@ struct net_device *nes_netdev_init(struc
  	netdev->type = ARPHRD_ETHER;
  	netdev->features = NETIF_F_HIGHDMA;
  	netdev->ethtool_ops = &nes_ethtool_ops;
@@ -368,7 +368,7 @@
  	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
  	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
  	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
-@@ -1648,7 +1644,6 @@ struct net_device *nes_netdev_init(struc
+@@ -1630,7 +1626,6 @@ struct net_device *nes_netdev_init(struc
  
  	if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
  		netdev->features |= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/iw_nes_200_to_2_6_22.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/iw_nes_200_to_2_6_22.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/iw_nes_200_to_2_6_22.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:52:27.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -593,16 +593,22 @@ static int __devinit nes_probe(struct pc
  						nesdev->nesadapter->port_count;
  	}
@@ -35,9 +35,9 @@
  
  	tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.h nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h
---- linux-2.6/drivers/infiniband/hw/nes/nes.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2008-11-07 15:52:27.000000000 -0600
-@@ -276,14 +276,7 @@ static inline __le32 get_crc_value(struc
+--- linux-2.6/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:38:45.000000000 -0500
+@@ -274,14 +274,7 @@ static inline __le32 get_crc_value(struc
  	u32 crc_value;
  	crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad));
  

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/iw_nes_210_to_2_6_21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/iw_nes_210_to_2_6_21.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/iw_nes_210_to_2_6_21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,23 @@
+diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1549,6 +1549,11 @@ static void nes_netdev_vlan_rx_register(
+ 	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ }
+ 
++static void nes_netdev_vlan_rx_kill_vid(struct net_device *netdev, 
++					unsigned short vid)
++{
++}
++
+ 
+ /**
+  * nes_netdev_init - initialize network device
+@@ -1598,6 +1603,7 @@ struct net_device *nes_netdev_init(struc
+ 	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
+ 	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ 	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
++	netdev->vlan_rx_kill_vid = nes_netdev_vlan_rx_kill_vid;
+ 
+ 	/* Fill in the port structure */
+ 	nesvnic->netdev = netdev;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/mlx4_en_0099_no_multiqueue.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/mlx4_en_0099_no_multiqueue.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/mlx4_en_0099_no_multiqueue.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,73 @@
+diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
+index bce700a..a9ee0d7 100644
+--- a/drivers/net/mlx4/en_netdev.c
++++ b/drivers/net/mlx4/en_netdev.c
+@@ -963,7 +963,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	int i;
+ 	int err;
+ 
+-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
++	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+ 	if (dev == NULL) {
+ 		mlx4_err(mdev, "Net device allocation failed\n");
+ 		return -ENOMEM;
+@@ -1036,7 +1036,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	dev->open = &mlx4_en_open;
+ 	dev->stop = &mlx4_en_close;
+ 	dev->hard_start_xmit = &mlx4_en_xmit;
+-	dev->select_queue = &mlx4_en_select_queue;
+ 	dev->get_stats = &mlx4_en_get_stats;
+ 	dev->set_multicast_list = &mlx4_en_set_multicast;
+ 	dev->set_mac_address = &mlx4_en_set_mac;
+diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
+index 3d8246f..72e166f 100644
+--- a/drivers/net/mlx4/en_tx.c
++++ b/drivers/net/mlx4/en_tx.c
+@@ -392,7 +392,7 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+ 			 *   transmission on that ring would stop the queue.
+ 			 */
+ 			ring->blocked = 0;
+-			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
++			netif_wake_queue(dev);
+ 			priv->port_stats.wake_queue++;
+ 		}
+ 	}
+@@ -612,7 +612,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
+ 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ }
+ 
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
++static int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	u16 vlan_tag = 0;
+@@ -703,7 +703,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+-	tx_ind = skb->queue_mapping;
++	tx_ind = mlx4_en_select_queue(dev, skb);
+ 	ring = &priv->tx_ring[tx_ind];
+ 	if (priv->vlgrp && vlan_tx_tag_present(skb))
+ 		vlan_tag = vlan_tx_tag_get(skb);
+@@ -713,7 +713,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ 		/* every full Tx ring stops queue.
+ 		 * TODO: implement multi-queue support (per-queue stop) */
+-		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
++		netif_stop_queue(dev);
+ 		ring->blocked = 1;
+ 		priv->port_stats.queue_stopped++;
+ 
+diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
+index adef17c..995e318 100644
+--- a/drivers/net/mlx4/mlx4_en.h
++++ b/drivers/net/mlx4/mlx4_en.h
+@@ -521,7 +521,6 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq);
+ void mlx4_en_poll_tx_cq(unsigned long data);
+ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+ 
+ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+ 			   u32 size, u16 stride);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/mlx4_en_0100_to_2.6.24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/mlx4_en_0100_to_2.6.24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/mlx4_en_0100_to_2.6.24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,33 +1,5 @@
-From 6d0a6cc6981404e95297924c9a8f169b3447a98e Mon Sep 17 00:00:00 2001
-From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
-Date: Thu, 2 Oct 2008 11:26:30 +0300
-Subject: [PATCH] mlx4_en: LRO backport patch
-
-Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
----
- drivers/net/mlx4/Makefile    |    2 +-
- drivers/net/mlx4/en_cq.c     |   23 ++-
- drivers/net/mlx4/en_lro.c    |  529 ++++++++++++++++++++++++++++++++++++++++++
- drivers/net/mlx4/en_netdev.c |    5 +-
- drivers/net/mlx4/en_params.c |   19 +--
- drivers/net/mlx4/en_rx.c     |  121 +++-------
- drivers/net/mlx4/en_tx.c     |    1 +
- drivers/net/mlx4/mlx4_en.h   |   60 +++++-
- 8 files changed, 646 insertions(+), 114 deletions(-)
- create mode 100644 drivers/net/mlx4/en_lro.c
-
-diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
-index acb1b91..e6bfdec 100644
---- a/drivers/net/mlx4/Makefile
-+++ b/drivers/net/mlx4/Makefile
-@@ -5,4 +5,4 @@ mlx4_core-y :=  alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-
- obj-$(CONFIG_MLX4_EN)                  += mlx4_en.o
-
--mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o
-+mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o en_lro.o
 diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
-index cf9c5df..2348bfc 100644
+index a4d6e46..440633a 100644
 --- a/drivers/net/mlx4/en_cq.c
 +++ b/drivers/net/mlx4/en_cq.c
 @@ -34,6 +34,7 @@
@@ -38,7 +10,7 @@
  
  #include "mlx4_en.h"
  
-@@ -100,8 +101,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -106,8 +107,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  		cq->timer.function = mlx4_en_poll_tx_cq;
  		cq->timer.data = (unsigned long) cq;
  	} else {
@@ -58,7 +30,7 @@
  	}
  
  	return 0;
-@@ -123,8 +133,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -129,8 +139,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  
  	if (cq->is_tx)
  		del_timer(&cq->timer);
@@ -74,548 +46,8 @@
  
  	mlx4_cq_free(mdev->dev, &cq->mcq);
  }
-diff --git a/drivers/net/mlx4/en_lro.c b/drivers/net/mlx4/en_lro.c
-new file mode 100644
-index 0000000..9d17376
---- /dev/null
-+++ b/drivers/net/mlx4/en_lro.c
-@@ -0,0 +1,534 @@
-+/*
-+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses.  You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ *     Redistribution and use in source and binary forms, with or
-+ *     without modification, are permitted provided that the following
-+ *     conditions are met:
-+ *
-+ *      - Redistributions of source code must retain the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer.
-+ *
-+ *      - Redistributions in binary form must reproduce the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer in the documentation and/or other materials
-+ *        provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ */
-+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <net/tcp.h>
-+#include <linux/if_vlan.h>
-+#include <linux/delay.h>
-+
-+#include "mlx4_en.h"
-+
-+/* LRO hash function - using sum of source and destination port LSBs is
-+ * good enough */
-+#define LRO_INDEX(th, size) \
-+	((*((u8*) &th->source + 1) + *((u8*) &th->dest + 1)) & (size - 1))
-+
-+/* #define CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+static void mlx4_en_lro_validate(struct mlx4_en_priv* priv, struct mlx4_en_lro *lro)
-+{
-+	int i;
-+	int size, size2;
-+	struct sk_buff *skb = lro->skb;
-+	skb_frag_t *frags;
-+	int len, len2;
-+	int cur_skb = 0;
-+
-+	/* Sum fragment sizes of first skb */
-+	len = skb->len;
-+	size = skb_headlen(skb);
-+	frags = skb_shinfo(skb)->frags;
-+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+		size += frags[i].size;
-+	}
-+
-+	/* Add in fragments of linked skb's */
-+	skb = skb_shinfo(skb)->frag_list;
-+	while (skb) {
-+		cur_skb++;
-+		len2 = skb->len;
-+		if (skb_headlen(skb)) {
-+			mlx4_err(priv->mdev, "Bad LRO format: non-zero headlen "
-+				  "in fraglist (skb:%d)\n", cur_skb);
-+			return;
-+		}
-+
-+		size2 = 0;
-+		frags = skb_shinfo(skb)->frags;
-+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+			size2 += frags[i].size;
-+		}
-+
-+		if (size2 != len2) {
-+			mlx4_err(priv->mdev, "Bad skb size:%d in LRO fraglist. "
-+			          "Expected:%d (skb:%d)\n", size2, len2, cur_skb);
-+			return;
-+		}
-+		size += size2;
-+		skb = skb->next;
-+	}
-+
-+	if (size != len)
-+		mlx4_err(priv->mdev, "Bad LRO size:%d expected:%d\n", size, len);
-+}
-+#endif /* MLX4_EN_DEBUG_LRO */
-+
-+static void mlx4_en_lro_flush_single(struct mlx4_en_priv* priv,
-+		   struct mlx4_en_rx_ring* ring, struct mlx4_en_lro *lro)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct sk_buff *skb = lro->skb;
-+	struct iphdr *iph = (struct iphdr *) skb->data;
-+	struct tcphdr *th = (struct tcphdr *)(iph + 1);
-+	unsigned int headlen = skb_headlen(skb);
-+	__wsum tcp_hdr_csum;
-+	u32 *ts;
-+
-+	/* Update IP length and checksum */
-+	iph->tot_len = htons(lro->tot_len);
-+	iph->check = 0;
-+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-+
-+	/* Update latest TCP ack, window, psh, and timestamp */
-+	th->ack_seq = lro->ack_seq;
-+	th->window = lro->window;
-+	th->psh = !!lro->psh;
-+	if (lro->has_timestamp) {
-+		ts = (u32 *) (th + 1);
-+		ts[1] = htonl(lro->tsval);
-+		ts[2] = lro->tsecr;
-+	}
-+	th->check = 0;
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, 0);
-+	lro->data_csum = csum_add(lro->data_csum, tcp_hdr_csum);
-+	th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
-+				      lro->tot_len - (iph->ihl << 2),
-+				      IPPROTO_TCP, lro->data_csum);
-+
-+	/* Update skb */
-+	skb->len = lro->tot_len;
-+	skb->data_len = lro->tot_len - headlen;
-+	skb->truesize = skb->len + sizeof(struct sk_buff);
-+	skb_shinfo(skb)->gso_size = lro->mss;
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+	mlx4_en_lro_validate(priv, lro);
-+#endif /* CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+	/* Push it up the stack */
-+	if (priv->vlgrp && lro->has_vlan)
-+		vlan_hwaccel_receive_skb(skb, priv->vlgrp,
-+					be16_to_cpu(lro->vlan_prio));
-+	else
-+		netif_receive_skb(skb);
-+	priv->dev->last_rx = jiffies;
-+
-+	/* Increment stats */
-+	priv->port_stats.lro_flushed++;
-+
-+	/* Move session back to the free list */
-+	hlist_del(&lro->node);
-+	hlist_del(&lro->flush_node);
-+	hlist_add_head(&lro->node, &ring->lro_free);
-+}
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_flush, flush_node) {
-+		if (all || time_after(jiffies, lro->expires))
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+	}
-+}
-+
-+static inline int mlx4_en_lro_append(struct mlx4_en_priv *priv,
-+				   struct mlx4_en_lro *lro,
-+				   struct mlx4_en_rx_desc *rx_desc,
-+				   struct skb_frag_struct *skb_frags,
-+				   struct mlx4_en_rx_alloc *page_alloc,
-+				   unsigned int data_len,
-+				   int hlen)
-+{
-+	struct sk_buff *skb = lro->skb_last;
-+	struct skb_shared_info *info;
-+	struct skb_frag_struct *frags_copy;
-+	int nr_frags;
-+
-+	if (skb_shinfo(skb)->nr_frags + priv->num_frags > MAX_SKB_FRAGS)
-+		return -ENOMEM;
-+
-+	info = skb_shinfo(skb);
-+
-+	/* Copy fragments from descriptor ring to skb */
-+	frags_copy = info->frags + info->nr_frags;
-+	nr_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
-+						frags_copy,
-+						page_alloc,
-+						data_len + hlen);
-+	if (!nr_frags) {
-+		mlx4_dbg(DRV, priv, "Failed completing rx desc during LRO append\n");
-+		return -ENOMEM;
-+	}
-+
-+	/* Skip over headers */
-+	frags_copy[0].page_offset += hlen;
-+
-+	if (nr_frags == 1)
-+		frags_copy[0].size = data_len;
-+	else {
-+		/* Adjust size of last fragment to match packet length.
-+		 * Note: if this fragment is also the first one, the
-+		 *       operation is completed in the next line */
-+		frags_copy[nr_frags - 1].size = hlen + data_len -
-+				priv->frag_info[nr_frags - 1].frag_prefix_size;
-+
-+		/* Adjust size of first fragment */
-+		frags_copy[0].size -= hlen;
-+	}
-+
-+	/* Update skb bookkeeping */
-+	skb->len += data_len;
-+	skb->data_len += data_len;
-+	info->nr_frags += nr_frags;
-+	return 0;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_find_session(struct mlx4_en_dev *mdev,
-+						       struct mlx4_en_rx_ring *ring,
-+						       struct iphdr *iph,
-+						       struct tcphdr *th)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node;
-+	int index = LRO_INDEX(th, mdev->profile.num_lro);
-+	struct hlist_head *list = &ring->lro_hash[index];
-+
-+	hlist_for_each_entry(lro, node, list, node) {
-+		if (lro->sport_dport == *((u32*) &th->source) &&
-+		    lro->saddr == iph->saddr &&
-+		    lro->daddr == iph->daddr)
-+			return lro;
-+	}
-+	return NULL;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_alloc_session(struct mlx4_en_priv *priv,
-+							struct mlx4_en_rx_ring *ring)
-+{
-+	return hlist_empty(&ring->lro_free) ? NULL :
-+		hlist_entry(ring->lro_free.first, struct mlx4_en_lro, node);
-+}
-+
-+static __wsum mlx4_en_lro_tcp_data_csum(struct iphdr *iph,
-+					struct tcphdr *th, int len)
-+{
-+	__wsum tcp_csum;
-+	__wsum tcp_hdr_csum;
-+	__wsum tcp_ps_hdr_csum;
-+
-+	tcp_csum = ~csum_unfold(th->check);
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, tcp_csum);
-+
-+	tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-+					     len + (th->doff << 2),
-+					     IPPROTO_TCP, 0);
-+
-+	return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
-+			tcp_ps_hdr_csum);
-+}
-+
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+					  struct mlx4_en_rx_desc *rx_desc,
-+					  struct skb_frag_struct *skb_frags,
-+					  unsigned int length,
-+					  struct mlx4_cqe *cqe)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct mlx4_en_lro *lro;
-+	struct sk_buff *skb;
-+	struct iphdr *iph;
-+	struct tcphdr *th;
-+	dma_addr_t dma;
-+	int tcp_hlen;
-+	int tcp_data_len;
-+	int hlen;
-+	u16 ip_len;
-+	void *va;
-+	u32 *ts;
-+	u32 seq;
-+	u32 tsval = (u32) ~0UL;
-+	u32 tsecr = 0;
-+	u32 ack_seq;
-+	u16 window;
-+
-+	/* This packet is eligible for LRO if it is:
-+	 * - DIX Ethernet (type interpretation)
-+	 * - TCP/IP (v4)
-+	 * - without IP options
-+	 * - not an IP fragment */
-+	if (!mlx4_en_can_lro(cqe->status))
-+			return -1;
-+
-+	/* Get pointer to TCP header. We already know that the packet is DIX Ethernet/IPv4/TCP
-+	 * with no VLAN (HW stripped it) and no IP options */
-+	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
-+	iph = va + ETH_HLEN;
-+	th = (struct tcphdr *)(iph + 1);
-+
-+	/* Synchronsize headers for processing */
-+	dma = be64_to_cpu(rx_desc->data[0].addr);
-+#define MAX_LRO_HEADER		(ETH_HLEN + \
-+				 sizeof(*iph) + \
-+				 sizeof(*th) + \
-+				 TCPOLEN_TSTAMP_ALIGNED)
-+	dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
-+				      MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+
-+	/* We only handle aligned timestamp options */
-+	tcp_hlen = (th->doff << 2);
-+	if (tcp_hlen == sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) {
-+		ts = (u32*) (th + 1);
-+		if (unlikely(*ts != htonl((TCPOPT_NOP << 24) |
-+					  (TCPOPT_NOP << 16) |
-+					  (TCPOPT_TIMESTAMP << 8) |
-+					  TCPOLEN_TIMESTAMP)))
-+			goto sync_device;
-+		tsval = ntohl(ts[1]);
-+		tsecr = ts[2];
-+	} else if (tcp_hlen != sizeof(*th))
-+		goto sync_device;
-+	
-+
-+	/* At this point we know we have a TCP packet that is likely to be
-+	 * eligible for LRO. Therefore, see now if we have an oustanding
-+	 * session that corresponds to this packet so we could flush it if
-+	 * something still prevents LRO */
-+	lro = mlx4_en_lro_find_session(mdev, ring, iph, th);
-+
-+	/* ensure no bits set besides ack or psh */
-+	if (th->fin || th->syn || th->rst || th->urg || th->ece ||
-+	    th->cwr || !th->ack) {
-+		if (lro) {
-+			/* First flush session to keep packets in-order */
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		}
-+		goto sync_device;
-+	}
-+
-+	/* Get ip length and verify that the frame is big enough */
-+	ip_len = ntohs(iph->tot_len);
-+	if (unlikely(length < ETH_HLEN + ip_len)) {
-+		mlx4_warn(mdev, "Cannot LRO - ip payload exceeds frame!\n");
-+		goto sync_device;
-+	}
-+
-+	/* Get TCP payload length */
-+	tcp_data_len = ip_len - tcp_hlen - sizeof(struct iphdr);
-+	seq = ntohl(th->seq);
-+	if (!tcp_data_len)
-+		goto flush_session;
-+
-+	if (lro) {
-+		/* Check VLAN tag */
-+		if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+			if (cqe->sl_vid != lro->vlan_prio || !lro->has_vlan) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else if (lro->has_vlan) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* Check sequence number */
-+		if (unlikely(seq != lro->next_seq)) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* If the cummulative IP length is over 64K, flush and start
-+		 * a new session */
-+		if (lro->tot_len + tcp_data_len > 0xffff) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto new_session;
-+		}
-+
-+		/* Check timestamps */
-+		if (tcp_hlen != sizeof(*th)) {
-+			if (unlikely(lro->tsval > tsval || !tsecr))
-+				goto sync_device;
-+		}
-+
-+		window = th->window;
-+		ack_seq = th->ack_seq;
-+		if (likely(tcp_data_len)) {
-+			/* Append the data! */
-+			hlen = ETH_HLEN + sizeof(struct iphdr) + tcp_hlen;
-+			if (mlx4_en_lro_append(priv, lro, rx_desc, skb_frags,
-+							ring->page_alloc,
-+							tcp_data_len, hlen)) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else {
-+			/* No data */
-+			dma_sync_single_range_for_device(&mdev->dev->pdev->dev, dma,
-+							 0, MAX_LRO_HEADER,
-+							 DMA_FROM_DEVICE);
-+		}
-+
-+		/* Update session */
-+		lro->psh |= th->psh;
-+		lro->next_seq += tcp_data_len;
-+		lro->data_csum = csum_block_add(lro->data_csum,
-+					mlx4_en_lro_tcp_data_csum(iph, th,
-+								  tcp_data_len),
-+					lro->tot_len);
-+		lro->tot_len += tcp_data_len;
-+		lro->tsval = tsval;
-+		lro->tsecr = tsecr;
-+		lro->ack_seq = ack_seq;
-+		lro->window = window;
-+		if (tcp_data_len > lro->mss)
-+			lro->mss = tcp_data_len;
-+		priv->port_stats.lro_aggregated++;
-+		if (th->psh)
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		return 0;
-+	}
-+
-+new_session:
-+	if (th->psh)
-+		goto sync_device;
-+	lro = mlx4_en_lro_alloc_session(priv, ring);
-+	if (lro) {
-+		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags, ring->page_alloc,
-+							     ETH_HLEN + ip_len);
-+		if (skb) {
-+			int index;
-+
-+			/* Add in the skb */
-+			lro->skb = skb;
-+			lro->skb_last = skb;
-+			skb->protocol = eth_type_trans(skb, priv->dev);
-+			skb->ip_summed = CHECKSUM_UNNECESSARY;
-+
-+			/* Initialize session */
-+			lro->saddr = iph->saddr;
-+			lro->daddr = iph->daddr;
-+			lro->sport_dport = *((u32*) &th->source);
-+
-+			lro->next_seq = seq + tcp_data_len;
-+			lro->tot_len = ip_len;
-+			lro->psh = th->psh;
-+			lro->ack_seq = th->ack_seq;
-+			lro->window = th->window;
-+			lro->mss = tcp_data_len;
-+			lro->data_csum = mlx4_en_lro_tcp_data_csum(iph, th,
-+						tcp_data_len);
-+
-+			/* Handle vlans */
-+			if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+				lro->vlan_prio = cqe->sl_vid;
-+				lro->has_vlan = 1;
-+			} else
-+				lro->has_vlan = 0;
-+
-+			/* Handle timestamps */
-+			if (tcp_hlen != sizeof(*th)) {
-+				lro->tsval = tsval;
-+				lro->tsecr = tsecr;
-+				lro->has_timestamp = 1;
-+			} else {
-+				lro->tsval = (u32) ~0UL;
-+				lro->has_timestamp = 0;
-+			}
-+
-+			/* Activate this session */
-+			lro->expires = jiffies + HZ / 25;
-+			hlist_del(&lro->node);
-+			index = LRO_INDEX(th, mdev->profile.num_lro);
-+
-+			hlist_add_head(&lro->node, &ring->lro_hash[index]);
-+			hlist_add_head(&lro->flush_node, &ring->lro_flush);
-+			priv->port_stats.lro_aggregated++;
-+			return 0;
-+		}
-+	} else {
-+		priv->port_stats.lro_no_desc++;
-+	}
-+
-+flush_session:
-+	if (lro)
-+		mlx4_en_lro_flush_single(priv, ring, lro);
-+sync_device:
-+	dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
-+					 MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+	return -1;
-+}
-+
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_free, node) {
-+		hlist_del(&lro->node);
-+		kfree(lro);
-+	}
-+	kfree(ring->lro_hash);
-+}
-+
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro)
-+{
-+	struct mlx4_en_lro *lro;
-+	int i;
-+
-+	INIT_HLIST_HEAD(&ring->lro_free);
-+	INIT_HLIST_HEAD(&ring->lro_flush);
-+	ring->lro_hash = kmalloc(sizeof(struct hlist_head) * num_lro,
-+				 GFP_KERNEL);
-+	if (!ring->lro_hash)
-+		return -ENOMEM;
-+
-+	for (i = 0; i < num_lro; i++) {
-+		INIT_HLIST_HEAD(&ring->lro_hash[i]);
-+		lro = kzalloc(sizeof(struct mlx4_en_lro), GFP_KERNEL);
-+		if (!lro) {
-+			mlx4_en_lro_destroy(ring);
-+			return -ENOMEM;
-+		}
-+		INIT_HLIST_NODE(&lro->node);
-+		INIT_HLIST_NODE(&lro->flush_node);
-+		hlist_add_head(&lro->node, &ring->lro_free);
-+	}
-+	return 0;
-+}
-+
-+
 diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
-index 6199f4c..92f91d2 100644
+index 2d57ae6..3b394d7 100644
 --- a/drivers/net/mlx4/en_netdev.c
 +++ b/drivers/net/mlx4/en_netdev.c
 @@ -333,7 +333,8 @@ void mlx4_en_netpoll(struct net_device *dev)
@@ -625,10 +57,10 @@
 -		napi_synchronize(&cq->napi);
 +		while (test_bit(__LINK_STATE_RX_SCHED, &cq->poll_dev->state))
 +			msleep(1);
- 		mlx4_en_process_rx_cq(dev, cq, 0);
- 		spin_unlock_irqrestore(&cq->lock, flags);
- 	}
-@@ -757,8 +758,6 @@ void mlx4_en_stop_port(struct net_device *dev)
+ 		if (priv->rx_ring[i].use_frags)
+ 			mlx4_en_process_rx_cq(dev, cq, 0);
+ 		else
+@@ -755,8 +756,6 @@ void mlx4_en_stop_port(struct net_device *dev)
  	/* Free RX Rings */
  	for (i = 0; i < priv->rx_ring_num; i++) {
  		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -637,226 +69,11 @@
  		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
  	}
  }
-diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
-index 6e96d0a..2b0ca84 100644
---- a/drivers/net/mlx4/en_params.c
-+++ b/drivers/net/mlx4/en_params.c
-@@ -142,21 +142,6 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
-  * Ethtool support
-  */
- 
--static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
--{
--	int i;
--
--	priv->port_stats.lro_aggregated = 0;
--	priv->port_stats.lro_flushed = 0;
--	priv->port_stats.lro_no_desc = 0;
--
--	for (i = 0; i < priv->rx_ring_num; i++) {
--		priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
--		priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
--		priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
--	}
--}
--
- static void
- mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
- {
-@@ -266,8 +251,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
- 
- 	spin_lock_bh(&priv->stats_lock);
- 
--	mlx4_en_update_lro_stats(priv);
--
- 	for (i = 0; i < NUM_MAIN_STATS; i++)
- 		data[index++] = ((unsigned long *) &priv->stats)[i];
- 	for (i = 0; i < NUM_PORT_STATS; i++)
-@@ -457,7 +440,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
- 	.get_rx_csum = mlx4_en_get_rx_csum,
- 	.set_rx_csum = mlx4_en_set_rx_csum,
- 	.get_tx_csum = ethtool_op_get_tx_csum,
--	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
-+	.set_tx_csum = ethtool_op_set_tx_csum,
- 	.get_strings = mlx4_en_get_strings,
- 	.get_sset_count = mlx4_en_get_sset_count,
- 	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
 diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
-index 5a95d74..16f07bc 100644
+index 9d144cd..14b5faf 100644
 --- a/drivers/net/mlx4/en_rx.c
 +++ b/drivers/net/mlx4/en_rx.c
-@@ -37,6 +37,7 @@
- #include <linux/if_ether.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/etherdevice.h>
- 
- #include "mlx4_en.h"
- 
-@@ -51,18 +52,6 @@ static void mlx4_en_srq_event(struct mlx
- 	return;
- }
- 
--static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
--				   void **ip_hdr, void **tcpudp_hdr,
--				   u64 *hdr_flags, void *priv)
--{
--	*mac_hdr = page_address(frags->page) + frags->page_offset;
--	*ip_hdr = *mac_hdr + ETH_HLEN;
--	*tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
--	*hdr_flags = LRO_IPV4 | LRO_TCP;
--
--	return 0;
--}
--
- static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
- 			      struct mlx4_en_rx_desc *rx_desc,
- 			      struct skb_frag_struct *skb_frags,
-@@ -372,23 +361,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- 	}
- 	ring->buf = ring->wqres.buf.direct.buf;
- 
--	/* Configure lro mngr */
--	memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
--	ring->lro.dev = priv->dev;
--	ring->lro.features = LRO_F_NAPI;
--	ring->lro.frag_align_pad = NET_IP_ALIGN;
--	ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
--	ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
--	ring->lro.max_desc = mdev->profile.num_lro;
--	ring->lro.max_aggr = MAX_SKB_FRAGS;
--	ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
--				    sizeof(struct net_lro_desc),
--				    GFP_KERNEL);
--	if (!ring->lro.lro_arr) {
--		mlx4_err(mdev, "Failed to allocate lro array\n");
-+	/* Allocate LRO sessions */
-+	if (mlx4_en_lro_init(ring, mdev->profile.num_lro)) {
-+		mlx4_err(mdev, "Failed allocating lro sessions\n");
- 		goto err_map;
- 	}
--	ring->lro.get_frag_header = mlx4_en_get_frag_header;
- 
- 	return 0;
- 
-@@ -501,7 +478,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 
--	kfree(ring->lro.lro_arr);
-+	mlx4_en_lro_destroy(ring);
- 	mlx4_en_unmap_buffer(&ring->wqres.buf);
- 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
- 	vfree(ring->rx_info);
-@@ -522,12 +499,12 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- 
- 
- /* Unmap a completed descriptor and free unused pages */
--static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
--				    struct mlx4_en_rx_desc *rx_desc,
--				    struct skb_frag_struct *skb_frags,
--				    struct skb_frag_struct *skb_frags_rx,
--				    struct mlx4_en_rx_alloc *page_alloc,
--				    int length)
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct mlx4_en_frag_info *frag_info;
-@@ -570,11 +547,11 @@ fail:
- }
- 
- 
--static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
--				      struct mlx4_en_rx_desc *rx_desc,
--				      struct skb_frag_struct *skb_frags,
--				      struct mlx4_en_rx_alloc *page_alloc,
--				      unsigned int length)
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct sk_buff *skb;
-@@ -663,11 +640,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 	struct mlx4_cqe *cqe;
- 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
- 	struct skb_frag_struct *skb_frags;
--	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
- 	struct mlx4_en_rx_desc *rx_desc;
- 	struct sk_buff *skb;
- 	int index;
--	int nr;
- 	unsigned int length;
- 	int polled = 0;
- 	int ip_summed;
-@@ -718,38 +693,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 			if ((cqe->status & MLX4_CQE_STATUS_IPOK) &&
- 			    (cqe->checksum == 0xffff)) {
- 				priv->port_stats.rx_chksum_good++;
--				/* This packet is eligible for LRO if it is:
--				 * - DIX Ethernet (type interpretation)
--				 * - TCP/IP (v4)
--				 * - without IP options
--				 * - not an IP fragment */
--				if (mlx4_en_can_lro(cqe->status) &&
--				    dev->features & NETIF_F_LRO) {
--
--					nr = mlx4_en_complete_rx_desc(
--						priv, rx_desc,
--						skb_frags, lro_frags,
--						ring->page_alloc, length);
--					if (!nr)
--						goto next;
--
--					if (priv->vlgrp && (cqe->vlan_my_qpn &
--						MLX4_CQE_VLAN_PRESENT_MASK)) {
--						lro_vlan_hwaccel_receive_frags(
--						       &ring->lro, lro_frags,
--						       length, length,
--						       priv->vlgrp,
--						       be16_to_cpu(cqe->sl_vid),
--						       NULL, 0);
--					} else
--						lro_receive_frags(&ring->lro,
--								  lro_frags,
--								  length,
--								  length,
--								  NULL, 0);
--
-+				if (mdev->profile.num_lro &&
-+				    !mlx4_en_lro_rx(priv, ring, rx_desc,
-+						    skb_frags, length, cqe))
- 					goto next;
--				}
- 
- 				/* LRO not possible, complete processing here */
- 				ip_summed = CHECKSUM_UNNECESSARY;
-@@ -790,13 +737,15 @@ next:
- 		if (++polled == budget) {
- 			/* We are here because we reached the NAPI budget -
- 			 * flush only pending LRO sessions */
--			lro_flush_all(&ring->lro);
-+			if (mdev->profile.num_lro)
-+				mlx4_en_lro_flush(priv, ring, 0);
- 			goto out;
- 		}
- 	}
- 
- 	/* If CQ is empty flush all LRO sessions unconditionally */
--	lro_flush_all(&ring->lro);
-+	if (mdev->profile.num_lro)
-+		mlx4_en_lro_flush(priv, ring, 1);
- 
- out:
- 	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
-@@ -820,30 +769,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
+@@ -1041,30 +1041,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
  	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
  
  	if (priv->port_up)
@@ -877,8 +94,8 @@
  	int done;
 +	int work = min(*budget, poll_dev->quota);
  
--	done = mlx4_en_process_rx_cq(dev, cq, budget);
-+	done = mlx4_en_process_rx_cq(dev, cq, work);
+-	done = cq->process_cq(dev, cq, budget);
++	done = cq->process_cq(dev, cq, work);
 +	dev->quota -= done;
 +	*budget -= done;
  
@@ -901,76 +118,11 @@
  }
  
  
-diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
-index 2297fec..59f8772 100644
---- a/drivers/net/mlx4/en_tx.c
-+++ b/drivers/net/mlx4/en_tx.c
-@@ -37,6 +37,7 @@
- #include <linux/skbuff.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/tcp.h>
- 
- #include "mlx4_en.h"
- 
 diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
-index 1011803..7c8c489 100644
+index ebaed67..456cd91 100644
 --- a/drivers/net/mlx4/mlx4_en.h
 +++ b/drivers/net/mlx4/mlx4_en.h
-@@ -38,7 +38,7 @@
- #include <linux/list.h>
- #include <linux/mutex.h>
- #include <linux/netdevice.h>
--#include <linux/inet_lro.h>
-+#include <net/checksum.h>
- 
- #include <linux/mlx4/device.h>
- #include <linux/mlx4/qp.h>
-@@ -268,11 +268,42 @@ struct mlx4_en_rx_desc {
- 	struct mlx4_wqe_data_seg data[0];
- };
- 
-+struct mlx4_en_lro {
-+	struct hlist_node node;
-+	struct hlist_node flush_node;
-+
-+	/* Id fields come first: */
-+	u32 saddr;
-+	u32 daddr;
-+	u32 sport_dport;
-+	u32 next_seq;
-+	u16 tot_len;
-+	u8 psh;
-+
-+	u32 tsval;
-+	u32 tsecr;
-+	u32 ack_seq;
-+	u16 window;
-+	__be16 vlan_prio;
-+	u16 has_vlan;
-+	u16 has_timestamp;
-+	u16 mss;
-+	__wsum  data_csum;
-+
-+	unsigned long expires;
-+	struct sk_buff *skb;
-+	struct sk_buff *skb_last;
-+};
-+
-+
- struct mlx4_en_rx_ring {
- 	struct mlx4_srq srq;
- 	struct mlx4_hwq_resources wqres;
- 	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
--	struct net_lro_mgr lro;
-+	struct mlx4_en_lro lro;
-+	struct hlist_head *lro_hash;
-+	struct hlist_head lro_free;
-+	struct hlist_head lro_flush;
- 	u32 size ;	/* number of Rx descs*/
- 	u32 actual_size;
- 	u32 size_mask;
-@@ -311,7 +342,7 @@ struct mlx4_en_cq {
+@@ -328,7 +328,7 @@ struct mlx4_en_cq {
  	int                     ring;
  	spinlock_t              lock;
  	struct net_device      *dev;
@@ -979,43 +131,12 @@
  	/* Per-core Tx cq processing support */
  	struct timer_list timer;
  	int size;
-@@ -541,7 +572,7 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- int mlx4_en_process_rx_cq(struct net_device *dev,
- 			  struct mlx4_en_cq *cq,
- 			  int budget);
+@@ -571,7 +571,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
+ int mlx4_en_process_rx_cq_skb(struct net_device *dev,
+ 			      struct mlx4_en_cq *cq,
+ 			      int budget);
 -int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
 +int mlx4_en_poll_rx_cq(struct net_device *poll_dev, int *budget);;
  void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
  			     int is_tx, int rss, int qpn, int cqn, int srqn,
  			     struct mlx4_qp_context *context);
-@@ -552,6 +583,27 @@ void mlx4_en_calc_rx_buf(struct net_device *dev);
- void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
- 				 struct mlx4_en_rss_map *rss_map,
- 				 int num_entries, int num_rings);
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all);
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+		   struct mlx4_en_rx_desc *rx_desc,
-+		   struct skb_frag_struct *skb_frags,
-+		   unsigned int length, struct mlx4_cqe *cqe);
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring);
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro);
-+
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv* priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length);
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv* priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length);
-+
- void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
- int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
- void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
--- 
-1.5.4
-

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/mlx4_en_0110_no_set_flags.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/mlx4_en_0110_no_set_flags.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16/mlx4_en_0110_no_set_flags.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -7,9 +7,9 @@
 --- ofed_kernel.orig/drivers/net/mlx4/en_params.c
 +++ ofed_kernel/drivers/net/mlx4/en_params.c
 @@ -593,8 +593,6 @@ const struct ethtool_ops mlx4_en_ethtool
- 	.get_pauseparam = mlx4_en_get_pauseparam,
  	.set_pauseparam = mlx4_en_set_pauseparam,
  	.get_ringparam = mlx4_en_get_ringparam,
+ 	.set_ringparam = mlx4_en_set_ringparam,
 -	.get_flags = ethtool_op_get_flags,
 -	.set_flags = ethtool_op_set_flags,
  };

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/core_2_z010_sysfs_to_2.6.18.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/core_2_z010_sysfs_to_2.6.18.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/core_2_z010_sysfs_to_2.6.18.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,32 @@
+--- a/drivers/infiniband/core/device.c	2009-02-24 14:00:50.387541000 +0200
++++ b/drivers/infiniband/core/device.c	2009-02-24 14:00:54.891362000 +0200
+@@ -199,7 +199,7 @@ void ib_dealloc_device(struct ib_device 
+ 
+ 	BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);
+ 
+-	kobject_put(&device->dev.kobj);
++	ib_device_unregister_sysfs(device);
+ }
+ EXPORT_SYMBOL(ib_dealloc_device);
+ 
+@@ -356,8 +356,6 @@ void ib_unregister_device(struct ib_devi
+ 
+ 	mutex_unlock(&device_mutex);
+ 
+-	ib_device_unregister_sysfs(device);
+-
+ 	spin_lock_irqsave(&device->client_data_lock, flags);
+ 	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ 		kfree(context);
+--- a/drivers/infiniband/core/sysfs.c	2009-02-24 13:58:42.509754000 +0200
++++ b/drivers/infiniband/core/sysfs.c	2009-02-24 14:00:54.894368000 +0200
+@@ -863,9 +863,6 @@ void ib_device_unregister_sysfs(struct i
+ 	struct kobject *p, *t;
+ 	struct ib_port *port;
+ 
+-	/* Hold kobject until ib_dealloc_device() */
+-	kobject_get(&device->dev.kobj);
+-
+ 	list_for_each_entry_safe(p, t, &device->port_list, entry) {
+ 		list_del(&p->entry);
+ 		port = container_of(p, struct ib_port, kobj);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/core_z0010_sysfs_race.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/core_z0010_sysfs_race.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/core_z0010_sysfs_race.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,496 @@
+ib_core: avoid race condition between sysfs access and low-level module unload. (V2)
+
+In newer kernels, although a low-level module will not be unloaded (code)
+while its sysfs interface is being accessed, it is possible for the module to
+free all its resources (data) during such access.  This almost always causes
+a kernel Oops.
+
+To avoid this, we protect the device reg_state with a mutex, and perform
+all sysfs operations (show, store) atomically within this mutex.
+
+V2: fix thinko bug in sysfs_state_show changes(found by Ralph Campbell).
+
+Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
+Signed-off-by: Ralph Campbell <ralph.campbell at qlogic.com>
+
+Index: ofed_kernel/drivers/infiniband/core/device.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/device.c	2009-02-22 15:36:34.531430000 +0200
++++ ofed_kernel/drivers/infiniband/core/device.c	2009-02-22 15:38:28.650322000 +0200
+@@ -178,9 +178,14 @@ static int end_port(struct ib_device *de
+  */
+ struct ib_device *ib_alloc_device(size_t size)
+ {
++	struct ib_device *ibdev;
++
+ 	BUG_ON(size < sizeof (struct ib_device));
+ 
+-	return kzalloc(size, GFP_KERNEL);
++	ibdev = kzalloc(size, GFP_KERNEL);
++	if (ibdev)
++		mutex_init(&ibdev->sysfs_mutex);
++	return ibdev;
+ }
+ EXPORT_SYMBOL(ib_alloc_device);
+ 
+@@ -313,9 +318,10 @@ int ib_register_device(struct ib_device 
+ 		goto out;
+ 	}
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	list_add_tail(&device->core_list, &device_list);
+-
+ 	device->reg_state = IB_DEV_REGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ 
+ 	{
+ 		struct ib_client *client;
+@@ -361,7 +367,9 @@ void ib_unregister_device(struct ib_devi
+ 		kfree(context);
+ 	spin_unlock_irqrestore(&device->client_data_lock, flags);
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	device->reg_state = IB_DEV_UNREGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ }
+ EXPORT_SYMBOL(ib_unregister_device);
+ 
+Index: ofed_kernel/drivers/infiniband/core/sysfs.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/sysfs.c	2009-02-22 15:36:35.914354000 +0200
++++ ofed_kernel/drivers/infiniband/core/sysfs.c	2009-02-22 16:12:37.485629000 +0200
+@@ -94,7 +94,7 @@ static ssize_t state_show(struct ib_port
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+ 	static const char *state_name[] = {
+ 		[IB_PORT_NOP]		= "NOP",
+@@ -105,26 +105,33 @@ static ssize_t state_show(struct ib_port
+ 		[IB_PORT_ACTIVE_DEFER]	= "ACTIVE_DEFER"
+ 	};
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%d: %s\n", attr.state,
+-		       attr.state >= 0 && attr.state < ARRAY_SIZE(state_name) ?
+-		       state_name[attr.state] : "UNKNOWN");
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d: %s\n", attr.state,
++				      attr.state >= 0 &&
++				      attr.state < ARRAY_SIZE(state_name) ?
++				      state_name[attr.state] : "UNKNOWN");
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "0x%x\n", attr.lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_mask_count_show(struct ib_port *p,
+@@ -132,52 +139,64 @@ static ssize_t lid_mask_count_show(struc
+ 				   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.lmc);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.lmc);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%x\n", attr.sm_lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.sm_lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused,
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.sm_sl);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.sm_sl);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused,
+ 			     char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
+@@ -186,24 +205,33 @@ static ssize_t rate_show(struct ib_port 
+ 	struct ib_port_attr attr;
+ 	char *speed = "";
+ 	int rate;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	switch (attr.active_speed) {
+-	case 2: speed = " DDR"; break;
+-	case 4: speed = " QDR"; break;
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.active_speed) {
++			case 2: speed = " DDR"; break;
++			case 4: speed = " QDR"; break;
++			}
++
++			rate = 25 * ib_width_enum_to_int(attr.active_width) *
++				attr.active_speed;
++			if (rate < 0) {
++				ret = -EINVAL;
++				goto out;
++			}
++
++			ret = sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
++				      rate / 10, rate % 10 ? ".5" : "",
++				      ib_width_enum_to_int(attr.active_width),
++				      speed);
++		}
+ 	}
+-
+-	rate = 25 * ib_width_enum_to_int(attr.active_width) * attr.active_speed;
+-	if (rate < 0)
+-		return -EINVAL;
+-
+-	return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
+-		       rate / 10, rate % 10 ? ".5" : "",
+-		       ib_width_enum_to_int(attr.active_width), speed);
++out:
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
+@@ -211,22 +239,42 @@ static ssize_t phys_state_show(struct ib
+ {
+ 	struct ib_port_attr attr;
+ 
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	switch (attr.phys_state) {
+-	case 1:  return sprintf(buf, "1: Sleep\n");
+-	case 2:  return sprintf(buf, "2: Polling\n");
+-	case 3:  return sprintf(buf, "3: Disabled\n");
+-	case 4:  return sprintf(buf, "4: PortConfigurationTraining\n");
+-	case 5:  return sprintf(buf, "5: LinkUp\n");
+-	case 6:  return sprintf(buf, "6: LinkErrorRecovery\n");
+-	case 7:  return sprintf(buf, "7: Phy Test\n");
+-	default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.phys_state) {
++			case 1:
++				ret = sprintf(buf, "1: Sleep\n");
++				break;
++			case 2:
++				ret = sprintf(buf, "2: Polling\n");
++				break;
++			case 3:
++				ret = sprintf(buf, "3: Disabled\n");
++				break;
++			case 4:
++				ret = sprintf(buf, "4: PortConfigurationTraining\n");
++				break;
++			case 5:
++				ret = sprintf(buf, "5: LinkUp\n");
++				break;
++			case 6:
++				ret = sprintf(buf, "6: LinkErrorRecovery\n");
++				break;
++			case 7:
++				ret = sprintf(buf, "7: Phy Test\n");
++				break;
++			default:
++				ret = sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++				break;
++			}
++		}
+ 	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static PORT_ATTR_RO(state);
+@@ -256,21 +304,24 @@ static ssize_t show_port_gid(struct ib_p
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	union ib_gid gid;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) gid.raw)[0]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[1]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[2]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[3]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[4]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[5]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[6]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[7]));
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) gid.raw)[0]),
++				      be16_to_cpu(((__be16 *) gid.raw)[1]),
++				      be16_to_cpu(((__be16 *) gid.raw)[2]),
++				      be16_to_cpu(((__be16 *) gid.raw)[3]),
++				      be16_to_cpu(((__be16 *) gid.raw)[4]),
++				      be16_to_cpu(((__be16 *) gid.raw)[5]),
++				      be16_to_cpu(((__be16 *) gid.raw)[6]),
++				      be16_to_cpu(((__be16 *) gid.raw)[7]));
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
+@@ -279,13 +330,16 @@ static ssize_t show_port_pkey(struct ib_
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	u16 pkey;
+-	ssize_t ret;
+-
+-	ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%04x\n", pkey);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
++		if (!ret)
++			ret = sprintf(buf, "0x%04x\n", pkey);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ #define PORT_PMA_ATTR(_name, _counter, _width, _offset)			\
+@@ -308,6 +346,12 @@ static ssize_t show_pma_counter(struct i
+ 	if (!p->ibdev->process_mad)
+ 		return sprintf(buf, "N/A (no PMA)\n");
+ 
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (!ibdev_is_alive(p->ibdev)) {
++		ret = -ENODEV;
++		goto out;
++	}
++
+ 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
+ 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ 	if (!in_mad || !out_mad) {
+@@ -354,7 +414,7 @@ static ssize_t show_pma_counter(struct i
+ out:
+ 	kfree(in_mad);
+ 	kfree(out_mad);
+-
++	mutex_unlock(&p->ibdev->sysfs_mutex);
+ 	return ret;
+ }
+ 
+@@ -594,20 +654,20 @@ static ssize_t show_sys_image_guid(struc
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_attr attr;
+-	ssize_t ret;
+-
+-	if (!ibdev_is_alive(dev))
+-		return -ENODEV;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_device(dev, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_query_device(dev, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_node_guid(struct device *device,
+@@ -639,17 +699,20 @@ static ssize_t set_node_desc(struct devi
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_modify desc = {};
+-	int ret;
++	int ret = -ENODEV;
+ 
+ 	if (!dev->modify_device)
+ 		return -EIO;
+ 
+ 	memcpy(desc.node_desc, buf, min_t(int, count, 64));
+-	ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
+-	if (ret)
+-		return ret;
+-
+-	return count;
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
++		if (!ret)
++			ret = count;
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
+@@ -677,14 +740,18 @@ static ssize_t show_protocol_stat(const 
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	union rdma_protocol_stats stats;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = dev->get_protocol_stats(dev, &stats);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%llu\n",
+-		       (unsigned long long) ((u64 *) &stats)[offset]);
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = dev->get_protocol_stats(dev, &stats);
++		if (!ret)
++			ret = sprintf(buf, "%llu\n",
++				      (unsigned long long)
++				      ((u64 *) &stats)[offset]);
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ /* generate a read-only iwarp statistics attribute */
+Index: ofed_kernel/include/rdma/ib_verbs.h
+===================================================================
+--- ofed_kernel.orig/include/rdma/ib_verbs.h	2009-02-22 15:36:40.252210000 +0200
++++ ofed_kernel/include/rdma/ib_verbs.h	2009-02-22 15:38:28.678322000 +0200
+@@ -1205,6 +1205,7 @@ struct ib_device {
+ 		IB_DEV_REGISTERED,
+ 		IB_DEV_UNREGISTERED
+ 	}                            reg_state;
++	struct mutex		     sysfs_mutex;
+ 
+ 	u64			     uverbs_cmd_mask;
+ 	int			     uverbs_abi_ver;

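The sysfs hunks above all apply one pattern: each show/store handler takes the device's new sysfs_mutex, proceeds only while ibdev_is_alive() still reports the device as registered, and otherwise returns -ENODEV, so the attribute files cannot race with device removal. A minimal sketch of that guard, using the pkey attribute from the hunks above (not compilable on its own; it assumes the surrounding ib_core sysfs code):

static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
			      char *buf)
{
	struct port_table_attribute *tab_attr =
		container_of(attr, struct port_table_attribute, attr);
	u16 pkey;
	ssize_t ret = -ENODEV;	/* default when the device is already gone */

	mutex_lock(&p->ibdev->sysfs_mutex);	/* serializes against unregister */
	if (ibdev_is_alive(p->ibdev)) {
		ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
		if (!ret)
			ret = sprintf(buf, "0x%04x\n", pkey);
	}
	mutex_unlock(&p->ibdev->sysfs_mutex);
	return ret;
}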
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/cxgb3_0002_undo_250.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/cxgb3_0002_undo_250.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/cxgb3_0002_undo_250.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,8 @@
-commit 204e2f98c2d13f869b8541f3c57c7314f75cab11
-Author: Divy Le Ray <divy at chelsio.com>
-Date:   Tue May 6 19:26:01 2008 -0700
-
-    cxgb3 - fix EEH
-    
-    Reset the chip when the PCI link goes down.
-    Preserve the napi structure when a sge qset's resources are freed.
-    Replay only HW initialization when the chip comes out of reset.
-    
-    Signed-off-by: Divy Le ray <divy at chelsio.com>
-    Signed-off-by: Jeff Garzik <jgarzik at redhat.com>
-
----
- drivers/net/cxgb3/common.h     |    1 -
- drivers/net/cxgb3/cxgb3_main.c |   10 ++++------
- drivers/net/cxgb3/regs.h       |    8 --------
- drivers/net/cxgb3/sge.c        |   31 ++-----------------------------
- drivers/net/cxgb3/t3_hw.c      |   28 ----------------------------
- 5 files changed, 6 insertions(+), 72 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/common.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/common.h
-+++ ofed_kernel/drivers/net/cxgb3/common.h
-@@ -700,7 +700,6 @@ void mac_prep(struct cmac *mac, struct a
+diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
+index d5e9bf7..dc2c79d 100644
+--- a/drivers/net/cxgb3/common.h
++++ b/drivers/net/cxgb3/common.h
+@@ -726,7 +726,6 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
  void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
  int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
  		    int reset);
@@ -31,11 +10,11 @@
  void t3_led_ready(struct adapter *adapter);
  void t3_fatal_err(struct adapter *adapter);
  void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -2449,6 +2449,9 @@ static pci_ers_result_t t3_io_error_dete
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 8de820e..48fbda6 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -2485,6 +2485,9 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
  	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
  		offload_close(&adapter->tdev);
  
@@ -45,7 +24,7 @@
  	adapter->flags &= ~FULL_INIT_DONE;
  
  	pci_disable_device(pdev);
-@@ -2473,12 +2476,8 @@ static pci_ers_result_t t3_io_slot_reset
+@@ -2509,12 +2512,8 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
  		goto err;
  	}
  	pci_set_master(pdev);
@@ -59,7 +38,7 @@
  		goto err;
  
  	return PCI_ERS_RESULT_RECOVERED;
-@@ -2630,7 +2629,6 @@ static int __devinit init_one(struct pci
+@@ -2666,7 +2665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
  	}
  
  	pci_set_master(pdev);
@@ -67,10 +46,10 @@
  
  	mmio_start = pci_resource_start(pdev, 0);
  	mmio_len = pci_resource_len(pdev, 0);
-Index: ofed_kernel/drivers/net/cxgb3/regs.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/regs.h
-+++ ofed_kernel/drivers/net/cxgb3/regs.h
+diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
+index a035d5c..086cd02 100644
+--- a/drivers/net/cxgb3/regs.h
++++ b/drivers/net/cxgb3/regs.h
 @@ -444,14 +444,6 @@
  
  #define A_PCIE_CFG 0x88
@@ -86,11 +65,11 @@
  #define S_PCIE_CLIDECEN    16
  #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
  #define F_PCIE_CLIDECEN    V_PCIE_CLIDECEN(1U)
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index 1b0861d..912f816 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
  }
  
  /**
@@ -124,7 +103,7 @@
   *	free_qset - free the resources of an SGE queue set
   *	@adapter: the adapter owning the queue set
   *	@q: the queue set
-@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter 
+@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -133,20 +112,11 @@
  }
  
  /**
-@@ -1429,7 +1402,7 @@ static void restart_ctrlq(unsigned long 
-  */
- int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
- {
--	int ret;
-+	int ret;
- 	local_bh_disable();
- 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
- 	local_bh_enable();
-Index: ofed_kernel/drivers/net/cxgb3/t3_hw.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/t3_hw.c
-+++ ofed_kernel/drivers/net/cxgb3/t3_hw.c
-@@ -3265,7 +3265,6 @@ static void config_pcie(struct adapter *
+diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
+index bf5c076..533fc74 100644
+--- a/drivers/net/cxgb3/t3_hw.c
++++ b/drivers/net/cxgb3/t3_hw.c
+@@ -3303,7 +3303,6 @@ static void config_pcie(struct adapter *adap)
  
  	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
  	t3_set_reg_field(adap, A_PCIE_CFG, 0,
@@ -154,15 +124,14 @@
  			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
  }
  
-@@ -3657,30 +3656,3 @@ void t3_led_ready(struct adapter *adapte
- 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+@@ -3706,31 +3705,3 @@ void t3_led_ready(struct adapter *adapter)
  			 F_GPIO0_OUT_VAL);
  }
--
+ 
 -int t3_replay_prep_adapter(struct adapter *adapter)
 -{
 -	const struct adapter_info *ai = adapter->params.info;
--	unsigned int i, j = 0;
+-	unsigned int i, j = -1;
 -	int ret;
 -
 -	early_hw_init(adapter, ai);
@@ -171,15 +140,17 @@
 -		return ret;
 -
 -	for_each_port(adapter, i) {
+-		const struct port_type_info *pti;
 -		struct port_info *p = adap2pinfo(adapter, i);
--		while (!adapter->params.vpd.port_type[j])
--			++j;
 -
--		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
--					ai->mdio_ops);
+-		while (!adapter->params.vpd.port_type[++j])
+-			;
 -
+-		pti = &port_types[adapter->params.vpd.port_type[j]];
+-		ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
+-		if (ret)
+-			return ret;
 -		p->phy.ops->power_down(&p->phy, 1);
--		++j;
 -	}
 -
 -return 0;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/cxgb3_0010_napi.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/cxgb3_0010_napi.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/cxgb3_0010_napi.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,21 @@
----
- drivers/net/cxgb3/adapter.h    |   22 +++--
- drivers/net/cxgb3/cxgb3_main.c |   94 ++++++++++++++++------
- drivers/net/cxgb3/sge.c        |  170 ++++++++++++++++++++++++-----------------
- 3 files changed, 185 insertions(+), 101 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/adapter.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/adapter.h
-+++ ofed_kernel/drivers/net/cxgb3/adapter.h
-@@ -47,13 +47,11 @@
- #include <asm/io.h>
+diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
+index 3b33ecb..21dad82 100644
+--- a/drivers/net/cxgb3/adapter.h
++++ b/drivers/net/cxgb3/adapter.h
+@@ -48,12 +48,10 @@
  
  struct vlan_group;
--struct adapter;
+ struct adapter;
 -struct sge_qset;
  
-+struct adapter;
  struct port_info {
  	struct adapter *adapter;
  	struct vlan_group *vlan_grp;
 -	struct sge_qset *qs;
- 	const struct port_type_info *port_type;
  	u8 port_id;
  	u8 rx_csum_offload;
-@@ -184,8 +182,6 @@ enum {				/* per port SGE statistics */
+ 	u8 nqsets;
+@@ -183,8 +181,6 @@ enum {				/* per port SGE statistics */
  #define T3_MAX_LRO_MAX_PKTS 64
  
  struct sge_qset {		/* an SGE queue set */
@@ -32,7 +24,7 @@
  	struct sge_rspq rspq;
  	struct sge_fl fl[SGE_RXQ_PER_SET];
  	struct sge_txq txq[SGE_TXQ_PER_SET];
-@@ -196,7 +192,7 @@ struct sge_qset {		/* an SGE queue set *
+@@ -195,7 +191,7 @@ struct sge_qset {		/* an SGE queue set */
  	int lro_enabled;
  	int lro_frag_len;
  	void *lro_va;
@@ -41,7 +33,7 @@
  	unsigned long txq_stopped;	/* which Tx queues are stopped */
  	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
  	unsigned long port_stats[SGE_PSTAT_MAX];
-@@ -241,6 +237,12 @@ struct adapter {
+@@ -240,6 +236,12 @@ struct adapter {
  	struct delayed_work adap_check_task;
  	struct work_struct ext_intr_handler_task;
  
@@ -54,7 +46,7 @@
  	struct dentry *debugfs_root;
  
  	struct mutex mdio_lock;
-@@ -267,6 +269,12 @@ static inline struct port_info *adap2pin
+@@ -266,6 +268,12 @@ static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
  	return netdev_priv(adap->port[idx]);
  }
  
@@ -67,7 +59,7 @@
  #define OFFLOAD_DEVMAP_BIT 15
  
  #define tdev2adap(d) container_of(d, struct adapter, tdev)
-@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, str
+@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -76,11 +68,11 @@
  int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
  		unsigned char *data);
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -410,17 +410,49 @@ static void setup_rss(struct adapter *ad
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 0f4c694..342d441 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -435,17 +435,49 @@ static void setup_rss(struct adapter *adap)
  		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
  }
  
@@ -98,9 +90,7 @@
 -	int i;
 +	int i, j, dummy_idx = 0;
 +	struct net_device *nd;
- 
--	for (i = 0; i < SGE_QSETS; i++) {
--		struct sge_qset *qs = &adap->sge.qs[i];
++
 +	for_each_port(adap, i) {
 +		struct net_device *dev = adap->port[i];
 +		const struct port_info *pi = netdev_priv(dev);
@@ -112,7 +102,9 @@
 +				nd = alloc_netdev(sizeof(*p), "", ether_setup);
 +				if (!nd)
 +					goto free_all;
-+
+ 
+-	for (i = 0; i < SGE_QSETS; i++) {
+-		struct sge_qset *qs = &adap->sge.qs[i];
 +				p = netdev_priv(nd);
 +				p->adapter = adap;
 +				nd->weight = 64;
@@ -137,7 +129,7 @@
  }
  
  /*
-@@ -431,18 +463,20 @@ static void init_napi(struct adapter *ad
+@@ -456,18 +488,20 @@ static void init_napi(struct adapter *adap)
  static void quiesce_rx(struct adapter *adap)
  {
  	int i;
@@ -168,7 +160,7 @@
  }
  
  /**
-@@ -455,7 +489,7 @@ static void enable_all_napi(struct adapt
+@@ -480,7 +514,7 @@ static void enable_all_napi(struct adapter *adap)
   */
  static int setup_sge_qsets(struct adapter *adap)
  {
@@ -177,7 +169,7 @@
  	unsigned int ntxq = SGE_TXQ_PER_SET;
  
  	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
-@@ -463,14 +497,15 @@ static int setup_sge_qsets(struct adapte
+@@ -488,14 +522,15 @@ static int setup_sge_qsets(struct adapter *adap)
  
  	for_each_port(adap, i) {
  		struct net_device *dev = adap->port[i];
@@ -196,7 +188,7 @@
  			if (err) {
  				t3_free_sge_resources(adap);
  				return err;
-@@ -884,6 +919,10 @@ static int cxgb_up(struct adapter *adap)
+@@ -909,6 +944,10 @@ static int cxgb_up(struct adapter *adap)
  				goto out;
  		}
  
@@ -207,7 +199,7 @@
  		err = t3_init_hw(adap, 0);
  		if (err)
  			goto out;
-@@ -896,7 +935,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -921,7 +960,6 @@ static int cxgb_up(struct adapter *adap)
  			goto out;
  
  		setup_rss(adap);
@@ -215,7 +207,7 @@
  		adap->flags |= FULL_INIT_DONE;
  	}
  
-@@ -924,7 +962,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -949,7 +987,6 @@ static int cxgb_up(struct adapter *adap)
  				      adap->name, adap)))
  		goto irq_err;
  
@@ -223,7 +215,7 @@
  	t3_sge_start(adap);
  	t3_intr_enable(adap);
  
-@@ -1061,10 +1098,8 @@ static int cxgb_open(struct net_device *
+@@ -1086,10 +1123,8 @@ static int cxgb_open(struct net_device *dev)
  	int other_ports = adapter->open_device_map & PORT_MASK;
  	int err;
  
@@ -235,7 +227,7 @@
  
  	set_bit(pi->port_id, &adapter->open_device_map);
  	if (is_offload(adapter) && !ofld_disable) {
-@@ -2700,6 +2735,7 @@ static int __devinit init_one(struct pci
+@@ -2736,6 +2771,7 @@ static int __devinit init_one(struct pci_dev *pdev,
  #ifdef CONFIG_NET_POLL_CONTROLLER
  		netdev->poll_controller = cxgb_netpoll;
  #endif
@@ -243,7 +235,7 @@
  
  		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
  	}
-@@ -2800,6 +2836,12 @@ static void __devexit remove_one(struct 
+@@ -2836,6 +2872,12 @@ static void __devexit remove_one(struct pci_dev *pdev)
  		t3_free_sge_resources(adapter);
  		cxgb_disable_msi(adapter);
  
@@ -256,11 +248,11 @@
  		for_each_port(adapter, i)
  			if (adapter->port[i])
  				free_netdev(adapter->port[i]);
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter 
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index f6bc6fe..3bbf626 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -270,7 +262,7 @@
  	memset(q, 0, sizeof(*q));
  }
  
-@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, str
+@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
  	unsigned int ndesc, pidx, credits, gen, compl;
  	const struct port_info *pi = netdev_priv(dev);
  	struct adapter *adap = pi->adapter;
@@ -279,7 +271,7 @@
  	struct sge_txq *q = &qs->txq[TXQ_ETH];
  
  	/*
-@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long 
+@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long data)
  	struct sk_buff *skb;
  	struct sge_qset *qs = (struct sge_qset *)data;
  	struct sge_txq *q = &qs->txq[TXQ_CTRL];
@@ -295,7 +287,7 @@
  
  		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
  
-@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long 
+@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long data)
  
  	spin_unlock(&q->lock);
  	wmb();
@@ -304,7 +296,7 @@
  		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
  }
  
-@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struc
+@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
  	else {
  		struct sge_qset *qs = rspq_to_qset(q);
  
@@ -314,7 +306,7 @@
  		q->rx_head = skb;
  	}
  	q->rx_tail = skb;
-@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundl
+@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev,
   *	receive handler.  Batches need to be of modest size as we do prefetches
   *	on the packets in each.
   */
@@ -357,7 +349,7 @@
  			prefetch(head->data);
  			skbs[ngathered] = head;
  			head = head->next;
-@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct 
+@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct *napi, int budget)
  		}
  		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
  	}
@@ -370,7 +362,7 @@
  }
  
  /**
-@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const
+@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const struct rsp_desc *r)
  
  /**
   *	napi_rx_handler - the NAPI handler for Rx processing
@@ -449,7 +441,7 @@
  }
  
  /**
-@@ -2448,7 +2462,8 @@ static inline int handle_responses(struc
+@@ -2448,7 +2462,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
  		return 0;
  	}
@@ -459,7 +451,7 @@
  	return 1;
  }
  
-@@ -2459,7 +2474,8 @@ static inline int handle_responses(struc
+@@ -2459,7 +2474,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -469,7 +461,7 @@
  	struct sge_rspq *q = &qs->rspq;
  
  	spin_lock(&q->lock);
-@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, vo
+@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -484,7 +476,7 @@
  		q->unhandled_irqs++;
  	spin_unlock(&q->lock);
  	return IRQ_HANDLED;
-@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, 
+@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, void *cookie)
  	return IRQ_HANDLED;
  }
  
@@ -502,7 +494,7 @@
  		return 1;
  	}
  	return 0;
-@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int 
+@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
  
  	spin_lock(&q->lock);
  
@@ -515,7 +507,7 @@
  	if (!new_packets && t3_slow_intr_handler(adap) == 0)
  		q->unhandled_irqs++;
  
-@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, voi
+@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, void *cookie)
  static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  {
  	u32 map;
@@ -527,7 +519,7 @@
  
  	t3_write_reg(adap, A_PL_CLI, 0);
  	map = t3_read_reg(adap, A_SG_DATA_INTR);
-@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq
+@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  	if (unlikely(map & F_ERRINTR))
  		t3_slow_intr_handler(adap);
  
@@ -550,7 +542,7 @@
  
  	spin_unlock(&q0->lock);
  	return IRQ_HANDLED;
-@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long d
+@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long data)
  {
  	spinlock_t *lock;
  	struct sge_qset *qs = (struct sge_qset *)data;
@@ -560,7 +552,7 @@
  
  	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
  		reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
-@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long d
+@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long data)
  		spin_unlock(&qs->txq[TXQ_OFLD].lock);
  	}
  	lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
@@ -572,7 +564,7 @@
  			u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
  
  			if (qs->fl[0].credits < qs->fl[0].size)
-@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long d
+@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long data)
   */
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
  {
@@ -586,7 +578,7 @@
  }
  
  /**
-@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_
+@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
   */
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -595,7 +587,7 @@
  {
  	int i, avail, ret = -ENOMEM;
  	struct sge_qset *q = &adapter->sge.qs[id];
-@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *ad
+@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  	}
  
  	spin_unlock_irq(&adapter->sge.reg_lock);

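The napi backport above maps the 2.6.24+ NAPI interface back onto the per-net_device polling model of older kernels, which is why the hunks allocate a dummy net_device for every SGE queue set and set nd->weight = 64. For orientation, a hedged sketch of the two interfaces being bridged (the setup helpers are illustrative names, not functions from the patch; signatures should be checked against the target kernel):

/* 2.6.24 and later: polling state lives in a struct napi_struct */
static int napi_rx_handler(struct napi_struct *napi, int budget);

static void new_style_setup(struct net_device *dev, struct sge_qset *qs)
{
	netif_napi_add(dev, &qs->napi, napi_rx_handler, 64);
}

/* before 2.6.24 (the model this backport targets): polling hangs off a
 * net_device, so each queue set gets a dummy netdev carrying ->poll/->weight */
static int old_style_poll(struct net_device *dev, int *budget);

static void old_style_setup(struct net_device *nd)
{
	nd->poll   = old_style_poll;
	nd->weight = 64;	/* per-poll packet budget */
}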
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,23 +3,32 @@
  drivers/infiniband/hw/ipath/ipath_user_sdma.c |    6 +++---
  2 files changed, 4 insertions(+), 4 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:37:35.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:41:22.000000000 -0800
 @@ -698,7 +698,7 @@ retry:
  
  	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
  			      tx->map_len, DMA_TO_DEVICE);
--	if (dma_mapping_error(&dd->pcidev->dev, addr)) {
-+	if (dma_mapping_error(addr)) {
- 		ret = -EIO;
- 		goto unlock;
- 	}
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+-	if (dma_mapping_error(&dd->pcidev->dev, addr))
++	if (dma_mapping_error(addr))
+ 		goto ioerr;
+ 
+ 	dwoffset = tx->map_len >> 2;
+@@ -739,7 +739,7 @@ retry:
+ 		dw = (len + 3) >> 2;
+ 		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
+ 				      DMA_TO_DEVICE);
+-		if (dma_mapping_error(&dd->pcidev->dev, addr))
++		if (dma_mapping_error(addr))
+ 			goto unmap;
+ 		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
+ 		/* SDmaUseLargeBuf has to be set in every descriptor */
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:18.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:54.000000000 -0800
 @@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(cons
  
  	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,

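The ipath hunks above only rewrite the dma_mapping_error() calls: from 2.6.27 the helper takes the struct device as its first argument, while the kernels this backport targets (up to 2.6.26, per the patch name) use the single-argument form. The patch simply drops the device argument; a version guard is shown here only to put the two call styles side by side (a sketch, with dd->pcidev taken from the hunks):

	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
			      tx->map_len, DMA_TO_DEVICE);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)	/* needs <linux/version.h> */
	if (dma_mapping_error(&dd->pcidev->dev, addr))	/* device + dma_addr_t */
		goto ioerr;
#else
	if (dma_mapping_error(addr))			/* dma_addr_t only */
		goto ioerr;
#endif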
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/ipoib_0100_to_2.6.21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/ipoib_0100_to_2.6.21.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/ipoib_0100_to_2.6.21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -12,11 +12,11 @@
  drivers/infiniband/ulp/ipoib/ipoib_multicast.c |   10 ++--
  5 files changed, 61 insertions(+), 64 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:56:08.000000000 +0200
-@@ -279,8 +279,6 @@ struct ipoib_dev_priv {
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:46:20.000000000 +0300
+@@ -277,8 +277,6 @@ struct ipoib_dev_priv {
  
  	struct net_device *dev;
  
@@ -25,7 +25,7 @@
  	unsigned long flags;
  
  	struct mutex vlan_mutex;
-@@ -338,6 +336,8 @@ struct ipoib_dev_priv {
+@@ -336,6 +334,8 @@ struct ipoib_dev_priv {
  
  	struct ib_event_handler event_handler;
  
@@ -34,7 +34,7 @@
  	struct net_device *parent;
  	struct list_head child_intfs;
  	struct list_head list;
-@@ -425,7 +425,7 @@ extern struct workqueue_struct *ipoib_wo
+@@ -423,7 +423,7 @@ extern struct workqueue_struct *ipoib_wo
  
  /* functions */
  
@@ -43,10 +43,10 @@
  void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
  void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:46:20.000000000 +0300
 @@ -593,7 +593,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		ipoib_dbg(priv, "cm recv error "
  			   "(status=%d, wrid=%d vend_err %x)\n",
@@ -56,7 +56,7 @@
  		if (has_srq)
  			goto repost;
  		else {
-@@ -658,7 +658,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
+@@ -646,7 +646,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		 * this packet and reuse the old buffer.
  		 */
  		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
@@ -65,7 +65,7 @@
  		goto repost;
  	}
  
-@@ -676,8 +676,8 @@ copied:
+@@ -664,8 +664,8 @@ copied:
  	skb_pull(skb, IPOIB_ENCAP_LEN);
  
  	dev->last_rx = jiffies;
@@ -76,7 +76,7 @@
  
  	skb->dev = dev;
  	/* XXX get correct PACKET_ type here */
-@@ -726,8 +726,8 @@ void ipoib_cm_send(struct net_device *de
+@@ -714,8 +714,8 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(skb->len > tx->mtu)) {
  		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
  			   skb->len, tx->mtu);
@@ -87,7 +87,7 @@
  		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
  		return;
  	}
-@@ -746,7 +746,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -734,7 +734,7 @@ void ipoib_cm_send(struct net_device *de
  	tx_req->skb = skb;
  	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
  	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
@@ -96,7 +96,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -756,7 +756,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -744,7 +744,7 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
  			       addr, skb->len))) {
  		ipoib_warn(priv, "post_send failed\n");
@@ -105,7 +105,7 @@
  		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
  		dev_kfree_skb_any(skb);
  	} else {
-@@ -793,8 +793,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
+@@ -781,8 +781,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
  	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
  
  	/* FIXME: is this right? Shouldn't we only increment on success? */
@@ -116,10 +116,10 @@
  
  	dev_kfree_skb_any(tx_req->skb);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:43:16.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:49:04.000000000 +0300
 @@ -261,7 +261,7 @@ static void ipoib_ib_handle_rx_wc(struct
  	 * this packet and reuse the old buffer.
  	 */
@@ -263,60 +263,41 @@
  		--priv->tx_outstanding;
  		ipoib_dma_unmap_tx(priv->ca, tx_req);
  		dev_kfree_skb_any(skb);
-@@ -809,6 +812,7 @@ int ipoib_ib_dev_stop(struct net_device 
+@@ -708,8 +711,7 @@ int ipoib_ib_dev_open(struct net_device 
+ 	priv->poll_timer.function = ipoib_ib_tx_timer_func;
+ 	priv->poll_timer.data = (unsigned long)dev;
+ 
+-	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_enable(&priv->napi);
++	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+ 
+ 	return 0;
+ }
+@@ -828,8 +830,8 @@ int ipoib_ib_dev_stop(struct net_device 
+ 	struct ipoib_tx_buf *tx_req;
  	int i;
  
- 	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+-	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_disable(&priv->napi);
++	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 +	netif_poll_disable(dev);
  
  	ipoib_cm_dev_stop(dev);
  
-@@ -893,6 +897,7 @@ timeout:
- 		msleep(1);
- 	}
+@@ -903,6 +905,7 @@ timeout:
  
+ 	ipoib_ah_dev_cleanup(dev);
+ 
 +	netif_poll_enable(dev);
  	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
  
  	return 0;
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:55:35.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:58:18.000000000 +0200
-@@ -106,20 +106,16 @@ int ipoib_open(struct net_device *dev)
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:44:02.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:46:20.000000000 +0300
+@@ -585,7 +585,7 @@ static void neigh_add_path(struct sk_buf
  
- 	ipoib_dbg(priv, "bringing up interface\n");
- 
--	if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
--		napi_enable(&priv->napi);
-+	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- 
- 	if (ipoib_pkey_dev_delay_open(dev))
- 		return 0;
- 
--	if (ipoib_ib_dev_open(dev)) {
--		napi_disable(&priv->napi);
--		return -EINVAL;
--	}
-+	if (ipoib_ib_dev_open(dev))
-+  		return -EINVAL;
- 
- 	if (ipoib_ib_dev_up(dev)) {
- 		ipoib_ib_dev_stop(dev, 1);
--		napi_disable(&priv->napi);
- 		return -EINVAL;
- 	}
- 
-@@ -152,7 +148,6 @@ static int ipoib_stop(struct net_device 
- 	ipoib_dbg(priv, "stopping interface\n");
- 
- 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
--	napi_disable(&priv->napi);
- 
- 	netif_stop_queue(dev);
- 
-@@ -604,7 +599,7 @@ static void neigh_add_path(struct sk_buf
- 
  	neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
  	if (!neigh) {
 -		++dev->stats.tx_dropped;
@@ -324,7 +305,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -667,7 +662,7 @@ err_list:
+@@ -646,7 +646,7 @@ err_list:
  err_path:
  	ipoib_neigh_free(dev, neigh);
  err_drop:
@@ -333,7 +314,7 @@
  	dev_kfree_skb_any(skb);
  
  	spin_unlock_irqrestore(&priv->lock, flags);
-@@ -714,7 +709,7 @@ static void unicast_arp_send(struct sk_b
+@@ -697,7 +697,7 @@ static void unicast_arp_send(struct sk_b
  			} else
  				__path_add(dev, path);
  		} else {
@@ -342,7 +323,7 @@
  			dev_kfree_skb_any(skb);
  		}
  
-@@ -733,7 +728,7 @@ static void unicast_arp_send(struct sk_b
+@@ -716,7 +716,7 @@ static void unicast_arp_send(struct sk_b
  		skb_push(skb, sizeof *phdr);
  		__skb_queue_tail(&path->queue, skb);
  	} else {
@@ -351,7 +332,7 @@
  		dev_kfree_skb_any(skb);
  	}
  
-@@ -790,7 +785,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -773,7 +773,7 @@ static int ipoib_start_xmit(struct sk_bu
  			__skb_queue_tail(&neigh->queue, skb);
  			spin_unlock_irqrestore(&priv->lock, flags);
  		} else {
@@ -360,7 +341,7 @@
  			dev_kfree_skb_any(skb);
  		}
  	} else {
-@@ -816,7 +811,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -799,7 +799,7 @@ static int ipoib_start_xmit(struct sk_bu
  					   IPOIB_QPN(phdr->hwaddr),
  					   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
  				dev_kfree_skb_any(skb);
@@ -369,7 +350,7 @@
  				return NETDEV_TX_OK;
  			}
  
-@@ -842,7 +837,7 @@ static void ipoib_timeout(struct net_dev
+@@ -825,7 +825,7 @@ static void ipoib_timeout(struct net_dev
  static int ipoib_hard_header(struct sk_buff *skb,
  			     struct net_device *dev,
  			     unsigned short type,
@@ -378,7 +359,7 @@
  {
  	struct ipoib_header *header;
  
-@@ -931,9 +926,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
+@@ -914,9 +914,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
  void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
  {
  	struct sk_buff *skb;
@@ -390,7 +371,7 @@
  		dev_kfree_skb_any(skb);
  	}
  	if (ipoib_cm_get(neigh))
-@@ -1008,10 +1004,6 @@ void ipoib_dev_cleanup(struct net_device
+@@ -991,10 +992,6 @@ void ipoib_dev_cleanup(struct net_device
  	priv->tx_ring = NULL;
  }
  
@@ -401,7 +382,7 @@
  static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
  		       void **tcph, u64 *hdr_flags, void *priv)
  {
-@@ -1069,13 +1061,13 @@ static void ipoib_setup(struct net_devic
+@@ -1052,13 +1049,13 @@ static void ipoib_setup(struct net_devic
  	dev->change_mtu		 = ipoib_change_mtu;
  	dev->hard_start_xmit	 = ipoib_start_xmit;
  	dev->tx_timeout		 = ipoib_timeout;
@@ -418,10 +399,10 @@
  
  	dev->watchdog_timeo	 = HZ;
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:46:20.000000000 +0300
 @@ -100,7 +100,7 @@ static void ipoib_mcast_free(struct ipoi
  	}
  
@@ -448,7 +429,7 @@
  			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
  		}
  		netif_tx_unlock_bh(dev);
-@@ -667,7 +668,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -677,7 +678,7 @@ void ipoib_mcast_send(struct net_device 
  	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)		||
  	    !priv->broadcast					||
  	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
@@ -457,7 +438,7 @@
  		dev_kfree_skb_any(skb);
  		goto unlock;
  	}
-@@ -682,7 +683,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -692,7 +693,7 @@ void ipoib_mcast_send(struct net_device 
  		if (!mcast) {
  			ipoib_warn(priv, "unable to allocate memory for "
  				   "multicast structure\n");
@@ -466,7 +447,7 @@
  			dev_kfree_skb_any(skb);
  			goto out;
  		}
-@@ -697,7 +698,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -707,7 +708,7 @@ void ipoib_mcast_send(struct net_device 
  		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
  			skb_queue_tail(&mcast->pkt_queue, skb);
  		else {

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,26 @@
+---
+ drivers/infiniband/ulp/ipoib/ipoib_fs.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+===================================================================
+--- ofa_kernel-1.4.orig/drivers/infiniband/ulp/ipoib/ipoib_fs.c
++++ ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+@@ -134,7 +134,7 @@ static int ipoib_mcg_open(struct inode *
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_mcg_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_mcg_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -240,7 +240,7 @@ static int ipoib_path_open(struct inode 
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_path_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_path_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 

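The casts added in this new ipoib_fs patch exist only to silence compiler warnings: on the kernels this backport targets, seq_open() takes a plain struct seq_operations * rather than the const-qualified pointer expected by the current ipoib_fs.c, so passing &ipoib_mcg_seq_ops or &ipoib_path_seq_ops directly would discard a const qualifier. A sketch of the two prototypes being bridged (the exact release that added the const qualifier is not stated in the patch):

/* newer kernels */
int seq_open(struct file *file, const struct seq_operations *op);

/* kernels targeted by this backport: the ops argument is not const,
 * hence the explicit (struct seq_operations *) cast above */
int seq_open(struct file *file, struct seq_operations *op);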
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -5,13 +5,13 @@
 
 Signed-off-by: Doron Shoham <dorons at voltaire.com>
 ---
- drivers/scsi/scsi_transport_iscsi.c |   95 ++++++++++++++++++++----------------
- 1 file changed, 55 insertions(+), 40 deletions(-)
+ drivers/scsi/scsi_transport_iscsi.c |   97 +++++++++++++++++++++---------------
+ 1 file changed, 57 insertions(+), 40 deletions(-)
 
-Index: ofed_kernel/drivers/scsi/scsi_transport_iscsi.c
+Index: ofa_kernel-1.4/drivers/scsi/scsi_transport_iscsi.c
 ===================================================================
---- ofed_kernel.orig/drivers/scsi/scsi_transport_iscsi.c
-+++ ofed_kernel/drivers/scsi/scsi_transport_iscsi.c
+--- ofa_kernel-1.4.orig/drivers/scsi/scsi_transport_iscsi.c
++++ ofa_kernel-1.4/drivers/scsi/scsi_transport_iscsi.c
 @@ -20,6 +20,8 @@
   * along with this program; if not, write to the Free Software
   * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
@@ -21,7 +21,18 @@
  #include <linux/module.h>
  #include <linux/mutex.h>
  #include <net/tcp.h>
-@@ -397,10 +399,12 @@ static void __iscsi_unblock_session(stru
+@@ -378,8 +380,10 @@ static void __iscsi_unblock_session(stru
+ 	struct iscsi_cls_session *session =
+ 			container_of(work, struct iscsi_cls_session,
+ 				     unblock_work);
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
+ 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
+ 	struct iscsi_host *ihost = shost->shost_data;
++#endif
+ 	unsigned long flags;
+ 
+ 	/*
+@@ -397,10 +401,12 @@ static void __iscsi_unblock_session(stru
  	 * the async scanning code (drivers like iscsi_tcp do login and
  	 * scanning from userspace).
  	 */
@@ -38,7 +49,7 @@
  }
  
  /**
-@@ -1294,45 +1298,56 @@ iscsi_if_recv_msg(struct sk_buff *skb, s
+@@ -1294,45 +1300,56 @@ iscsi_if_recv_msg(struct sk_buff *skb, s
   * Malformed skbs with wrong lengths or invalid creds are not processed.
   */
  static void
@@ -129,7 +140,7 @@
  	}
  	mutex_unlock(&rx_queue_mutex);
  }
-@@ -1738,7 +1753,7 @@ static __init int iscsi_transport_init(v
+@@ -1738,7 +1755,7 @@ static __init int iscsi_transport_init(v
  	return 0;
  
  release_nls:
@@ -138,7 +149,7 @@
  unregister_session_class:
  	transport_class_unregister(&iscsi_session_class);
  unregister_conn_class:
-@@ -1753,7 +1768,7 @@ unregister_transport_class:
+@@ -1753,7 +1770,7 @@ unregister_transport_class:
  static void __exit iscsi_transport_exit(void)
  {
  	destroy_workqueue(iscsi_eh_timer_workq);

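The iscsi compat hunk above now wraps the Scsi_Host/iscsi_host lookup in a kernel-version guard, because the async-scanning bookkeeping it reaches only exists on newer kernels. This is the standard backport idiom used throughout these patches; a minimal sketch of the construct as it appears in the hunk (function body elided):

#include <linux/version.h>

static void __iscsi_unblock_session(struct work_struct *work)
{
	struct iscsi_cls_session *session =
			container_of(work, struct iscsi_cls_session,
				     unblock_work);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
	/* only newer kernels track async scanning on the host */
	struct Scsi_Host *shost = iscsi_session_to_shost(session);
	struct iscsi_host *ihost = shost->shost_data;
#endif
	unsigned long flags;

	/* ... remainder of the function unchanged ... */
}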
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/iw_nes_050_to_2_6_24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/iw_nes_050_to_2_6_24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/iw_nes_050_to_2_6_24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:23:23.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -1118,19 +1118,19 @@ static ssize_t nes_show_wqm_quanta(struc
  static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
  					const char *buf, size_t count)

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/iw_nes_100_to_2_6_23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/iw_nes_100_to_2_6_23.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/iw_nes_100_to_2_6_23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/Kconfig nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig
---- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:38:45.000000000 -0500
 @@ -2,7 +2,6 @@ config INFINIBAND_NES
  	tristate "NetEffect RNIC Driver"
  	depends on PCI && INET && INFINIBAND
@@ -10,8 +10,8 @@
  	  This is a low-level driver for NetEffect RDMA enabled
  	  Network Interface Cards (RNIC).
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -360,10 +360,11 @@ struct ib_qp *nes_get_qp(struct ib_devic
   */
  static void nes_print_macaddr(struct net_device *netdev)
@@ -29,9 +29,9 @@
  
  /**
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_cm.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2008-11-07 15:26:56.000000000 -0600
-@@ -998,7 +998,6 @@ static int nes_addr_resolve_neigh(struct
+--- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1086,7 +1086,6 @@ static int nes_addr_resolve_neigh(struct
  	struct flowi fl;
  	struct neighbour *neigh;
  	int rc = -1;
@@ -39,7 +39,7 @@
  
  	memset(&fl, 0, sizeof fl);
  	fl.nl_u.ip4_u.daddr = htonl(dst_ip);
-@@ -1012,8 +1011,11 @@ static int nes_addr_resolve_neigh(struct
+@@ -1100,8 +1099,11 @@ static int nes_addr_resolve_neigh(struct
  	if (neigh) {
  		if (neigh->nud_state & NUD_VALID) {
  			nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -53,7 +53,7 @@
  			nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
  					     dst_ip, NES_ARP_ADD);
  			rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
-@@ -1042,7 +1044,6 @@ static struct nes_cm_node *make_cm_node(
+@@ -1130,7 +1132,6 @@ static struct nes_cm_node *make_cm_node(
  	int arpindex = 0;
  	struct nes_device *nesdev;
  	struct nes_adapter *nesadapter;
@@ -61,7 +61,7 @@
  
  	/* create an hte and cm_node for this instance */
  	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
-@@ -1111,8 +1112,11 @@ static struct nes_cm_node *make_cm_node(
+@@ -1197,8 +1198,11 @@ static struct nes_cm_node *make_cm_node(
  
  	/* copy the mac addr to node context */
  	memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
@@ -76,8 +76,8 @@
  	add_hte_node(cm_core, cm_node);
  	atomic_inc(&cm_nodes_created);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2008-11-07 15:38:18.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:38:45.000000000 -0500
 @@ -38,14 +38,9 @@
  #include <linux/ip.h>
  #include <linux/tcp.h>
@@ -90,10 +90,10 @@
 -module_param(nes_lro_max_aggr, uint, 0444);
 -MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
 -
- static u32 crit_err_count;
- u32 int_mod_timer_init;
- u32 int_mod_cq_depth_256;
-@@ -1540,25 +1535,6 @@ static void nes_rq_wqes_timeout(unsigned
+ static int wide_ppm_offset;
+ module_param(wide_ppm_offset, int, 0644);
+ MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm");
+@@ -1531,25 +1526,6 @@ static void nes_rq_wqes_timeout(unsigned
  }
  
  
@@ -119,7 +119,7 @@
  /**
   * nes_init_nic_qp
   */
-@@ -1783,14 +1759,6 @@ int nes_init_nic_qp(struct nes_device *n
+@@ -1773,14 +1749,6 @@ int nes_init_nic_qp(struct nes_device *n
  			jumbomode = 1;
  		nes_nic_init_timer_defaults(nesdev, jumbomode);
  	}
@@ -134,7 +134,7 @@
  	return 0;
  }
  
-@@ -2541,7 +2509,7 @@ static void nes_nic_napi_ce_handler(stru
+@@ -2556,7 +2524,7 @@ static void nes_nic_napi_ce_handler(stru
  {
  	struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
  
@@ -143,7 +143,7 @@
  }
  
  
-@@ -2576,13 +2544,10 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2591,13 +2559,10 @@ void nes_nic_ce_handler(struct nes_devic
  	u16 pkt_type;
  	u16 rqes_processed = 0;
  	u8 sq_cqes = 0;
@@ -157,7 +157,7 @@
  	do {
  		if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
  				NES_NIC_CQE_VALID) {
-@@ -2714,17 +2679,9 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2729,17 +2694,9 @@ void nes_nic_ce_handler(struct nes_devic
  							>> 16);
  					nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
  							nesvnic->netdev->name, vlan_tag);
@@ -178,7 +178,7 @@
  
  skip_rx_indicate0:
  				nesvnic->netdev->last_rx = jiffies;
-@@ -2755,8 +2712,6 @@ skip_rx_indicate0:
+@@ -2770,8 +2727,6 @@ skip_rx_indicate0:
  
  	} while (1);
  
@@ -188,18 +188,18 @@
  		barrier();
  		/* restart the queue if it had been stopped */
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.h nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:38:45.000000000 -0500
 @@ -33,8 +33,6 @@
  #ifndef __NES_HW_H
  #define __NES_HW_H
  
 -#include <linux/inet_lro.h>
 -
+ #define NES_PHY_TYPE_CX4       1
  #define NES_PHY_TYPE_1G        2
  #define NES_PHY_TYPE_IRIS      3
- #define NES_PHY_TYPE_ARGUS     4
-@@ -991,8 +989,6 @@ struct nes_hw_tune_timer {
+@@ -1005,8 +1003,6 @@ struct nes_hw_tune_timer {
  #define NES_TIMER_ENABLE_LIMIT      4
  #define NES_MAX_LINK_INTERRUPTS     128
  #define NES_MAX_LINK_CHECK          200
@@ -208,7 +208,7 @@
  
  struct nes_adapter {
  	u64              fw_ver;
-@@ -1168,7 +1164,6 @@ struct nes_vnic {
+@@ -1194,7 +1190,6 @@ struct nes_vnic {
  	u32               msg_enable;
  	/* u32 tx_avail; */
  	__be32            local_ipaddr;
@@ -216,7 +216,7 @@
  	spinlock_t           tx_lock;	/* could use netdev tx lock? */
  	struct timer_list    rq_wqes_timer;
  	u32                  nic_mem_size;
-@@ -1196,9 +1191,6 @@ struct nes_vnic {
+@@ -1222,9 +1217,6 @@ struct nes_vnic {
  	u8  of_device_registered;
  	u8  rdma_enabled;
  	u8  rx_checksum_disabled;
@@ -227,8 +227,8 @@
  
  struct nes_ib_device {
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
 @@ -96,35 +96,38 @@ static int nics_per_function = 1;
  /**
   * nes_netdev_poll
@@ -292,7 +292,7 @@
  	netif_stop_queue(netdev);
  	list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
  		first_nesvnic = container_of(list_pos, struct nes_vnic, list);
-@@ -797,14 +798,16 @@ static int nes_netdev_set_mac_address(st
+@@ -775,14 +776,16 @@ static int nes_netdev_set_mac_address(st
  	int i;
  	u32 macaddr_low;
  	u16 macaddr_high;
@@ -312,7 +312,7 @@
  	macaddr_high  = ((u16)netdev->dev_addr[0]) << 8;
  	macaddr_high += (u16)netdev->dev_addr[1];
  	macaddr_low   = ((u32)netdev->dev_addr[2]) << 24;
-@@ -909,11 +912,11 @@ static void nes_netdev_set_multicast_lis
+@@ -887,11 +890,11 @@ static void nes_netdev_set_multicast_lis
  			if (mc_index >= max_pft_entries_avaiable)
  				break;
  			if (multicast_addr) {
@@ -329,7 +329,7 @@
  				macaddr_high  = ((u16)multicast_addr->dmi_addr[0]) << 8;
  				macaddr_high += (u16)multicast_addr->dmi_addr[1];
  				macaddr_low   = ((u32)multicast_addr->dmi_addr[2]) << 24;
-@@ -1063,9 +1066,6 @@ static const char nes_ethtool_stringset[
+@@ -1040,9 +1043,6 @@ static const char nes_ethtool_stringset[
  	"CQ Depth 32",
  	"CQ Depth 128",
  	"CQ Depth 256",
@@ -339,17 +339,17 @@
  };
  
  #define NES_ETHTOOL_STAT_COUNT  ARRAY_SIZE(nes_ethtool_stringset)
-@@ -1263,9 +1263,6 @@ static void nes_netdev_get_ethtool_stats
- 	target_stat_values[52] = int_mod_cq_depth_32;
- 	target_stat_values[53] = int_mod_cq_depth_128;
- 	target_stat_values[54] = int_mod_cq_depth_256;
--	target_stat_values[55] = nesvnic->lro_mgr.stats.aggregated;
--	target_stat_values[56] = nesvnic->lro_mgr.stats.flushed;
--	target_stat_values[57] = nesvnic->lro_mgr.stats.no_desc;
+@@ -1240,9 +1240,6 @@ static void nes_netdev_get_ethtool_stats
+ 	target_stat_values[++index] = int_mod_cq_depth_32;
+ 	target_stat_values[++index] = int_mod_cq_depth_128;
+ 	target_stat_values[++index] = int_mod_cq_depth_256;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
  
  }
  
-@@ -1542,8 +1539,6 @@ static struct ethtool_ops nes_ethtool_op
+@@ -1525,8 +1522,6 @@ static struct ethtool_ops nes_ethtool_op
  	.set_sg = ethtool_op_set_sg,
  	.get_tso = ethtool_op_get_tso,
  	.set_tso = ethtool_op_set_tso,
@@ -358,7 +358,7 @@
  };
  
  
-@@ -1615,7 +1610,8 @@ struct net_device *nes_netdev_init(struc
+@@ -1598,7 +1593,8 @@ struct net_device *nes_netdev_init(struc
  	netdev->type = ARPHRD_ETHER;
  	netdev->features = NETIF_F_HIGHDMA;
  	netdev->ethtool_ops = &nes_ethtool_ops;
@@ -368,7 +368,7 @@
  	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
  	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
  	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
-@@ -1648,7 +1644,6 @@ struct net_device *nes_netdev_init(struc
+@@ -1630,7 +1626,6 @@ struct net_device *nes_netdev_init(struc
  
  	if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
  		netdev->features |= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/iw_nes_200_to_2_6_22.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/iw_nes_200_to_2_6_22.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/iw_nes_200_to_2_6_22.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:52:27.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -593,16 +593,22 @@ static int __devinit nes_probe(struct pc
  						nesdev->nesadapter->port_count;
  	}
@@ -35,9 +35,9 @@
  
  	tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.h nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h
---- linux-2.6/drivers/infiniband/hw/nes/nes.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2008-11-07 15:52:27.000000000 -0600
-@@ -276,14 +276,7 @@ static inline __le32 get_crc_value(struc
+--- linux-2.6/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:38:45.000000000 -0500
+@@ -274,14 +274,7 @@ static inline __le32 get_crc_value(struc
  	u32 crc_value;
  	crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad));
  

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/iw_nes_210_to_2_6_21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/iw_nes_210_to_2_6_21.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/iw_nes_210_to_2_6_21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,23 @@
+diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1549,6 +1549,11 @@ static void nes_netdev_vlan_rx_register(
+ 	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ }
+ 
++static void nes_netdev_vlan_rx_kill_vid(struct net_device *netdev, 
++					unsigned short vid)
++{
++}
++
+ 
+ /**
+  * nes_netdev_init - initialize network device
+@@ -1598,6 +1603,7 @@ struct net_device *nes_netdev_init(struc
+ 	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
+ 	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ 	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
++	netdev->vlan_rx_kill_vid = nes_netdev_vlan_rx_kill_vid;
+ 
+ 	/* Fill in the port structure */
+ 	nesvnic->netdev = netdev;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/mlx4_en_0099_no_multiqueue.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/mlx4_en_0099_no_multiqueue.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/mlx4_en_0099_no_multiqueue.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,73 @@
+diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
+index bce700a..a9ee0d7 100644
+--- a/drivers/net/mlx4/en_netdev.c
++++ b/drivers/net/mlx4/en_netdev.c
+@@ -963,7 +963,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	int i;
+ 	int err;
+ 
+-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
++	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+ 	if (dev == NULL) {
+ 		mlx4_err(mdev, "Net device allocation failed\n");
+ 		return -ENOMEM;
+@@ -1036,7 +1036,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	dev->open = &mlx4_en_open;
+ 	dev->stop = &mlx4_en_close;
+ 	dev->hard_start_xmit = &mlx4_en_xmit;
+-	dev->select_queue = &mlx4_en_select_queue;
+ 	dev->get_stats = &mlx4_en_get_stats;
+ 	dev->set_multicast_list = &mlx4_en_set_multicast;
+ 	dev->set_mac_address = &mlx4_en_set_mac;
+diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
+index 3d8246f..72e166f 100644
+--- a/drivers/net/mlx4/en_tx.c
++++ b/drivers/net/mlx4/en_tx.c
+@@ -392,7 +392,7 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+ 			 *   transmission on that ring would stop the queue.
+ 			 */
+ 			ring->blocked = 0;
+-			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
++			netif_wake_queue(dev);
+ 			priv->port_stats.wake_queue++;
+ 		}
+ 	}
+@@ -612,7 +612,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
+ 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ }
+ 
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
++static int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	u16 vlan_tag = 0;
+@@ -703,7 +703,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+-	tx_ind = skb->queue_mapping;
++	tx_ind = mlx4_en_select_queue(dev, skb);
+ 	ring = &priv->tx_ring[tx_ind];
+ 	if (priv->vlgrp && vlan_tx_tag_present(skb))
+ 		vlan_tag = vlan_tx_tag_get(skb);
+@@ -713,7 +713,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ 		/* every full Tx ring stops queue.
+ 		 * TODO: implement multi-queue support (per-queue stop) */
+-		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
++		netif_stop_queue(dev);
+ 		ring->blocked = 1;
+ 		priv->port_stats.queue_stopped++;
+ 
+diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
+index adef17c..995e318 100644
+--- a/drivers/net/mlx4/mlx4_en.h
++++ b/drivers/net/mlx4/mlx4_en.h
+@@ -521,7 +521,6 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq);
+ void mlx4_en_poll_tx_cq(unsigned long data);
+ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+ 
+ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+ 			   u32 size, u16 stride);
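
A note on the backport above: mlx4_en_0099_no_multiqueue.patch replaces the per-ring TX queue API, which 2.6.16-era kernels lack, with the single device-wide queue API. A minimal sketch of the two idioms, assuming a hypothetical HAVE_MULTIQUEUE_TX guard (illustrative only, not part of the patch):

#include <linux/netdevice.h>

/* Illustrative only -- not part of the patch above.  With multiqueue TX,
 * each ring maps to its own struct netdev_queue; on 2.6.16-era kernels the
 * whole device shares a single TX queue, so a full ring stops everything. */
static void mlx4_en_ring_stop(struct net_device *dev, int tx_ind)
{
#ifdef HAVE_MULTIQUEUE_TX		/* assumed feature macro */
	netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
#else
	netif_stop_queue(dev);
#endif
}

The wake path is symmetrical (netif_tx_wake_queue() versus netif_wake_queue()), which is the substitution the patch performs in en_tx.c.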

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/mlx4_en_0100_to_2.6.24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/mlx4_en_0100_to_2.6.24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/mlx4_en_0100_to_2.6.24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,33 +1,5 @@
-From 6d0a6cc6981404e95297924c9a8f169b3447a98e Mon Sep 17 00:00:00 2001
-From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
-Date: Thu, 2 Oct 2008 11:26:30 +0300
-Subject: [PATCH] mlx4_en: LRO backport patch
-
-Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
----
- drivers/net/mlx4/Makefile    |    2 +-
- drivers/net/mlx4/en_cq.c     |   23 ++-
- drivers/net/mlx4/en_lro.c    |  529 ++++++++++++++++++++++++++++++++++++++++++
- drivers/net/mlx4/en_netdev.c |    5 +-
- drivers/net/mlx4/en_params.c |   19 +--
- drivers/net/mlx4/en_rx.c     |  121 +++-------
- drivers/net/mlx4/en_tx.c     |    1 +
- drivers/net/mlx4/mlx4_en.h   |   60 +++++-
- 8 files changed, 646 insertions(+), 114 deletions(-)
- create mode 100644 drivers/net/mlx4/en_lro.c
-
-diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
-index acb1b91..e6bfdec 100644
---- a/drivers/net/mlx4/Makefile
-+++ b/drivers/net/mlx4/Makefile
-@@ -5,4 +5,4 @@ mlx4_core-y :=  alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-
- obj-$(CONFIG_MLX4_EN)                  += mlx4_en.o
-
--mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o
-+mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o en_lro.o
 diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
-index cf9c5df..2348bfc 100644
+index a4d6e46..440633a 100644
 --- a/drivers/net/mlx4/en_cq.c
 +++ b/drivers/net/mlx4/en_cq.c
 @@ -34,6 +34,7 @@
@@ -38,7 +10,7 @@
  
  #include "mlx4_en.h"
  
-@@ -100,8 +101,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -106,8 +107,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  		cq->timer.function = mlx4_en_poll_tx_cq;
  		cq->timer.data = (unsigned long) cq;
  	} else {
@@ -58,7 +30,7 @@
  	}
  
  	return 0;
-@@ -123,8 +133,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -129,8 +139,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  
  	if (cq->is_tx)
  		del_timer(&cq->timer);
@@ -74,548 +46,8 @@
  
  	mlx4_cq_free(mdev->dev, &cq->mcq);
  }
-diff --git a/drivers/net/mlx4/en_lro.c b/drivers/net/mlx4/en_lro.c
-new file mode 100644
-index 0000000..9d17376
---- /dev/null
-+++ b/drivers/net/mlx4/en_lro.c
-@@ -0,0 +1,534 @@
-+/*
-+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses.  You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ *     Redistribution and use in source and binary forms, with or
-+ *     without modification, are permitted provided that the following
-+ *     conditions are met:
-+ *
-+ *      - Redistributions of source code must retain the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer.
-+ *
-+ *      - Redistributions in binary form must reproduce the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer in the documentation and/or other materials
-+ *        provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ */
-+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <net/tcp.h>
-+#include <linux/if_vlan.h>
-+#include <linux/delay.h>
-+
-+#include "mlx4_en.h"
-+
-+/* LRO hash function - using sum of source and destination port LSBs is
-+ * good enough */
-+#define LRO_INDEX(th, size) \
-+	((*((u8*) &th->source + 1) + *((u8*) &th->dest + 1)) & (size - 1))
-+
-+/* #define CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+static void mlx4_en_lro_validate(struct mlx4_en_priv* priv, struct mlx4_en_lro *lro)
-+{
-+	int i;
-+	int size, size2;
-+	struct sk_buff *skb = lro->skb;
-+	skb_frag_t *frags;
-+	int len, len2;
-+	int cur_skb = 0;
-+
-+	/* Sum fragment sizes of first skb */
-+	len = skb->len;
-+	size = skb_headlen(skb);
-+	frags = skb_shinfo(skb)->frags;
-+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+		size += frags[i].size;
-+	}
-+
-+	/* Add in fragments of linked skb's */
-+	skb = skb_shinfo(skb)->frag_list;
-+	while (skb) {
-+		cur_skb++;
-+		len2 = skb->len;
-+		if (skb_headlen(skb)) {
-+			mlx4_err(priv->mdev, "Bad LRO format: non-zero headlen "
-+				  "in fraglist (skb:%d)\n", cur_skb);
-+			return;
-+		}
-+
-+		size2 = 0;
-+		frags = skb_shinfo(skb)->frags;
-+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+			size2 += frags[i].size;
-+		}
-+
-+		if (size2 != len2) {
-+			mlx4_err(priv->mdev, "Bad skb size:%d in LRO fraglist. "
-+			          "Expected:%d (skb:%d)\n", size2, len2, cur_skb);
-+			return;
-+		}
-+		size += size2;
-+		skb = skb->next;
-+	}
-+
-+	if (size != len)
-+		mlx4_err(priv->mdev, "Bad LRO size:%d expected:%d\n", size, len);
-+}
-+#endif /* MLX4_EN_DEBUG_LRO */
-+
-+static void mlx4_en_lro_flush_single(struct mlx4_en_priv* priv,
-+		   struct mlx4_en_rx_ring* ring, struct mlx4_en_lro *lro)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct sk_buff *skb = lro->skb;
-+	struct iphdr *iph = (struct iphdr *) skb->data;
-+	struct tcphdr *th = (struct tcphdr *)(iph + 1);
-+	unsigned int headlen = skb_headlen(skb);
-+	__wsum tcp_hdr_csum;
-+	u32 *ts;
-+
-+	/* Update IP length and checksum */
-+	iph->tot_len = htons(lro->tot_len);
-+	iph->check = 0;
-+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-+
-+	/* Update latest TCP ack, window, psh, and timestamp */
-+	th->ack_seq = lro->ack_seq;
-+	th->window = lro->window;
-+	th->psh = !!lro->psh;
-+	if (lro->has_timestamp) {
-+		ts = (u32 *) (th + 1);
-+		ts[1] = htonl(lro->tsval);
-+		ts[2] = lro->tsecr;
-+	}
-+	th->check = 0;
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, 0);
-+	lro->data_csum = csum_add(lro->data_csum, tcp_hdr_csum);
-+	th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
-+				      lro->tot_len - (iph->ihl << 2),
-+				      IPPROTO_TCP, lro->data_csum);
-+
-+	/* Update skb */
-+	skb->len = lro->tot_len;
-+	skb->data_len = lro->tot_len - headlen;
-+	skb->truesize = skb->len + sizeof(struct sk_buff);
-+	skb_shinfo(skb)->gso_size = lro->mss;
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+	mlx4_en_lro_validate(priv, lro);
-+#endif /* CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+	/* Push it up the stack */
-+	if (priv->vlgrp && lro->has_vlan)
-+		vlan_hwaccel_receive_skb(skb, priv->vlgrp,
-+					be16_to_cpu(lro->vlan_prio));
-+	else
-+		netif_receive_skb(skb);
-+	priv->dev->last_rx = jiffies;
-+
-+	/* Increment stats */
-+	priv->port_stats.lro_flushed++;
-+
-+	/* Move session back to the free list */
-+	hlist_del(&lro->node);
-+	hlist_del(&lro->flush_node);
-+	hlist_add_head(&lro->node, &ring->lro_free);
-+}
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_flush, flush_node) {
-+		if (all || time_after(jiffies, lro->expires))
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+	}
-+}
-+
-+static inline int mlx4_en_lro_append(struct mlx4_en_priv *priv,
-+				   struct mlx4_en_lro *lro,
-+				   struct mlx4_en_rx_desc *rx_desc,
-+				   struct skb_frag_struct *skb_frags,
-+				   struct mlx4_en_rx_alloc *page_alloc,
-+				   unsigned int data_len,
-+				   int hlen)
-+{
-+	struct sk_buff *skb = lro->skb_last;
-+	struct skb_shared_info *info;
-+	struct skb_frag_struct *frags_copy;
-+	int nr_frags;
-+
-+	if (skb_shinfo(skb)->nr_frags + priv->num_frags > MAX_SKB_FRAGS)
-+		return -ENOMEM;
-+
-+	info = skb_shinfo(skb);
-+
-+	/* Copy fragments from descriptor ring to skb */
-+	frags_copy = info->frags + info->nr_frags;
-+	nr_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
-+						frags_copy,
-+						page_alloc,
-+						data_len + hlen);
-+	if (!nr_frags) {
-+		mlx4_dbg(DRV, priv, "Failed completing rx desc during LRO append\n");
-+		return -ENOMEM;
-+	}
-+
-+	/* Skip over headers */
-+	frags_copy[0].page_offset += hlen;
-+
-+	if (nr_frags == 1)
-+		frags_copy[0].size = data_len;
-+	else {
-+		/* Adjust size of last fragment to match packet length.
-+		 * Note: if this fragment is also the first one, the
-+		 *       operation is completed in the next line */
-+		frags_copy[nr_frags - 1].size = hlen + data_len -
-+				priv->frag_info[nr_frags - 1].frag_prefix_size;
-+
-+		/* Adjust size of first fragment */
-+		frags_copy[0].size -= hlen;
-+	}
-+
-+	/* Update skb bookkeeping */
-+	skb->len += data_len;
-+	skb->data_len += data_len;
-+	info->nr_frags += nr_frags;
-+	return 0;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_find_session(struct mlx4_en_dev *mdev,
-+						       struct mlx4_en_rx_ring *ring,
-+						       struct iphdr *iph,
-+						       struct tcphdr *th)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node;
-+	int index = LRO_INDEX(th, mdev->profile.num_lro);
-+	struct hlist_head *list = &ring->lro_hash[index];
-+
-+	hlist_for_each_entry(lro, node, list, node) {
-+		if (lro->sport_dport == *((u32*) &th->source) &&
-+		    lro->saddr == iph->saddr &&
-+		    lro->daddr == iph->daddr)
-+			return lro;
-+	}
-+	return NULL;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_alloc_session(struct mlx4_en_priv *priv,
-+							struct mlx4_en_rx_ring *ring)
-+{
-+	return hlist_empty(&ring->lro_free) ? NULL :
-+		hlist_entry(ring->lro_free.first, struct mlx4_en_lro, node);
-+}
-+
-+static __wsum mlx4_en_lro_tcp_data_csum(struct iphdr *iph,
-+					struct tcphdr *th, int len)
-+{
-+	__wsum tcp_csum;
-+	__wsum tcp_hdr_csum;
-+	__wsum tcp_ps_hdr_csum;
-+
-+	tcp_csum = ~csum_unfold(th->check);
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, tcp_csum);
-+
-+	tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-+					     len + (th->doff << 2),
-+					     IPPROTO_TCP, 0);
-+
-+	return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
-+			tcp_ps_hdr_csum);
-+}
-+
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+					  struct mlx4_en_rx_desc *rx_desc,
-+					  struct skb_frag_struct *skb_frags,
-+					  unsigned int length,
-+					  struct mlx4_cqe *cqe)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct mlx4_en_lro *lro;
-+	struct sk_buff *skb;
-+	struct iphdr *iph;
-+	struct tcphdr *th;
-+	dma_addr_t dma;
-+	int tcp_hlen;
-+	int tcp_data_len;
-+	int hlen;
-+	u16 ip_len;
-+	void *va;
-+	u32 *ts;
-+	u32 seq;
-+	u32 tsval = (u32) ~0UL;
-+	u32 tsecr = 0;
-+	u32 ack_seq;
-+	u16 window;
-+
-+	/* This packet is eligible for LRO if it is:
-+	 * - DIX Ethernet (type interpretation)
-+	 * - TCP/IP (v4)
-+	 * - without IP options
-+	 * - not an IP fragment */
-+	if (!mlx4_en_can_lro(cqe->status))
-+			return -1;
-+
-+	/* Get pointer to TCP header. We already know that the packet is DIX Ethernet/IPv4/TCP
-+	 * with no VLAN (HW stripped it) and no IP options */
-+	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
-+	iph = va + ETH_HLEN;
-+	th = (struct tcphdr *)(iph + 1);
-+
-+	/* Synchronsize headers for processing */
-+	dma = be64_to_cpu(rx_desc->data[0].addr);
-+#define MAX_LRO_HEADER		(ETH_HLEN + \
-+				 sizeof(*iph) + \
-+				 sizeof(*th) + \
-+				 TCPOLEN_TSTAMP_ALIGNED)
-+	dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
-+				      MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+
-+	/* We only handle aligned timestamp options */
-+	tcp_hlen = (th->doff << 2);
-+	if (tcp_hlen == sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) {
-+		ts = (u32*) (th + 1);
-+		if (unlikely(*ts != htonl((TCPOPT_NOP << 24) |
-+					  (TCPOPT_NOP << 16) |
-+					  (TCPOPT_TIMESTAMP << 8) |
-+					  TCPOLEN_TIMESTAMP)))
-+			goto sync_device;
-+		tsval = ntohl(ts[1]);
-+		tsecr = ts[2];
-+	} else if (tcp_hlen != sizeof(*th))
-+		goto sync_device;
-+	
-+
-+	/* At this point we know we have a TCP packet that is likely to be
-+	 * eligible for LRO. Therefore, see now if we have an oustanding
-+	 * session that corresponds to this packet so we could flush it if
-+	 * something still prevents LRO */
-+	lro = mlx4_en_lro_find_session(mdev, ring, iph, th);
-+
-+	/* ensure no bits set besides ack or psh */
-+	if (th->fin || th->syn || th->rst || th->urg || th->ece ||
-+	    th->cwr || !th->ack) {
-+		if (lro) {
-+			/* First flush session to keep packets in-order */
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		}
-+		goto sync_device;
-+	}
-+
-+	/* Get ip length and verify that the frame is big enough */
-+	ip_len = ntohs(iph->tot_len);
-+	if (unlikely(length < ETH_HLEN + ip_len)) {
-+		mlx4_warn(mdev, "Cannot LRO - ip payload exceeds frame!\n");
-+		goto sync_device;
-+	}
-+
-+	/* Get TCP payload length */
-+	tcp_data_len = ip_len - tcp_hlen - sizeof(struct iphdr);
-+	seq = ntohl(th->seq);
-+	if (!tcp_data_len)
-+		goto flush_session;
-+
-+	if (lro) {
-+		/* Check VLAN tag */
-+		if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+			if (cqe->sl_vid != lro->vlan_prio || !lro->has_vlan) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else if (lro->has_vlan) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* Check sequence number */
-+		if (unlikely(seq != lro->next_seq)) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* If the cummulative IP length is over 64K, flush and start
-+		 * a new session */
-+		if (lro->tot_len + tcp_data_len > 0xffff) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto new_session;
-+		}
-+
-+		/* Check timestamps */
-+		if (tcp_hlen != sizeof(*th)) {
-+			if (unlikely(lro->tsval > tsval || !tsecr))
-+				goto sync_device;
-+		}
-+
-+		window = th->window;
-+		ack_seq = th->ack_seq;
-+		if (likely(tcp_data_len)) {
-+			/* Append the data! */
-+			hlen = ETH_HLEN + sizeof(struct iphdr) + tcp_hlen;
-+			if (mlx4_en_lro_append(priv, lro, rx_desc, skb_frags,
-+							ring->page_alloc,
-+							tcp_data_len, hlen)) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else {
-+			/* No data */
-+			dma_sync_single_range_for_device(&mdev->dev->pdev->dev, dma,
-+							 0, MAX_LRO_HEADER,
-+							 DMA_FROM_DEVICE);
-+		}
-+
-+		/* Update session */
-+		lro->psh |= th->psh;
-+		lro->next_seq += tcp_data_len;
-+		lro->data_csum = csum_block_add(lro->data_csum,
-+					mlx4_en_lro_tcp_data_csum(iph, th,
-+								  tcp_data_len),
-+					lro->tot_len);
-+		lro->tot_len += tcp_data_len;
-+		lro->tsval = tsval;
-+		lro->tsecr = tsecr;
-+		lro->ack_seq = ack_seq;
-+		lro->window = window;
-+		if (tcp_data_len > lro->mss)
-+			lro->mss = tcp_data_len;
-+		priv->port_stats.lro_aggregated++;
-+		if (th->psh)
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		return 0;
-+	}
-+
-+new_session:
-+	if (th->psh)
-+		goto sync_device;
-+	lro = mlx4_en_lro_alloc_session(priv, ring);
-+	if (lro) {
-+		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags, ring->page_alloc,
-+							     ETH_HLEN + ip_len);
-+		if (skb) {
-+			int index;
-+
-+			/* Add in the skb */
-+			lro->skb = skb;
-+			lro->skb_last = skb;
-+			skb->protocol = eth_type_trans(skb, priv->dev);
-+			skb->ip_summed = CHECKSUM_UNNECESSARY;
-+
-+			/* Initialize session */
-+			lro->saddr = iph->saddr;
-+			lro->daddr = iph->daddr;
-+			lro->sport_dport = *((u32*) &th->source);
-+
-+			lro->next_seq = seq + tcp_data_len;
-+			lro->tot_len = ip_len;
-+			lro->psh = th->psh;
-+			lro->ack_seq = th->ack_seq;
-+			lro->window = th->window;
-+			lro->mss = tcp_data_len;
-+			lro->data_csum = mlx4_en_lro_tcp_data_csum(iph, th,
-+						tcp_data_len);
-+
-+			/* Handle vlans */
-+			if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+				lro->vlan_prio = cqe->sl_vid;
-+				lro->has_vlan = 1;
-+			} else
-+				lro->has_vlan = 0;
-+
-+			/* Handle timestamps */
-+			if (tcp_hlen != sizeof(*th)) {
-+				lro->tsval = tsval;
-+				lro->tsecr = tsecr;
-+				lro->has_timestamp = 1;
-+			} else {
-+				lro->tsval = (u32) ~0UL;
-+				lro->has_timestamp = 0;
-+			}
-+
-+			/* Activate this session */
-+			lro->expires = jiffies + HZ / 25;
-+			hlist_del(&lro->node);
-+			index = LRO_INDEX(th, mdev->profile.num_lro);
-+
-+			hlist_add_head(&lro->node, &ring->lro_hash[index]);
-+			hlist_add_head(&lro->flush_node, &ring->lro_flush);
-+			priv->port_stats.lro_aggregated++;
-+			return 0;
-+		}
-+	} else {
-+		priv->port_stats.lro_no_desc++;
-+	}
-+
-+flush_session:
-+	if (lro)
-+		mlx4_en_lro_flush_single(priv, ring, lro);
-+sync_device:
-+	dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
-+					 MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+	return -1;
-+}
-+
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_free, node) {
-+		hlist_del(&lro->node);
-+		kfree(lro);
-+	}
-+	kfree(ring->lro_hash);
-+}
-+
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro)
-+{
-+	struct mlx4_en_lro *lro;
-+	int i;
-+
-+	INIT_HLIST_HEAD(&ring->lro_free);
-+	INIT_HLIST_HEAD(&ring->lro_flush);
-+	ring->lro_hash = kmalloc(sizeof(struct hlist_head) * num_lro,
-+				 GFP_KERNEL);
-+	if (!ring->lro_hash)
-+		return -ENOMEM;
-+
-+	for (i = 0; i < num_lro; i++) {
-+		INIT_HLIST_HEAD(&ring->lro_hash[i]);
-+		lro = kzalloc(sizeof(struct mlx4_en_lro), GFP_KERNEL);
-+		if (!lro) {
-+			mlx4_en_lro_destroy(ring);
-+			return -ENOMEM;
-+		}
-+		INIT_HLIST_NODE(&lro->node);
-+		INIT_HLIST_NODE(&lro->flush_node);
-+		hlist_add_head(&lro->node, &ring->lro_free);
-+	}
-+	return 0;
-+}
-+
-+
 diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
-index 6199f4c..92f91d2 100644
+index 2d57ae6..3b394d7 100644
 --- a/drivers/net/mlx4/en_netdev.c
 +++ b/drivers/net/mlx4/en_netdev.c
 @@ -333,7 +333,8 @@ void mlx4_en_netpoll(struct net_device *dev)
@@ -625,10 +57,10 @@
 -		napi_synchronize(&cq->napi);
 +		while (test_bit(__LINK_STATE_RX_SCHED, &cq->poll_dev->state))
 +			msleep(1);
- 		mlx4_en_process_rx_cq(dev, cq, 0);
- 		spin_unlock_irqrestore(&cq->lock, flags);
- 	}
-@@ -757,8 +758,6 @@ void mlx4_en_stop_port(struct net_device *dev)
+ 		if (priv->rx_ring[i].use_frags)
+ 			mlx4_en_process_rx_cq(dev, cq, 0);
+ 		else
+@@ -755,8 +756,6 @@ void mlx4_en_stop_port(struct net_device *dev)
  	/* Free RX Rings */
  	for (i = 0; i < priv->rx_ring_num; i++) {
  		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -637,226 +69,11 @@
  		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
  	}
  }
-diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
-index 6e96d0a..2b0ca84 100644
---- a/drivers/net/mlx4/en_params.c
-+++ b/drivers/net/mlx4/en_params.c
-@@ -142,21 +142,6 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
-  * Ethtool support
-  */
- 
--static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
--{
--	int i;
--
--	priv->port_stats.lro_aggregated = 0;
--	priv->port_stats.lro_flushed = 0;
--	priv->port_stats.lro_no_desc = 0;
--
--	for (i = 0; i < priv->rx_ring_num; i++) {
--		priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
--		priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
--		priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
--	}
--}
--
- static void
- mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
- {
-@@ -266,8 +251,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
- 
- 	spin_lock_bh(&priv->stats_lock);
- 
--	mlx4_en_update_lro_stats(priv);
--
- 	for (i = 0; i < NUM_MAIN_STATS; i++)
- 		data[index++] = ((unsigned long *) &priv->stats)[i];
- 	for (i = 0; i < NUM_PORT_STATS; i++)
-@@ -457,7 +440,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
- 	.get_rx_csum = mlx4_en_get_rx_csum,
- 	.set_rx_csum = mlx4_en_set_rx_csum,
- 	.get_tx_csum = ethtool_op_get_tx_csum,
--	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
-+	.set_tx_csum = ethtool_op_set_tx_csum,
- 	.get_strings = mlx4_en_get_strings,
- 	.get_sset_count = mlx4_en_get_sset_count,
- 	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
 diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
-index 5a95d74..16f07bc 100644
+index 9d144cd..14b5faf 100644
 --- a/drivers/net/mlx4/en_rx.c
 +++ b/drivers/net/mlx4/en_rx.c
-@@ -37,6 +37,7 @@
- #include <linux/if_ether.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/etherdevice.h>
- 
- #include "mlx4_en.h"
- 
-@@ -51,18 +52,6 @@ static void mlx4_en_srq_event(struct mlx
- 	return;
- }
- 
--static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
--				   void **ip_hdr, void **tcpudp_hdr,
--				   u64 *hdr_flags, void *priv)
--{
--	*mac_hdr = page_address(frags->page) + frags->page_offset;
--	*ip_hdr = *mac_hdr + ETH_HLEN;
--	*tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
--	*hdr_flags = LRO_IPV4 | LRO_TCP;
--
--	return 0;
--}
--
- static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
- 			      struct mlx4_en_rx_desc *rx_desc,
- 			      struct skb_frag_struct *skb_frags,
-@@ -372,23 +361,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- 	}
- 	ring->buf = ring->wqres.buf.direct.buf;
- 
--	/* Configure lro mngr */
--	memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
--	ring->lro.dev = priv->dev;
--	ring->lro.features = LRO_F_NAPI;
--	ring->lro.frag_align_pad = NET_IP_ALIGN;
--	ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
--	ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
--	ring->lro.max_desc = mdev->profile.num_lro;
--	ring->lro.max_aggr = MAX_SKB_FRAGS;
--	ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
--				    sizeof(struct net_lro_desc),
--				    GFP_KERNEL);
--	if (!ring->lro.lro_arr) {
--		mlx4_err(mdev, "Failed to allocate lro array\n");
-+	/* Allocate LRO sessions */
-+	if (mlx4_en_lro_init(ring, mdev->profile.num_lro)) {
-+		mlx4_err(mdev, "Failed allocating lro sessions\n");
- 		goto err_map;
- 	}
--	ring->lro.get_frag_header = mlx4_en_get_frag_header;
- 
- 	return 0;
- 
-@@ -501,7 +478,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 
--	kfree(ring->lro.lro_arr);
-+	mlx4_en_lro_destroy(ring);
- 	mlx4_en_unmap_buffer(&ring->wqres.buf);
- 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
- 	vfree(ring->rx_info);
-@@ -522,12 +499,12 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- 
- 
- /* Unmap a completed descriptor and free unused pages */
--static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
--				    struct mlx4_en_rx_desc *rx_desc,
--				    struct skb_frag_struct *skb_frags,
--				    struct skb_frag_struct *skb_frags_rx,
--				    struct mlx4_en_rx_alloc *page_alloc,
--				    int length)
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct mlx4_en_frag_info *frag_info;
-@@ -570,11 +547,11 @@ fail:
- }
- 
- 
--static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
--				      struct mlx4_en_rx_desc *rx_desc,
--				      struct skb_frag_struct *skb_frags,
--				      struct mlx4_en_rx_alloc *page_alloc,
--				      unsigned int length)
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct sk_buff *skb;
-@@ -663,11 +640,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 	struct mlx4_cqe *cqe;
- 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
- 	struct skb_frag_struct *skb_frags;
--	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
- 	struct mlx4_en_rx_desc *rx_desc;
- 	struct sk_buff *skb;
- 	int index;
--	int nr;
- 	unsigned int length;
- 	int polled = 0;
- 	int ip_summed;
-@@ -718,38 +693,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 			if ((cqe->status & MLX4_CQE_STATUS_IPOK) &&
- 			    (cqe->checksum == 0xffff)) {
- 				priv->port_stats.rx_chksum_good++;
--				/* This packet is eligible for LRO if it is:
--				 * - DIX Ethernet (type interpretation)
--				 * - TCP/IP (v4)
--				 * - without IP options
--				 * - not an IP fragment */
--				if (mlx4_en_can_lro(cqe->status) &&
--				    dev->features & NETIF_F_LRO) {
--
--					nr = mlx4_en_complete_rx_desc(
--						priv, rx_desc,
--						skb_frags, lro_frags,
--						ring->page_alloc, length);
--					if (!nr)
--						goto next;
--
--					if (priv->vlgrp && (cqe->vlan_my_qpn &
--						MLX4_CQE_VLAN_PRESENT_MASK)) {
--						lro_vlan_hwaccel_receive_frags(
--						       &ring->lro, lro_frags,
--						       length, length,
--						       priv->vlgrp,
--						       be16_to_cpu(cqe->sl_vid),
--						       NULL, 0);
--					} else
--						lro_receive_frags(&ring->lro,
--								  lro_frags,
--								  length,
--								  length,
--								  NULL, 0);
--
-+				if (mdev->profile.num_lro &&
-+				    !mlx4_en_lro_rx(priv, ring, rx_desc,
-+						    skb_frags, length, cqe))
- 					goto next;
--				}
- 
- 				/* LRO not possible, complete processing here */
- 				ip_summed = CHECKSUM_UNNECESSARY;
-@@ -790,13 +737,15 @@ next:
- 		if (++polled == budget) {
- 			/* We are here because we reached the NAPI budget -
- 			 * flush only pending LRO sessions */
--			lro_flush_all(&ring->lro);
-+			if (mdev->profile.num_lro)
-+				mlx4_en_lro_flush(priv, ring, 0);
- 			goto out;
- 		}
- 	}
- 
- 	/* If CQ is empty flush all LRO sessions unconditionally */
--	lro_flush_all(&ring->lro);
-+	if (mdev->profile.num_lro)
-+		mlx4_en_lro_flush(priv, ring, 1);
- 
- out:
- 	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
-@@ -820,30 +769,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
+@@ -1041,30 +1041,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
  	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
  
  	if (priv->port_up)
@@ -877,8 +94,8 @@
  	int done;
 +	int work = min(*budget, poll_dev->quota);
  
--	done = mlx4_en_process_rx_cq(dev, cq, budget);
-+	done = mlx4_en_process_rx_cq(dev, cq, work);
+-	done = cq->process_cq(dev, cq, budget);
++	done = cq->process_cq(dev, cq, work);
 +	dev->quota -= done;
 +	*budget -= done;
  
@@ -901,76 +118,11 @@
  }
  
  
-diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
-index 2297fec..59f8772 100644
---- a/drivers/net/mlx4/en_tx.c
-+++ b/drivers/net/mlx4/en_tx.c
-@@ -37,6 +37,7 @@
- #include <linux/skbuff.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/tcp.h>
- 
- #include "mlx4_en.h"
- 
 diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
-index 1011803..7c8c489 100644
+index ebaed67..456cd91 100644
 --- a/drivers/net/mlx4/mlx4_en.h
 +++ b/drivers/net/mlx4/mlx4_en.h
-@@ -38,7 +38,7 @@
- #include <linux/list.h>
- #include <linux/mutex.h>
- #include <linux/netdevice.h>
--#include <linux/inet_lro.h>
-+#include <net/checksum.h>
- 
- #include <linux/mlx4/device.h>
- #include <linux/mlx4/qp.h>
-@@ -268,11 +268,42 @@ struct mlx4_en_rx_desc {
- 	struct mlx4_wqe_data_seg data[0];
- };
- 
-+struct mlx4_en_lro {
-+	struct hlist_node node;
-+	struct hlist_node flush_node;
-+
-+	/* Id fields come first: */
-+	u32 saddr;
-+	u32 daddr;
-+	u32 sport_dport;
-+	u32 next_seq;
-+	u16 tot_len;
-+	u8 psh;
-+
-+	u32 tsval;
-+	u32 tsecr;
-+	u32 ack_seq;
-+	u16 window;
-+	__be16 vlan_prio;
-+	u16 has_vlan;
-+	u16 has_timestamp;
-+	u16 mss;
-+	__wsum  data_csum;
-+
-+	unsigned long expires;
-+	struct sk_buff *skb;
-+	struct sk_buff *skb_last;
-+};
-+
-+
- struct mlx4_en_rx_ring {
- 	struct mlx4_srq srq;
- 	struct mlx4_hwq_resources wqres;
- 	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
--	struct net_lro_mgr lro;
-+	struct mlx4_en_lro lro;
-+	struct hlist_head *lro_hash;
-+	struct hlist_head lro_free;
-+	struct hlist_head lro_flush;
- 	u32 size ;	/* number of Rx descs*/
- 	u32 actual_size;
- 	u32 size_mask;
-@@ -311,7 +342,7 @@ struct mlx4_en_cq {
+@@ -328,7 +328,7 @@ struct mlx4_en_cq {
  	int                     ring;
  	spinlock_t              lock;
  	struct net_device      *dev;
@@ -979,43 +131,12 @@
  	/* Per-core Tx cq processing support */
  	struct timer_list timer;
  	int size;
-@@ -541,7 +572,7 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- int mlx4_en_process_rx_cq(struct net_device *dev,
- 			  struct mlx4_en_cq *cq,
- 			  int budget);
+@@ -571,7 +571,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
+ int mlx4_en_process_rx_cq_skb(struct net_device *dev,
+ 			      struct mlx4_en_cq *cq,
+ 			      int budget);
 -int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
 +int mlx4_en_poll_rx_cq(struct net_device *poll_dev, int *budget);;
  void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
  			     int is_tx, int rss, int qpn, int cqn, int srqn,
  			     struct mlx4_qp_context *context);
-@@ -552,6 +583,27 @@ void mlx4_en_calc_rx_buf(struct net_device *dev);
- void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
- 				 struct mlx4_en_rss_map *rss_map,
- 				 int num_entries, int num_rings);
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all);
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+		   struct mlx4_en_rx_desc *rx_desc,
-+		   struct skb_frag_struct *skb_frags,
-+		   unsigned int length, struct mlx4_cqe *cqe);
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring);
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro);
-+
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv* priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length);
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv* priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length);
-+
- void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
- int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
- void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
--- 
-1.5.4
-

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/mlx4_en_0110_no_set_flags.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/mlx4_en_0110_no_set_flags.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10/mlx4_en_0110_no_set_flags.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -7,9 +7,9 @@
 --- ofed_kernel.orig/drivers/net/mlx4/en_params.c
 +++ ofed_kernel/drivers/net/mlx4/en_params.c
 @@ -593,8 +593,6 @@ const struct ethtool_ops mlx4_en_ethtool
- 	.get_pauseparam = mlx4_en_get_pauseparam,
  	.set_pauseparam = mlx4_en_set_pauseparam,
  	.get_ringparam = mlx4_en_get_ringparam,
+ 	.set_ringparam = mlx4_en_set_ringparam,
 -	.get_flags = ethtool_op_get_flags,
 -	.set_flags = ethtool_op_set_flags,
  };

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/core_2_z010_sysfs_to_2.6.18.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/core_2_z010_sysfs_to_2.6.18.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/core_2_z010_sysfs_to_2.6.18.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,32 @@
+--- a/drivers/infiniband/core/device.c	2009-02-24 14:00:50.387541000 +0200
++++ b/drivers/infiniband/core/device.c	2009-02-24 14:00:54.891362000 +0200
+@@ -199,7 +199,7 @@ void ib_dealloc_device(struct ib_device 
+ 
+ 	BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);
+ 
+-	kobject_put(&device->dev.kobj);
++	ib_device_unregister_sysfs(device);
+ }
+ EXPORT_SYMBOL(ib_dealloc_device);
+ 
+@@ -356,8 +356,6 @@ void ib_unregister_device(struct ib_devi
+ 
+ 	mutex_unlock(&device_mutex);
+ 
+-	ib_device_unregister_sysfs(device);
+-
+ 	spin_lock_irqsave(&device->client_data_lock, flags);
+ 	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ 		kfree(context);
+--- a/drivers/infiniband/core/sysfs.c	2009-02-24 13:58:42.509754000 +0200
++++ b/drivers/infiniband/core/sysfs.c	2009-02-24 14:00:54.894368000 +0200
+@@ -863,9 +863,6 @@ void ib_device_unregister_sysfs(struct i
+ 	struct kobject *p, *t;
+ 	struct ib_port *port;
+ 
+-	/* Hold kobject until ib_dealloc_device() */
+-	kobject_get(&device->dev.kobj);
+-
+ 	list_for_each_entry_safe(p, t, &device->port_list, entry) {
+ 		list_del(&p->entry);
+ 		port = container_of(p, struct ib_port, kobj);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/core_z0010_sysfs_race.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/core_z0010_sysfs_race.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/core_z0010_sysfs_race.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,496 @@
+ib_core: avoid race condition between sysfs access and low-level module unload. (V2)
+
+In newer kernels, although a low-level module will not be unloaded (code)
+while its sysfs interface is being accessed, it is possible for the module to
+free all its resources (data) during such access.  This almost always causes
+a kernel Oops.
+
+To avoid this, we protect the device reg_state with a mutex, and perform
+all sysfs operations (show, store) atomically within this mutex.
+
+V2: fix thinko bug in sysfs_state_show changes(found by Ralph Campbell).
+
+Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
+Signed-off-by: Ralph Campbell <ralph.campbell at qlogic.com>
+
+Index: ofed_kernel/drivers/infiniband/core/device.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/device.c	2009-02-22 15:36:34.531430000 +0200
++++ ofed_kernel/drivers/infiniband/core/device.c	2009-02-22 15:38:28.650322000 +0200
+@@ -178,9 +178,14 @@ static int end_port(struct ib_device *de
+  */
+ struct ib_device *ib_alloc_device(size_t size)
+ {
++	struct ib_device *ibdev;
++
+ 	BUG_ON(size < sizeof (struct ib_device));
+ 
+-	return kzalloc(size, GFP_KERNEL);
++	ibdev = kzalloc(size, GFP_KERNEL);
++	if (ibdev)
++		mutex_init(&ibdev->sysfs_mutex);
++	return ibdev;
+ }
+ EXPORT_SYMBOL(ib_alloc_device);
+ 
+@@ -313,9 +318,10 @@ int ib_register_device(struct ib_device 
+ 		goto out;
+ 	}
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	list_add_tail(&device->core_list, &device_list);
+-
+ 	device->reg_state = IB_DEV_REGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ 
+ 	{
+ 		struct ib_client *client;
+@@ -361,7 +367,9 @@ void ib_unregister_device(struct ib_devi
+ 		kfree(context);
+ 	spin_unlock_irqrestore(&device->client_data_lock, flags);
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	device->reg_state = IB_DEV_UNREGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ }
+ EXPORT_SYMBOL(ib_unregister_device);
+ 
+Index: ofed_kernel/drivers/infiniband/core/sysfs.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/sysfs.c	2009-02-22 15:36:35.914354000 +0200
++++ ofed_kernel/drivers/infiniband/core/sysfs.c	2009-02-22 16:12:37.485629000 +0200
+@@ -94,7 +94,7 @@ static ssize_t state_show(struct ib_port
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+ 	static const char *state_name[] = {
+ 		[IB_PORT_NOP]		= "NOP",
+@@ -105,26 +105,33 @@ static ssize_t state_show(struct ib_port
+ 		[IB_PORT_ACTIVE_DEFER]	= "ACTIVE_DEFER"
+ 	};
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%d: %s\n", attr.state,
+-		       attr.state >= 0 && attr.state < ARRAY_SIZE(state_name) ?
+-		       state_name[attr.state] : "UNKNOWN");
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d: %s\n", attr.state,
++				      attr.state >= 0 &&
++				      attr.state < ARRAY_SIZE(state_name) ?
++				      state_name[attr.state] : "UNKNOWN");
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "0x%x\n", attr.lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_mask_count_show(struct ib_port *p,
+@@ -132,52 +139,64 @@ static ssize_t lid_mask_count_show(struc
+ 				   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.lmc);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.lmc);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%x\n", attr.sm_lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.sm_lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused,
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.sm_sl);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.sm_sl);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused,
+ 			     char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
+@@ -186,24 +205,33 @@ static ssize_t rate_show(struct ib_port 
+ 	struct ib_port_attr attr;
+ 	char *speed = "";
+ 	int rate;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	switch (attr.active_speed) {
+-	case 2: speed = " DDR"; break;
+-	case 4: speed = " QDR"; break;
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.active_speed) {
++			case 2: speed = " DDR"; break;
++			case 4: speed = " QDR"; break;
++			}
++
++			rate = 25 * ib_width_enum_to_int(attr.active_width) *
++				attr.active_speed;
++			if (rate < 0) {
++				ret = -EINVAL;
++				goto out;
++			}
++
++			ret = sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
++				      rate / 10, rate % 10 ? ".5" : "",
++				      ib_width_enum_to_int(attr.active_width),
++				      speed);
++		}
+ 	}
+-
+-	rate = 25 * ib_width_enum_to_int(attr.active_width) * attr.active_speed;
+-	if (rate < 0)
+-		return -EINVAL;
+-
+-	return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
+-		       rate / 10, rate % 10 ? ".5" : "",
+-		       ib_width_enum_to_int(attr.active_width), speed);
++out:
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
+@@ -211,22 +239,42 @@ static ssize_t phys_state_show(struct ib
+ {
+ 	struct ib_port_attr attr;
+ 
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	switch (attr.phys_state) {
+-	case 1:  return sprintf(buf, "1: Sleep\n");
+-	case 2:  return sprintf(buf, "2: Polling\n");
+-	case 3:  return sprintf(buf, "3: Disabled\n");
+-	case 4:  return sprintf(buf, "4: PortConfigurationTraining\n");
+-	case 5:  return sprintf(buf, "5: LinkUp\n");
+-	case 6:  return sprintf(buf, "6: LinkErrorRecovery\n");
+-	case 7:  return sprintf(buf, "7: Phy Test\n");
+-	default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.phys_state) {
++			case 1:
++				ret = sprintf(buf, "1: Sleep\n");
++				break;
++			case 2:
++				ret = sprintf(buf, "2: Polling\n");
++				break;
++			case 3:
++				ret = sprintf(buf, "3: Disabled\n");
++				break;
++			case 4:
++				ret = sprintf(buf, "4: PortConfigurationTraining\n");
++				break;
++			case 5:
++				ret = sprintf(buf, "5: LinkUp\n");
++				break;
++			case 6:
++				ret = sprintf(buf, "6: LinkErrorRecovery\n");
++				break;
++			case 7:
++				ret = sprintf(buf, "7: Phy Test\n");
++				break;
++			default:
++				ret = sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++				break;
++			}
++		}
+ 	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static PORT_ATTR_RO(state);
+@@ -256,21 +304,24 @@ static ssize_t show_port_gid(struct ib_p
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	union ib_gid gid;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) gid.raw)[0]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[1]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[2]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[3]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[4]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[5]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[6]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[7]));
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) gid.raw)[0]),
++				      be16_to_cpu(((__be16 *) gid.raw)[1]),
++				      be16_to_cpu(((__be16 *) gid.raw)[2]),
++				      be16_to_cpu(((__be16 *) gid.raw)[3]),
++				      be16_to_cpu(((__be16 *) gid.raw)[4]),
++				      be16_to_cpu(((__be16 *) gid.raw)[5]),
++				      be16_to_cpu(((__be16 *) gid.raw)[6]),
++				      be16_to_cpu(((__be16 *) gid.raw)[7]));
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
+@@ -279,13 +330,16 @@ static ssize_t show_port_pkey(struct ib_
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	u16 pkey;
+-	ssize_t ret;
+-
+-	ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%04x\n", pkey);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
++		if (!ret)
++			ret = sprintf(buf, "0x%04x\n", pkey);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ #define PORT_PMA_ATTR(_name, _counter, _width, _offset)			\
+@@ -308,6 +346,12 @@ static ssize_t show_pma_counter(struct i
+ 	if (!p->ibdev->process_mad)
+ 		return sprintf(buf, "N/A (no PMA)\n");
+ 
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (!ibdev_is_alive(p->ibdev)) {
++		ret = -ENODEV;
++		goto out;
++	}
++
+ 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
+ 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ 	if (!in_mad || !out_mad) {
+@@ -354,7 +414,7 @@ static ssize_t show_pma_counter(struct i
+ out:
+ 	kfree(in_mad);
+ 	kfree(out_mad);
+-
++	mutex_unlock(&p->ibdev->sysfs_mutex);
+ 	return ret;
+ }
+ 
+@@ -594,20 +654,20 @@ static ssize_t show_sys_image_guid(struc
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_attr attr;
+-	ssize_t ret;
+-
+-	if (!ibdev_is_alive(dev))
+-		return -ENODEV;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_device(dev, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_query_device(dev, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_node_guid(struct device *device,
+@@ -639,17 +699,20 @@ static ssize_t set_node_desc(struct devi
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_modify desc = {};
+-	int ret;
++	int ret = -ENODEV;
+ 
+ 	if (!dev->modify_device)
+ 		return -EIO;
+ 
+ 	memcpy(desc.node_desc, buf, min_t(int, count, 64));
+-	ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
+-	if (ret)
+-		return ret;
+-
+-	return count;
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
++		if (!ret)
++			ret = count;
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
+@@ -677,14 +740,18 @@ static ssize_t show_protocol_stat(const 
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	union rdma_protocol_stats stats;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = dev->get_protocol_stats(dev, &stats);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%llu\n",
+-		       (unsigned long long) ((u64 *) &stats)[offset]);
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = dev->get_protocol_stats(dev, &stats);
++		if (!ret)
++			ret = sprintf(buf, "%llu\n",
++				      (unsigned long long)
++				      ((u64 *) &stats)[offset]);
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ /* generate a read-only iwarp statistics attribute */
+Index: ofed_kernel/include/rdma/ib_verbs.h
+===================================================================
+--- ofed_kernel.orig/include/rdma/ib_verbs.h	2009-02-22 15:36:40.252210000 +0200
++++ ofed_kernel/include/rdma/ib_verbs.h	2009-02-22 15:38:28.678322000 +0200
+@@ -1205,6 +1205,7 @@ struct ib_device {
+ 		IB_DEV_REGISTERED,
+ 		IB_DEV_UNREGISTERED
+ 	}                            reg_state;
++	struct mutex		     sysfs_mutex;
+ 
+ 	u64			     uverbs_cmd_mask;
+ 	int			     uverbs_abi_ver;

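Editor's note on the sysfs hunks above: every show/store handler touched by this patch follows one pattern — default the return value to -ENODEV, take the new ib_device sysfs_mutex, perform the query only while ibdev_is_alive() still reports the device as registered, then drop the lock. A minimal sketch of that pattern for a hypothetical attribute (show_foo and ib_query_foo are placeholders, not part of the patch):

	static ssize_t show_foo(struct ib_device *dev, char *buf)
	{
		u32 foo;
		ssize_t ret = -ENODEV;			/* reported if the device is already gone */

		mutex_lock(&dev->sysfs_mutex);		/* serializes against device unregister */
		if (ibdev_is_alive(dev)) {
			ret = ib_query_foo(dev, &foo);	/* placeholder query helper */
			if (!ret)
				ret = sprintf(buf, "0x%x\n", foo);
		}
		mutex_unlock(&dev->sysfs_mutex);
		return ret;				/* -ENODEV, a query error, or bytes written */
	}
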
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/cxgb3_0002_undo_250.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/cxgb3_0002_undo_250.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/cxgb3_0002_undo_250.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,8 @@
-commit 204e2f98c2d13f869b8541f3c57c7314f75cab11
-Author: Divy Le Ray <divy at chelsio.com>
-Date:   Tue May 6 19:26:01 2008 -0700
-
-    cxgb3 - fix EEH
-    
-    Reset the chip when the PCI link goes down.
-    Preserve the napi structure when a sge qset's resources are freed.
-    Replay only HW initialization when the chip comes out of reset.
-    
-    Signed-off-by: Divy Le ray <divy at chelsio.com>
-    Signed-off-by: Jeff Garzik <jgarzik at redhat.com>
-
----
- drivers/net/cxgb3/common.h     |    1 -
- drivers/net/cxgb3/cxgb3_main.c |   10 ++++------
- drivers/net/cxgb3/regs.h       |    8 --------
- drivers/net/cxgb3/sge.c        |   31 ++-----------------------------
- drivers/net/cxgb3/t3_hw.c      |   28 ----------------------------
- 5 files changed, 6 insertions(+), 72 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/common.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/common.h
-+++ ofed_kernel/drivers/net/cxgb3/common.h
-@@ -700,7 +700,6 @@ void mac_prep(struct cmac *mac, struct a
+diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
+index d5e9bf7..dc2c79d 100644
+--- a/drivers/net/cxgb3/common.h
++++ b/drivers/net/cxgb3/common.h
+@@ -726,7 +726,6 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
  void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
  int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
  		    int reset);
@@ -31,11 +10,11 @@
  void t3_led_ready(struct adapter *adapter);
  void t3_fatal_err(struct adapter *adapter);
  void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -2449,6 +2449,9 @@ static pci_ers_result_t t3_io_error_dete
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 8de820e..48fbda6 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -2485,6 +2485,9 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
  	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
  		offload_close(&adapter->tdev);
  
@@ -45,7 +24,7 @@
  	adapter->flags &= ~FULL_INIT_DONE;
  
  	pci_disable_device(pdev);
-@@ -2473,12 +2476,8 @@ static pci_ers_result_t t3_io_slot_reset
+@@ -2509,12 +2512,8 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
  		goto err;
  	}
  	pci_set_master(pdev);
@@ -59,7 +38,7 @@
  		goto err;
  
  	return PCI_ERS_RESULT_RECOVERED;
-@@ -2630,7 +2629,6 @@ static int __devinit init_one(struct pci
+@@ -2666,7 +2665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
  	}
  
  	pci_set_master(pdev);
@@ -67,10 +46,10 @@
  
  	mmio_start = pci_resource_start(pdev, 0);
  	mmio_len = pci_resource_len(pdev, 0);
-Index: ofed_kernel/drivers/net/cxgb3/regs.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/regs.h
-+++ ofed_kernel/drivers/net/cxgb3/regs.h
+diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
+index a035d5c..086cd02 100644
+--- a/drivers/net/cxgb3/regs.h
++++ b/drivers/net/cxgb3/regs.h
 @@ -444,14 +444,6 @@
  
  #define A_PCIE_CFG 0x88
@@ -86,11 +65,11 @@
  #define S_PCIE_CLIDECEN    16
  #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
  #define F_PCIE_CLIDECEN    V_PCIE_CLIDECEN(1U)
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index 1b0861d..912f816 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
  }
  
  /**
@@ -124,7 +103,7 @@
   *	free_qset - free the resources of an SGE queue set
   *	@adapter: the adapter owning the queue set
   *	@q: the queue set
-@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter 
+@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -133,20 +112,11 @@
  }
  
  /**
-@@ -1429,7 +1402,7 @@ static void restart_ctrlq(unsigned long 
-  */
- int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
- {
--	int ret;
-+	int ret;
- 	local_bh_disable();
- 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
- 	local_bh_enable();
-Index: ofed_kernel/drivers/net/cxgb3/t3_hw.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/t3_hw.c
-+++ ofed_kernel/drivers/net/cxgb3/t3_hw.c
-@@ -3265,7 +3265,6 @@ static void config_pcie(struct adapter *
+diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
+index bf5c076..533fc74 100644
+--- a/drivers/net/cxgb3/t3_hw.c
++++ b/drivers/net/cxgb3/t3_hw.c
+@@ -3303,7 +3303,6 @@ static void config_pcie(struct adapter *adap)
  
  	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
  	t3_set_reg_field(adap, A_PCIE_CFG, 0,
@@ -154,15 +124,14 @@
  			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
  }
  
-@@ -3657,30 +3656,3 @@ void t3_led_ready(struct adapter *adapte
- 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+@@ -3706,31 +3705,3 @@ void t3_led_ready(struct adapter *adapter)
  			 F_GPIO0_OUT_VAL);
  }
--
+ 
 -int t3_replay_prep_adapter(struct adapter *adapter)
 -{
 -	const struct adapter_info *ai = adapter->params.info;
--	unsigned int i, j = 0;
+-	unsigned int i, j = -1;
 -	int ret;
 -
 -	early_hw_init(adapter, ai);
@@ -171,15 +140,17 @@
 -		return ret;
 -
 -	for_each_port(adapter, i) {
+-		const struct port_type_info *pti;
 -		struct port_info *p = adap2pinfo(adapter, i);
--		while (!adapter->params.vpd.port_type[j])
--			++j;
 -
--		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
--					ai->mdio_ops);
+-		while (!adapter->params.vpd.port_type[++j])
+-			;
 -
+-		pti = &port_types[adapter->params.vpd.port_type[j]];
+-		ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
+-		if (ret)
+-			return ret;
 -		p->phy.ops->power_down(&p->phy, 1);
--		++j;
 -	}
 -
 -return 0;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/cxgb3_0010_napi.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/cxgb3_0010_napi.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/cxgb3_0010_napi.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,21 @@
----
- drivers/net/cxgb3/adapter.h    |   22 +++--
- drivers/net/cxgb3/cxgb3_main.c |   94 ++++++++++++++++------
- drivers/net/cxgb3/sge.c        |  170 ++++++++++++++++++++++++-----------------
- 3 files changed, 185 insertions(+), 101 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/adapter.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/adapter.h
-+++ ofed_kernel/drivers/net/cxgb3/adapter.h
-@@ -47,13 +47,11 @@
- #include <asm/io.h>
+diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
+index 3b33ecb..21dad82 100644
+--- a/drivers/net/cxgb3/adapter.h
++++ b/drivers/net/cxgb3/adapter.h
+@@ -48,12 +48,10 @@
  
  struct vlan_group;
--struct adapter;
+ struct adapter;
 -struct sge_qset;
  
-+struct adapter;
  struct port_info {
  	struct adapter *adapter;
  	struct vlan_group *vlan_grp;
 -	struct sge_qset *qs;
- 	const struct port_type_info *port_type;
  	u8 port_id;
  	u8 rx_csum_offload;
-@@ -184,8 +182,6 @@ enum {				/* per port SGE statistics */
+ 	u8 nqsets;
+@@ -183,8 +181,6 @@ enum {				/* per port SGE statistics */
  #define T3_MAX_LRO_MAX_PKTS 64
  
  struct sge_qset {		/* an SGE queue set */
@@ -32,7 +24,7 @@
  	struct sge_rspq rspq;
  	struct sge_fl fl[SGE_RXQ_PER_SET];
  	struct sge_txq txq[SGE_TXQ_PER_SET];
-@@ -196,7 +192,7 @@ struct sge_qset {		/* an SGE queue set *
+@@ -195,7 +191,7 @@ struct sge_qset {		/* an SGE queue set */
  	int lro_enabled;
  	int lro_frag_len;
  	void *lro_va;
@@ -41,7 +33,7 @@
  	unsigned long txq_stopped;	/* which Tx queues are stopped */
  	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
  	unsigned long port_stats[SGE_PSTAT_MAX];
-@@ -241,6 +237,12 @@ struct adapter {
+@@ -240,6 +236,12 @@ struct adapter {
  	struct delayed_work adap_check_task;
  	struct work_struct ext_intr_handler_task;
  
@@ -54,7 +46,7 @@
  	struct dentry *debugfs_root;
  
  	struct mutex mdio_lock;
-@@ -267,6 +269,12 @@ static inline struct port_info *adap2pin
+@@ -266,6 +268,12 @@ static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
  	return netdev_priv(adap->port[idx]);
  }
  
@@ -67,7 +59,7 @@
  #define OFFLOAD_DEVMAP_BIT 15
  
  #define tdev2adap(d) container_of(d, struct adapter, tdev)
-@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, str
+@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -76,11 +68,11 @@
  int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
  		unsigned char *data);
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -410,17 +410,49 @@ static void setup_rss(struct adapter *ad
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 0f4c694..342d441 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -435,17 +435,49 @@ static void setup_rss(struct adapter *adap)
  		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
  }
  
@@ -98,9 +90,7 @@
 -	int i;
 +	int i, j, dummy_idx = 0;
 +	struct net_device *nd;
- 
--	for (i = 0; i < SGE_QSETS; i++) {
--		struct sge_qset *qs = &adap->sge.qs[i];
++
 +	for_each_port(adap, i) {
 +		struct net_device *dev = adap->port[i];
 +		const struct port_info *pi = netdev_priv(dev);
@@ -112,7 +102,9 @@
 +				nd = alloc_netdev(sizeof(*p), "", ether_setup);
 +				if (!nd)
 +					goto free_all;
-+
+ 
+-	for (i = 0; i < SGE_QSETS; i++) {
+-		struct sge_qset *qs = &adap->sge.qs[i];
 +				p = netdev_priv(nd);
 +				p->adapter = adap;
 +				nd->weight = 64;
@@ -137,7 +129,7 @@
  }
  
  /*
-@@ -431,18 +463,20 @@ static void init_napi(struct adapter *ad
+@@ -456,18 +488,20 @@ static void init_napi(struct adapter *adap)
  static void quiesce_rx(struct adapter *adap)
  {
  	int i;
@@ -168,7 +160,7 @@
  }
  
  /**
-@@ -455,7 +489,7 @@ static void enable_all_napi(struct adapt
+@@ -480,7 +514,7 @@ static void enable_all_napi(struct adapter *adap)
   */
  static int setup_sge_qsets(struct adapter *adap)
  {
@@ -177,7 +169,7 @@
  	unsigned int ntxq = SGE_TXQ_PER_SET;
  
  	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
-@@ -463,14 +497,15 @@ static int setup_sge_qsets(struct adapte
+@@ -488,14 +522,15 @@ static int setup_sge_qsets(struct adapter *adap)
  
  	for_each_port(adap, i) {
  		struct net_device *dev = adap->port[i];
@@ -196,7 +188,7 @@
  			if (err) {
  				t3_free_sge_resources(adap);
  				return err;
-@@ -884,6 +919,10 @@ static int cxgb_up(struct adapter *adap)
+@@ -909,6 +944,10 @@ static int cxgb_up(struct adapter *adap)
  				goto out;
  		}
  
@@ -207,7 +199,7 @@
  		err = t3_init_hw(adap, 0);
  		if (err)
  			goto out;
-@@ -896,7 +935,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -921,7 +960,6 @@ static int cxgb_up(struct adapter *adap)
  			goto out;
  
  		setup_rss(adap);
@@ -215,7 +207,7 @@
  		adap->flags |= FULL_INIT_DONE;
  	}
  
-@@ -924,7 +962,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -949,7 +987,6 @@ static int cxgb_up(struct adapter *adap)
  				      adap->name, adap)))
  		goto irq_err;
  
@@ -223,7 +215,7 @@
  	t3_sge_start(adap);
  	t3_intr_enable(adap);
  
-@@ -1061,10 +1098,8 @@ static int cxgb_open(struct net_device *
+@@ -1086,10 +1123,8 @@ static int cxgb_open(struct net_device *dev)
  	int other_ports = adapter->open_device_map & PORT_MASK;
  	int err;
  
@@ -235,7 +227,7 @@
  
  	set_bit(pi->port_id, &adapter->open_device_map);
  	if (is_offload(adapter) && !ofld_disable) {
-@@ -2700,6 +2735,7 @@ static int __devinit init_one(struct pci
+@@ -2736,6 +2771,7 @@ static int __devinit init_one(struct pci_dev *pdev,
  #ifdef CONFIG_NET_POLL_CONTROLLER
  		netdev->poll_controller = cxgb_netpoll;
  #endif
@@ -243,7 +235,7 @@
  
  		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
  	}
-@@ -2800,6 +2836,12 @@ static void __devexit remove_one(struct 
+@@ -2836,6 +2872,12 @@ static void __devexit remove_one(struct pci_dev *pdev)
  		t3_free_sge_resources(adapter);
  		cxgb_disable_msi(adapter);
  
@@ -256,11 +248,11 @@
  		for_each_port(adapter, i)
  			if (adapter->port[i])
  				free_netdev(adapter->port[i]);
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter 
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index f6bc6fe..3bbf626 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -270,7 +262,7 @@
  	memset(q, 0, sizeof(*q));
  }
  
-@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, str
+@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
  	unsigned int ndesc, pidx, credits, gen, compl;
  	const struct port_info *pi = netdev_priv(dev);
  	struct adapter *adap = pi->adapter;
@@ -279,7 +271,7 @@
  	struct sge_txq *q = &qs->txq[TXQ_ETH];
  
  	/*
-@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long 
+@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long data)
  	struct sk_buff *skb;
  	struct sge_qset *qs = (struct sge_qset *)data;
  	struct sge_txq *q = &qs->txq[TXQ_CTRL];
@@ -295,7 +287,7 @@
  
  		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
  
-@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long 
+@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long data)
  
  	spin_unlock(&q->lock);
  	wmb();
@@ -304,7 +296,7 @@
  		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
  }
  
-@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struc
+@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
  	else {
  		struct sge_qset *qs = rspq_to_qset(q);
  
@@ -314,7 +306,7 @@
  		q->rx_head = skb;
  	}
  	q->rx_tail = skb;
-@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundl
+@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev,
   *	receive handler.  Batches need to be of modest size as we do prefetches
   *	on the packets in each.
   */
@@ -357,7 +349,7 @@
  			prefetch(head->data);
  			skbs[ngathered] = head;
  			head = head->next;
-@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct 
+@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct *napi, int budget)
  		}
  		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
  	}
@@ -370,7 +362,7 @@
  }
  
  /**
-@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const
+@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const struct rsp_desc *r)
  
  /**
   *	napi_rx_handler - the NAPI handler for Rx processing
@@ -449,7 +441,7 @@
  }
  
  /**
-@@ -2448,7 +2462,8 @@ static inline int handle_responses(struc
+@@ -2448,7 +2462,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
  		return 0;
  	}
@@ -459,7 +451,7 @@
  	return 1;
  }
  
-@@ -2459,7 +2474,8 @@ static inline int handle_responses(struc
+@@ -2459,7 +2474,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -469,7 +461,7 @@
  	struct sge_rspq *q = &qs->rspq;
  
  	spin_lock(&q->lock);
-@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, vo
+@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -484,7 +476,7 @@
  		q->unhandled_irqs++;
  	spin_unlock(&q->lock);
  	return IRQ_HANDLED;
-@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, 
+@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, void *cookie)
  	return IRQ_HANDLED;
  }
  
@@ -502,7 +494,7 @@
  		return 1;
  	}
  	return 0;
-@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int 
+@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
  
  	spin_lock(&q->lock);
  
@@ -515,7 +507,7 @@
  	if (!new_packets && t3_slow_intr_handler(adap) == 0)
  		q->unhandled_irqs++;
  
-@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, voi
+@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, void *cookie)
  static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  {
  	u32 map;
@@ -527,7 +519,7 @@
  
  	t3_write_reg(adap, A_PL_CLI, 0);
  	map = t3_read_reg(adap, A_SG_DATA_INTR);
-@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq
+@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  	if (unlikely(map & F_ERRINTR))
  		t3_slow_intr_handler(adap);
  
@@ -550,7 +542,7 @@
  
  	spin_unlock(&q0->lock);
  	return IRQ_HANDLED;
-@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long d
+@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long data)
  {
  	spinlock_t *lock;
  	struct sge_qset *qs = (struct sge_qset *)data;
@@ -560,7 +552,7 @@
  
  	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
  		reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
-@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long d
+@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long data)
  		spin_unlock(&qs->txq[TXQ_OFLD].lock);
  	}
  	lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
@@ -572,7 +564,7 @@
  			u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
  
  			if (qs->fl[0].credits < qs->fl[0].size)
-@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long d
+@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long data)
   */
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
  {
@@ -586,7 +578,7 @@
  }
  
  /**
-@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_
+@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
   */
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -595,7 +587,7 @@
  {
  	int i, avail, ret = -ENOMEM;
  	struct sge_qset *q = &adapter->sge.qs[id];
-@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *ad
+@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  	}
  
  	spin_unlock_irq(&adapter->sge.reg_lock);

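Editor's note: the cxgb3_0010_napi changes above re-target the driver from the napi_struct API introduced in 2.6.24 to the polling interface that 2.6.16/SLES10 SP1 provides — per-netdev dev->poll and dev->weight, netif_rx_schedule()/netif_rx_complete(), plus the dummy net_devices the patch allocates per queue set to carry those fields. As a reminder of the old calling convention only (a hedged sketch; the names below are placeholders, not cxgb3 code), a pre-2.6.24 poll handler looks roughly like this:

	/* old-style NAPI, kernels before 2.6.24: poll hangs off a net_device */
	static int example_poll(struct net_device *dev, int *budget)
	{
		int quota = min(dev->quota, *budget);
		int done = example_process_rx(dev, quota);	/* placeholder rx work */

		*budget -= done;
		dev->quota -= done;

		if (done < quota) {		/* ring drained: stop polling */
			netif_rx_complete(dev);
			example_enable_irq(dev);	/* placeholder: re-enable rx interrupts */
			return 0;
		}
		return 1;			/* budget used up, keep polling */
	}

The matching interrupt handler disables further rx interrupts and calls netif_rx_schedule(dev) to hand the device to the poll loop.
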
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,23 +3,32 @@
  drivers/infiniband/hw/ipath/ipath_user_sdma.c |    6 +++---
  2 files changed, 4 insertions(+), 4 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:37:35.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:41:22.000000000 -0800
 @@ -698,7 +698,7 @@ retry:
  
  	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
  			      tx->map_len, DMA_TO_DEVICE);
--	if (dma_mapping_error(&dd->pcidev->dev, addr)) {
-+	if (dma_mapping_error(addr)) {
- 		ret = -EIO;
- 		goto unlock;
- 	}
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+-	if (dma_mapping_error(&dd->pcidev->dev, addr))
++	if (dma_mapping_error(addr))
+ 		goto ioerr;
+ 
+ 	dwoffset = tx->map_len >> 2;
+@@ -739,7 +739,7 @@ retry:
+ 		dw = (len + 3) >> 2;
+ 		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
+ 				      DMA_TO_DEVICE);
+-		if (dma_mapping_error(&dd->pcidev->dev, addr))
++		if (dma_mapping_error(addr))
+ 			goto unmap;
+ 		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
+ 		/* SDmaUseLargeBuf has to be set in every descriptor */
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:18.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:54.000000000 -0800
 @@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(cons
  
  	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,

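Editor's note: the ipath hunks above swap the two-argument dma_mapping_error(&dd->pcidev->dev, addr) — the form the mainline kernel gained in 2.6.27 — for the single-argument form available on 2.6.16 through 2.6.26. The same backport is sometimes done once, centrally, with a version-guarded wrapper instead of editing every call site; a minimal sketch under that assumption (the wrapper name is hypothetical, not part of the patch):

	#include <linux/version.h>
	#include <linux/dma-mapping.h>

	/* hypothetical compat wrapper, not part of the patch */
	#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
	#define compat_dma_mapping_error(dev, addr)	dma_mapping_error(addr)
	#else
	#define compat_dma_mapping_error(dev, addr)	dma_mapping_error(dev, addr)
	#endif
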
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/ipoib_0100_to_2.6.21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/ipoib_0100_to_2.6.21.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/ipoib_0100_to_2.6.21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -12,11 +12,11 @@
  drivers/infiniband/ulp/ipoib/ipoib_multicast.c |   10 ++--
  5 files changed, 61 insertions(+), 64 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:56:08.000000000 +0200
-@@ -279,8 +279,6 @@ struct ipoib_dev_priv {
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:46:20.000000000 +0300
+@@ -277,8 +277,6 @@ struct ipoib_dev_priv {
  
  	struct net_device *dev;
  
@@ -25,7 +25,7 @@
  	unsigned long flags;
  
  	struct mutex vlan_mutex;
-@@ -338,6 +336,8 @@ struct ipoib_dev_priv {
+@@ -336,6 +334,8 @@ struct ipoib_dev_priv {
  
  	struct ib_event_handler event_handler;
  
@@ -34,7 +34,7 @@
  	struct net_device *parent;
  	struct list_head child_intfs;
  	struct list_head list;
-@@ -425,7 +425,7 @@ extern struct workqueue_struct *ipoib_wo
+@@ -423,7 +423,7 @@ extern struct workqueue_struct *ipoib_wo
  
  /* functions */
  
@@ -43,10 +43,10 @@
  void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
  void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:46:20.000000000 +0300
 @@ -593,7 +593,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		ipoib_dbg(priv, "cm recv error "
  			   "(status=%d, wrid=%d vend_err %x)\n",
@@ -56,7 +56,7 @@
  		if (has_srq)
  			goto repost;
  		else {
-@@ -658,7 +658,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
+@@ -646,7 +646,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		 * this packet and reuse the old buffer.
  		 */
  		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
@@ -65,7 +65,7 @@
  		goto repost;
  	}
  
-@@ -676,8 +676,8 @@ copied:
+@@ -664,8 +664,8 @@ copied:
  	skb_pull(skb, IPOIB_ENCAP_LEN);
  
  	dev->last_rx = jiffies;
@@ -76,7 +76,7 @@
  
  	skb->dev = dev;
  	/* XXX get correct PACKET_ type here */
-@@ -726,8 +726,8 @@ void ipoib_cm_send(struct net_device *de
+@@ -714,8 +714,8 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(skb->len > tx->mtu)) {
  		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
  			   skb->len, tx->mtu);
@@ -87,7 +87,7 @@
  		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
  		return;
  	}
-@@ -746,7 +746,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -734,7 +734,7 @@ void ipoib_cm_send(struct net_device *de
  	tx_req->skb = skb;
  	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
  	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
@@ -96,7 +96,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -756,7 +756,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -744,7 +744,7 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
  			       addr, skb->len))) {
  		ipoib_warn(priv, "post_send failed\n");
@@ -105,7 +105,7 @@
  		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
  		dev_kfree_skb_any(skb);
  	} else {
-@@ -793,8 +793,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
+@@ -781,8 +781,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
  	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
  
  	/* FIXME: is this right? Shouldn't we only increment on success? */
@@ -116,10 +116,10 @@
  
  	dev_kfree_skb_any(tx_req->skb);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:43:16.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:49:04.000000000 +0300
 @@ -261,7 +261,7 @@ static void ipoib_ib_handle_rx_wc(struct
  	 * this packet and reuse the old buffer.
  	 */
@@ -263,60 +263,41 @@
  		--priv->tx_outstanding;
  		ipoib_dma_unmap_tx(priv->ca, tx_req);
  		dev_kfree_skb_any(skb);
-@@ -809,6 +812,7 @@ int ipoib_ib_dev_stop(struct net_device 
+@@ -708,8 +711,7 @@ int ipoib_ib_dev_open(struct net_device 
+ 	priv->poll_timer.function = ipoib_ib_tx_timer_func;
+ 	priv->poll_timer.data = (unsigned long)dev;
+ 
+-	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_enable(&priv->napi);
++	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+ 
+ 	return 0;
+ }
+@@ -828,8 +830,8 @@ int ipoib_ib_dev_stop(struct net_device 
+ 	struct ipoib_tx_buf *tx_req;
  	int i;
  
- 	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+-	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_disable(&priv->napi);
++	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 +	netif_poll_disable(dev);
  
  	ipoib_cm_dev_stop(dev);
  
-@@ -893,6 +897,7 @@ timeout:
- 		msleep(1);
- 	}
+@@ -903,6 +905,7 @@ timeout:
  
+ 	ipoib_ah_dev_cleanup(dev);
+ 
 +	netif_poll_enable(dev);
  	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
  
  	return 0;
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:55:35.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:58:18.000000000 +0200
-@@ -106,20 +106,16 @@ int ipoib_open(struct net_device *dev)
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:44:02.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:46:20.000000000 +0300
+@@ -585,7 +585,7 @@ static void neigh_add_path(struct sk_buf
  
- 	ipoib_dbg(priv, "bringing up interface\n");
- 
--	if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
--		napi_enable(&priv->napi);
-+	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- 
- 	if (ipoib_pkey_dev_delay_open(dev))
- 		return 0;
- 
--	if (ipoib_ib_dev_open(dev)) {
--		napi_disable(&priv->napi);
--		return -EINVAL;
--	}
-+	if (ipoib_ib_dev_open(dev))
-+  		return -EINVAL;
- 
- 	if (ipoib_ib_dev_up(dev)) {
- 		ipoib_ib_dev_stop(dev, 1);
--		napi_disable(&priv->napi);
- 		return -EINVAL;
- 	}
- 
-@@ -152,7 +148,6 @@ static int ipoib_stop(struct net_device 
- 	ipoib_dbg(priv, "stopping interface\n");
- 
- 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
--	napi_disable(&priv->napi);
- 
- 	netif_stop_queue(dev);
- 
-@@ -604,7 +599,7 @@ static void neigh_add_path(struct sk_buf
- 
  	neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
  	if (!neigh) {
 -		++dev->stats.tx_dropped;
@@ -324,7 +305,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -667,7 +662,7 @@ err_list:
+@@ -646,7 +646,7 @@ err_list:
  err_path:
  	ipoib_neigh_free(dev, neigh);
  err_drop:
@@ -333,7 +314,7 @@
  	dev_kfree_skb_any(skb);
  
  	spin_unlock_irqrestore(&priv->lock, flags);
-@@ -714,7 +709,7 @@ static void unicast_arp_send(struct sk_b
+@@ -697,7 +697,7 @@ static void unicast_arp_send(struct sk_b
  			} else
  				__path_add(dev, path);
  		} else {
@@ -342,7 +323,7 @@
  			dev_kfree_skb_any(skb);
  		}
  
-@@ -733,7 +728,7 @@ static void unicast_arp_send(struct sk_b
+@@ -716,7 +716,7 @@ static void unicast_arp_send(struct sk_b
  		skb_push(skb, sizeof *phdr);
  		__skb_queue_tail(&path->queue, skb);
  	} else {
@@ -351,7 +332,7 @@
  		dev_kfree_skb_any(skb);
  	}
  
-@@ -790,7 +785,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -773,7 +773,7 @@ static int ipoib_start_xmit(struct sk_bu
  			__skb_queue_tail(&neigh->queue, skb);
  			spin_unlock_irqrestore(&priv->lock, flags);
  		} else {
@@ -360,7 +341,7 @@
  			dev_kfree_skb_any(skb);
  		}
  	} else {
-@@ -816,7 +811,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -799,7 +799,7 @@ static int ipoib_start_xmit(struct sk_bu
  					   IPOIB_QPN(phdr->hwaddr),
  					   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
  				dev_kfree_skb_any(skb);
@@ -369,7 +350,7 @@
  				return NETDEV_TX_OK;
  			}
  
-@@ -842,7 +837,7 @@ static void ipoib_timeout(struct net_dev
+@@ -825,7 +825,7 @@ static void ipoib_timeout(struct net_dev
  static int ipoib_hard_header(struct sk_buff *skb,
  			     struct net_device *dev,
  			     unsigned short type,
@@ -378,7 +359,7 @@
  {
  	struct ipoib_header *header;
  
-@@ -931,9 +926,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
+@@ -914,9 +914,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
  void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
  {
  	struct sk_buff *skb;
@@ -390,7 +371,7 @@
  		dev_kfree_skb_any(skb);
  	}
  	if (ipoib_cm_get(neigh))
-@@ -1008,10 +1004,6 @@ void ipoib_dev_cleanup(struct net_device
+@@ -991,10 +992,6 @@ void ipoib_dev_cleanup(struct net_device
  	priv->tx_ring = NULL;
  }
  
@@ -401,7 +382,7 @@
  static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
  		       void **tcph, u64 *hdr_flags, void *priv)
  {
-@@ -1069,13 +1061,13 @@ static void ipoib_setup(struct net_devic
+@@ -1052,13 +1049,13 @@ static void ipoib_setup(struct net_devic
  	dev->change_mtu		 = ipoib_change_mtu;
  	dev->hard_start_xmit	 = ipoib_start_xmit;
  	dev->tx_timeout		 = ipoib_timeout;
@@ -418,10 +399,10 @@
  
  	dev->watchdog_timeo	 = HZ;
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:46:20.000000000 +0300
 @@ -100,7 +100,7 @@ static void ipoib_mcast_free(struct ipoi
  	}
  
@@ -448,7 +429,7 @@
  			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
  		}
  		netif_tx_unlock_bh(dev);
-@@ -667,7 +668,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -677,7 +678,7 @@ void ipoib_mcast_send(struct net_device 
  	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)		||
  	    !priv->broadcast					||
  	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
@@ -457,7 +438,7 @@
  		dev_kfree_skb_any(skb);
  		goto unlock;
  	}
-@@ -682,7 +683,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -692,7 +693,7 @@ void ipoib_mcast_send(struct net_device 
  		if (!mcast) {
  			ipoib_warn(priv, "unable to allocate memory for "
  				   "multicast structure\n");
@@ -466,7 +447,7 @@
  			dev_kfree_skb_any(skb);
  			goto out;
  		}
-@@ -697,7 +698,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -707,7 +708,7 @@ void ipoib_mcast_send(struct net_device 
  		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
  			skb_queue_tail(&mcast->pkt_queue, skb);
  		else {

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,26 @@
+---
+ drivers/infiniband/ulp/ipoib/ipoib_fs.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+===================================================================
+--- ofa_kernel-1.4.orig/drivers/infiniband/ulp/ipoib/ipoib_fs.c
++++ ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+@@ -134,7 +134,7 @@ static int ipoib_mcg_open(struct inode *
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_mcg_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_mcg_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -240,7 +240,7 @@ static int ipoib_path_open(struct inode 
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_path_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_path_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -5,13 +5,13 @@
 
 Signed-off-by: Doron Shoham <dorons at voltaire.com>
 ---
- drivers/scsi/scsi_transport_iscsi.c |   95 ++++++++++++++++++++----------------
- 1 file changed, 55 insertions(+), 40 deletions(-)
+ drivers/scsi/scsi_transport_iscsi.c |   97 +++++++++++++++++++++---------------
+ 1 file changed, 57 insertions(+), 40 deletions(-)
 
-Index: ofed_kernel/drivers/scsi/scsi_transport_iscsi.c
+Index: ofa_kernel-1.4/drivers/scsi/scsi_transport_iscsi.c
 ===================================================================
---- ofed_kernel.orig/drivers/scsi/scsi_transport_iscsi.c
-+++ ofed_kernel/drivers/scsi/scsi_transport_iscsi.c
+--- ofa_kernel-1.4.orig/drivers/scsi/scsi_transport_iscsi.c
++++ ofa_kernel-1.4/drivers/scsi/scsi_transport_iscsi.c
 @@ -20,6 +20,8 @@
   * along with this program; if not, write to the Free Software
   * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
@@ -21,7 +21,18 @@
  #include <linux/module.h>
  #include <linux/mutex.h>
  #include <net/tcp.h>
-@@ -397,10 +399,12 @@ static void __iscsi_unblock_session(stru
+@@ -378,8 +380,10 @@ static void __iscsi_unblock_session(stru
+ 	struct iscsi_cls_session *session =
+ 			container_of(work, struct iscsi_cls_session,
+ 				     unblock_work);
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
+ 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
+ 	struct iscsi_host *ihost = shost->shost_data;
++#endif
+ 	unsigned long flags;
+ 
+ 	/*
+@@ -397,10 +401,12 @@ static void __iscsi_unblock_session(stru
  	 * the async scanning code (drivers like iscsi_tcp do login and
  	 * scanning from userspace).
  	 */
@@ -38,7 +49,7 @@
  }
  
  /**
-@@ -1294,45 +1298,56 @@ iscsi_if_recv_msg(struct sk_buff *skb, s
+@@ -1294,45 +1300,56 @@ iscsi_if_recv_msg(struct sk_buff *skb, s
   * Malformed skbs with wrong lengths or invalid creds are not processed.
   */
  static void
@@ -129,7 +140,7 @@
  	}
  	mutex_unlock(&rx_queue_mutex);
  }
-@@ -1738,7 +1753,7 @@ static __init int iscsi_transport_init(v
+@@ -1738,7 +1755,7 @@ static __init int iscsi_transport_init(v
  	return 0;
  
  release_nls:
@@ -138,7 +149,7 @@
  unregister_session_class:
  	transport_class_unregister(&iscsi_session_class);
  unregister_conn_class:
-@@ -1753,7 +1768,7 @@ unregister_transport_class:
+@@ -1753,7 +1770,7 @@ unregister_transport_class:
  static void __exit iscsi_transport_exit(void)
  {
  	destroy_workqueue(iscsi_eh_timer_workq);

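Editor's note: the iscsi compat patch above now also hides the Scsi_Host/iscsi_host lookup behind a LINUX_VERSION_CODE check, the usual way these backport patches keep a single scsi_transport_iscsi.c building on RHEL5/SLES10-era kernels as well as newer ones. Stripped to the bare pattern (all names below are placeholders, not the real iSCSI code):

	#include <linux/version.h>
	#include <linux/workqueue.h>

	struct example_host    { struct work_struct scan_work; };
	struct example_session { struct example_host *host; int unblocked; };

	static void example_unblock_session(struct example_session *session)
	{
	#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
		/* only newer kernels have the async-scan machinery to kick */
		schedule_work(&session->host->scan_work);
	#endif
		/* path shared by every supported kernel */
		session->unblocked = 1;
	}
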
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/iw_nes_050_to_2_6_24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/iw_nes_050_to_2_6_24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/iw_nes_050_to_2_6_24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:23:23.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -1118,19 +1118,19 @@ static ssize_t nes_show_wqm_quanta(struc
  static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
  					const char *buf, size_t count)

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/iw_nes_100_to_2_6_23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/iw_nes_100_to_2_6_23.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/iw_nes_100_to_2_6_23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/Kconfig nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig
---- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:38:45.000000000 -0500
 @@ -2,7 +2,6 @@ config INFINIBAND_NES
  	tristate "NetEffect RNIC Driver"
  	depends on PCI && INET && INFINIBAND
@@ -10,8 +10,8 @@
  	  This is a low-level driver for NetEffect RDMA enabled
  	  Network Interface Cards (RNIC).
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -360,10 +360,11 @@ struct ib_qp *nes_get_qp(struct ib_devic
   */
  static void nes_print_macaddr(struct net_device *netdev)
@@ -29,9 +29,9 @@
  
  /**
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_cm.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2008-11-07 15:26:56.000000000 -0600
-@@ -998,7 +998,6 @@ static int nes_addr_resolve_neigh(struct
+--- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1086,7 +1086,6 @@ static int nes_addr_resolve_neigh(struct
  	struct flowi fl;
  	struct neighbour *neigh;
  	int rc = -1;
@@ -39,7 +39,7 @@
  
  	memset(&fl, 0, sizeof fl);
  	fl.nl_u.ip4_u.daddr = htonl(dst_ip);
-@@ -1012,8 +1011,11 @@ static int nes_addr_resolve_neigh(struct
+@@ -1100,8 +1099,11 @@ static int nes_addr_resolve_neigh(struct
  	if (neigh) {
  		if (neigh->nud_state & NUD_VALID) {
  			nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -53,7 +53,7 @@
  			nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
  					     dst_ip, NES_ARP_ADD);
  			rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
-@@ -1042,7 +1044,6 @@ static struct nes_cm_node *make_cm_node(
+@@ -1130,7 +1132,6 @@ static struct nes_cm_node *make_cm_node(
  	int arpindex = 0;
  	struct nes_device *nesdev;
  	struct nes_adapter *nesadapter;
@@ -61,7 +61,7 @@
  
  	/* create an hte and cm_node for this instance */
  	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
-@@ -1111,8 +1112,11 @@ static struct nes_cm_node *make_cm_node(
+@@ -1197,8 +1198,11 @@ static struct nes_cm_node *make_cm_node(
  
  	/* copy the mac addr to node context */
  	memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
@@ -76,8 +76,8 @@
  	add_hte_node(cm_core, cm_node);
  	atomic_inc(&cm_nodes_created);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2008-11-07 15:38:18.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:38:45.000000000 -0500
 @@ -38,14 +38,9 @@
  #include <linux/ip.h>
  #include <linux/tcp.h>
@@ -90,10 +90,10 @@
 -module_param(nes_lro_max_aggr, uint, 0444);
 -MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
 -
- static u32 crit_err_count;
- u32 int_mod_timer_init;
- u32 int_mod_cq_depth_256;
-@@ -1540,25 +1535,6 @@ static void nes_rq_wqes_timeout(unsigned
+ static int wide_ppm_offset;
+ module_param(wide_ppm_offset, int, 0644);
+ MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm");
+@@ -1531,25 +1526,6 @@ static void nes_rq_wqes_timeout(unsigned
  }
  
  
@@ -119,7 +119,7 @@
  /**
   * nes_init_nic_qp
   */
-@@ -1783,14 +1759,6 @@ int nes_init_nic_qp(struct nes_device *n
+@@ -1773,14 +1749,6 @@ int nes_init_nic_qp(struct nes_device *n
  			jumbomode = 1;
  		nes_nic_init_timer_defaults(nesdev, jumbomode);
  	}
@@ -134,7 +134,7 @@
  	return 0;
  }
  
-@@ -2541,7 +2509,7 @@ static void nes_nic_napi_ce_handler(stru
+@@ -2556,7 +2524,7 @@ static void nes_nic_napi_ce_handler(stru
  {
  	struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
  
@@ -143,7 +143,7 @@
  }
  
  
-@@ -2576,13 +2544,10 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2591,13 +2559,10 @@ void nes_nic_ce_handler(struct nes_devic
  	u16 pkt_type;
  	u16 rqes_processed = 0;
  	u8 sq_cqes = 0;
@@ -157,7 +157,7 @@
  	do {
  		if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
  				NES_NIC_CQE_VALID) {
-@@ -2714,17 +2679,9 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2729,17 +2694,9 @@ void nes_nic_ce_handler(struct nes_devic
  							>> 16);
  					nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
  							nesvnic->netdev->name, vlan_tag);
@@ -178,7 +178,7 @@
  
  skip_rx_indicate0:
  				nesvnic->netdev->last_rx = jiffies;
-@@ -2755,8 +2712,6 @@ skip_rx_indicate0:
+@@ -2770,8 +2727,6 @@ skip_rx_indicate0:
  
  	} while (1);
  
@@ -188,18 +188,18 @@
  		barrier();
  		/* restart the queue if it had been stopped */
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.h nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:38:45.000000000 -0500
 @@ -33,8 +33,6 @@
  #ifndef __NES_HW_H
  #define __NES_HW_H
  
 -#include <linux/inet_lro.h>
 -
+ #define NES_PHY_TYPE_CX4       1
  #define NES_PHY_TYPE_1G        2
  #define NES_PHY_TYPE_IRIS      3
- #define NES_PHY_TYPE_ARGUS     4
-@@ -991,8 +989,6 @@ struct nes_hw_tune_timer {
+@@ -1005,8 +1003,6 @@ struct nes_hw_tune_timer {
  #define NES_TIMER_ENABLE_LIMIT      4
  #define NES_MAX_LINK_INTERRUPTS     128
  #define NES_MAX_LINK_CHECK          200
@@ -208,7 +208,7 @@
  
  struct nes_adapter {
  	u64              fw_ver;
-@@ -1168,7 +1164,6 @@ struct nes_vnic {
+@@ -1194,7 +1190,6 @@ struct nes_vnic {
  	u32               msg_enable;
  	/* u32 tx_avail; */
  	__be32            local_ipaddr;
@@ -216,7 +216,7 @@
  	spinlock_t           tx_lock;	/* could use netdev tx lock? */
  	struct timer_list    rq_wqes_timer;
  	u32                  nic_mem_size;
-@@ -1196,9 +1191,6 @@ struct nes_vnic {
+@@ -1222,9 +1217,6 @@ struct nes_vnic {
  	u8  of_device_registered;
  	u8  rdma_enabled;
  	u8  rx_checksum_disabled;
@@ -227,8 +227,8 @@
  
  struct nes_ib_device {
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
 @@ -96,35 +96,38 @@ static int nics_per_function = 1;
  /**
   * nes_netdev_poll
@@ -292,7 +292,7 @@
  	netif_stop_queue(netdev);
  	list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
  		first_nesvnic = container_of(list_pos, struct nes_vnic, list);
-@@ -797,14 +798,16 @@ static int nes_netdev_set_mac_address(st
+@@ -775,14 +776,16 @@ static int nes_netdev_set_mac_address(st
  	int i;
  	u32 macaddr_low;
  	u16 macaddr_high;
@@ -312,7 +312,7 @@
  	macaddr_high  = ((u16)netdev->dev_addr[0]) << 8;
  	macaddr_high += (u16)netdev->dev_addr[1];
  	macaddr_low   = ((u32)netdev->dev_addr[2]) << 24;
-@@ -909,11 +912,11 @@ static void nes_netdev_set_multicast_lis
+@@ -887,11 +890,11 @@ static void nes_netdev_set_multicast_lis
  			if (mc_index >= max_pft_entries_avaiable)
  				break;
  			if (multicast_addr) {
@@ -329,7 +329,7 @@
  				macaddr_high  = ((u16)multicast_addr->dmi_addr[0]) << 8;
  				macaddr_high += (u16)multicast_addr->dmi_addr[1];
  				macaddr_low   = ((u32)multicast_addr->dmi_addr[2]) << 24;
-@@ -1063,9 +1066,6 @@ static const char nes_ethtool_stringset[
+@@ -1040,9 +1043,6 @@ static const char nes_ethtool_stringset[
  	"CQ Depth 32",
  	"CQ Depth 128",
  	"CQ Depth 256",
@@ -339,17 +339,17 @@
  };
  
  #define NES_ETHTOOL_STAT_COUNT  ARRAY_SIZE(nes_ethtool_stringset)
-@@ -1263,9 +1263,6 @@ static void nes_netdev_get_ethtool_stats
- 	target_stat_values[52] = int_mod_cq_depth_32;
- 	target_stat_values[53] = int_mod_cq_depth_128;
- 	target_stat_values[54] = int_mod_cq_depth_256;
--	target_stat_values[55] = nesvnic->lro_mgr.stats.aggregated;
--	target_stat_values[56] = nesvnic->lro_mgr.stats.flushed;
--	target_stat_values[57] = nesvnic->lro_mgr.stats.no_desc;
+@@ -1240,9 +1240,6 @@ static void nes_netdev_get_ethtool_stats
+ 	target_stat_values[++index] = int_mod_cq_depth_32;
+ 	target_stat_values[++index] = int_mod_cq_depth_128;
+ 	target_stat_values[++index] = int_mod_cq_depth_256;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
  
  }
  
-@@ -1542,8 +1539,6 @@ static struct ethtool_ops nes_ethtool_op
+@@ -1525,8 +1522,6 @@ static struct ethtool_ops nes_ethtool_op
  	.set_sg = ethtool_op_set_sg,
  	.get_tso = ethtool_op_get_tso,
  	.set_tso = ethtool_op_set_tso,
@@ -358,7 +358,7 @@
  };
  
  
-@@ -1615,7 +1610,8 @@ struct net_device *nes_netdev_init(struc
+@@ -1598,7 +1593,8 @@ struct net_device *nes_netdev_init(struc
  	netdev->type = ARPHRD_ETHER;
  	netdev->features = NETIF_F_HIGHDMA;
  	netdev->ethtool_ops = &nes_ethtool_ops;
@@ -368,7 +368,7 @@
  	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
  	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
  	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
-@@ -1648,7 +1644,6 @@ struct net_device *nes_netdev_init(struc
+@@ -1630,7 +1626,6 @@ struct net_device *nes_netdev_init(struc
  
  	if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
  		netdev->features |= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/iw_nes_200_to_2_6_22.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/iw_nes_200_to_2_6_22.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/iw_nes_200_to_2_6_22.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:52:27.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -593,16 +593,22 @@ static int __devinit nes_probe(struct pc
  						nesdev->nesadapter->port_count;
  	}
@@ -35,9 +35,9 @@
  
  	tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.h nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h
---- linux-2.6/drivers/infiniband/hw/nes/nes.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2008-11-07 15:52:27.000000000 -0600
-@@ -276,14 +276,7 @@ static inline __le32 get_crc_value(struc
+--- linux-2.6/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:38:45.000000000 -0500
+@@ -274,14 +274,7 @@ static inline __le32 get_crc_value(struc
  	u32 crc_value;
  	crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad));
  

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/iw_nes_210_to_2_6_21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/iw_nes_210_to_2_6_21.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/iw_nes_210_to_2_6_21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,23 @@
+diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1549,6 +1549,11 @@ static void nes_netdev_vlan_rx_register(
+ 	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ }
+ 
++static void nes_netdev_vlan_rx_kill_vid(struct net_device *netdev, 
++					unsigned short vid)
++{
++}
++
+ 
+ /**
+  * nes_netdev_init - initialize network device
+@@ -1598,6 +1603,7 @@ struct net_device *nes_netdev_init(struc
+ 	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
+ 	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ 	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
++	netdev->vlan_rx_kill_vid = nes_netdev_vlan_rx_kill_vid;
+ 
+ 	/* Fill in the port structure */
+ 	nesvnic->netdev = netdev;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/mlx4_en_0099_no_multiqueue.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/mlx4_en_0099_no_multiqueue.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/mlx4_en_0099_no_multiqueue.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,73 @@
+diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
+index bce700a..a9ee0d7 100644
+--- a/drivers/net/mlx4/en_netdev.c
++++ b/drivers/net/mlx4/en_netdev.c
+@@ -963,7 +963,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	int i;
+ 	int err;
+ 
+-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
++	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+ 	if (dev == NULL) {
+ 		mlx4_err(mdev, "Net device allocation failed\n");
+ 		return -ENOMEM;
+@@ -1036,7 +1036,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	dev->open = &mlx4_en_open;
+ 	dev->stop = &mlx4_en_close;
+ 	dev->hard_start_xmit = &mlx4_en_xmit;
+-	dev->select_queue = &mlx4_en_select_queue;
+ 	dev->get_stats = &mlx4_en_get_stats;
+ 	dev->set_multicast_list = &mlx4_en_set_multicast;
+ 	dev->set_mac_address = &mlx4_en_set_mac;
+diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
+index 3d8246f..72e166f 100644
+--- a/drivers/net/mlx4/en_tx.c
++++ b/drivers/net/mlx4/en_tx.c
+@@ -392,7 +392,7 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+ 			 *   transmission on that ring would stop the queue.
+ 			 */
+ 			ring->blocked = 0;
+-			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
++			netif_wake_queue(dev);
+ 			priv->port_stats.wake_queue++;
+ 		}
+ 	}
+@@ -612,7 +612,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
+ 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ }
+ 
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
++static int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	u16 vlan_tag = 0;
+@@ -703,7 +703,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+-	tx_ind = skb->queue_mapping;
++	tx_ind = mlx4_en_select_queue(dev, skb);
+ 	ring = &priv->tx_ring[tx_ind];
+ 	if (priv->vlgrp && vlan_tx_tag_present(skb))
+ 		vlan_tag = vlan_tx_tag_get(skb);
+@@ -713,7 +713,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ 		/* every full Tx ring stops queue.
+ 		 * TODO: implement multi-queue support (per-queue stop) */
+-		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
++		netif_stop_queue(dev);
+ 		ring->blocked = 1;
+ 		priv->port_stats.queue_stopped++;
+ 
+diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
+index adef17c..995e318 100644
+--- a/drivers/net/mlx4/mlx4_en.h
++++ b/drivers/net/mlx4/mlx4_en.h
+@@ -521,7 +521,6 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq);
+ void mlx4_en_poll_tx_cq(unsigned long data);
+ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+ 
+ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+ 			   u32 size, u16 stride);

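[Editor's note] The added mlx4_en_0099_no_multiqueue.patch above drops the multiqueue TX API that 2.6.16 lacks (alloc_etherdev_mq(), netif_tx_stop_queue()/netif_tx_wake_queue(), skb->queue_mapping) and instead has the driver pick the TX ring itself via a now-static mlx4_en_select_queue(), stopping and waking the single device queue whenever any ring fills or drains. As a rough, self-contained user-space sketch of that fallback only -- the names, the hash, and the sizes below are illustrative, not the mlx4_en code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_TX_RINGS 4
#define RING_SIZE    8

struct tx_ring {
	unsigned int used;      /* descriptors in flight */
	bool blocked;           /* this ring filled up and stopped the device */
};

struct dev_model {
	struct tx_ring ring[NUM_TX_RINGS];
	bool queue_stopped;     /* single netif_{stop,wake}_queue() state */
};

/* Driver-internal ring selection: stand-in for mlx4_en_select_queue(). */
static unsigned int select_ring(uint32_t flow_hash)
{
	return flow_hash % NUM_TX_RINGS;
}

/* Stand-in for the xmit path: a full ring stops the whole device queue. */
static void xmit(struct dev_model *dev, uint32_t flow_hash)
{
	struct tx_ring *ring = &dev->ring[select_ring(flow_hash)];

	ring->used++;
	if (ring->used >= RING_SIZE) {
		ring->blocked = true;
		dev->queue_stopped = true;   /* netif_stop_queue(dev) */
	}
}

/* Stand-in for TX completion: only a blocked ring may wake the device. */
static void tx_complete(struct dev_model *dev, unsigned int ring_idx)
{
	struct tx_ring *ring = &dev->ring[ring_idx];

	if (ring->used)
		ring->used--;
	if (ring->blocked && ring->used < RING_SIZE) {
		ring->blocked = false;
		dev->queue_stopped = false;  /* netif_wake_queue(dev) */
	}
}

int main(void)
{
	struct dev_model dev = { 0 };
	uint32_t hash;

	for (hash = 0; hash < 40; hash++)
		xmit(&dev, hash);
	printf("stopped after burst: %d\n", dev.queue_stopped);
	for (hash = 0; hash < 3; hash++)
		tx_complete(&dev, 0);
	printf("stopped after completions on ring 0: %d\n", dev.queue_stopped);
	return 0;
}

This mirrors the per-ring "blocked" flag in the hunks above; per-queue stop/wake remains a TODO in the real driver, as its own comment notes.
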
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/mlx4_en_0100_to_2.6.24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/mlx4_en_0100_to_2.6.24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/mlx4_en_0100_to_2.6.24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,33 +1,5 @@
-From 6d0a6cc6981404e95297924c9a8f169b3447a98e Mon Sep 17 00:00:00 2001
-From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
-Date: Thu, 2 Oct 2008 11:26:30 +0300
-Subject: [PATCH] mlx4_en: LRO backport patch
-
-Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
----
- drivers/net/mlx4/Makefile    |    2 +-
- drivers/net/mlx4/en_cq.c     |   23 ++-
- drivers/net/mlx4/en_lro.c    |  529 ++++++++++++++++++++++++++++++++++++++++++
- drivers/net/mlx4/en_netdev.c |    5 +-
- drivers/net/mlx4/en_params.c |   19 +--
- drivers/net/mlx4/en_rx.c     |  121 +++-------
- drivers/net/mlx4/en_tx.c     |    1 +
- drivers/net/mlx4/mlx4_en.h   |   60 +++++-
- 8 files changed, 646 insertions(+), 114 deletions(-)
- create mode 100644 drivers/net/mlx4/en_lro.c
-
-diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
-index acb1b91..e6bfdec 100644
---- a/drivers/net/mlx4/Makefile
-+++ b/drivers/net/mlx4/Makefile
-@@ -5,4 +5,4 @@ mlx4_core-y :=  alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-
- obj-$(CONFIG_MLX4_EN)                  += mlx4_en.o
-
--mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o
-+mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o en_lro.o
 diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
-index cf9c5df..2348bfc 100644
+index a4d6e46..440633a 100644
 --- a/drivers/net/mlx4/en_cq.c
 +++ b/drivers/net/mlx4/en_cq.c
 @@ -34,6 +34,7 @@
@@ -38,7 +10,7 @@
  
  #include "mlx4_en.h"
  
-@@ -100,8 +101,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -106,8 +107,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  		cq->timer.function = mlx4_en_poll_tx_cq;
  		cq->timer.data = (unsigned long) cq;
  	} else {
@@ -58,7 +30,7 @@
  	}
  
  	return 0;
-@@ -123,8 +133,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -129,8 +139,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  
  	if (cq->is_tx)
  		del_timer(&cq->timer);
@@ -74,548 +46,8 @@
  
  	mlx4_cq_free(mdev->dev, &cq->mcq);
  }
-diff --git a/drivers/net/mlx4/en_lro.c b/drivers/net/mlx4/en_lro.c
-new file mode 100644
-index 0000000..9d17376
---- /dev/null
-+++ b/drivers/net/mlx4/en_lro.c
-@@ -0,0 +1,534 @@
-+/*
-+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses.  You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ *     Redistribution and use in source and binary forms, with or
-+ *     without modification, are permitted provided that the following
-+ *     conditions are met:
-+ *
-+ *      - Redistributions of source code must retain the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer.
-+ *
-+ *      - Redistributions in binary form must reproduce the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer in the documentation and/or other materials
-+ *        provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ */
-+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <net/tcp.h>
-+#include <linux/if_vlan.h>
-+#include <linux/delay.h>
-+
-+#include "mlx4_en.h"
-+
-+/* LRO hash function - using sum of source and destination port LSBs is
-+ * good enough */
-+#define LRO_INDEX(th, size) \
-+	((*((u8*) &th->source + 1) + *((u8*) &th->dest + 1)) & (size - 1))
-+
-+/* #define CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+static void mlx4_en_lro_validate(struct mlx4_en_priv* priv, struct mlx4_en_lro *lro)
-+{
-+	int i;
-+	int size, size2;
-+	struct sk_buff *skb = lro->skb;
-+	skb_frag_t *frags;
-+	int len, len2;
-+	int cur_skb = 0;
-+
-+	/* Sum fragment sizes of first skb */
-+	len = skb->len;
-+	size = skb_headlen(skb);
-+	frags = skb_shinfo(skb)->frags;
-+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+		size += frags[i].size;
-+	}
-+
-+	/* Add in fragments of linked skb's */
-+	skb = skb_shinfo(skb)->frag_list;
-+	while (skb) {
-+		cur_skb++;
-+		len2 = skb->len;
-+		if (skb_headlen(skb)) {
-+			mlx4_err(priv->mdev, "Bad LRO format: non-zero headlen "
-+				  "in fraglist (skb:%d)\n", cur_skb);
-+			return;
-+		}
-+
-+		size2 = 0;
-+		frags = skb_shinfo(skb)->frags;
-+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+			size2 += frags[i].size;
-+		}
-+
-+		if (size2 != len2) {
-+			mlx4_err(priv->mdev, "Bad skb size:%d in LRO fraglist. "
-+			          "Expected:%d (skb:%d)\n", size2, len2, cur_skb);
-+			return;
-+		}
-+		size += size2;
-+		skb = skb->next;
-+	}
-+
-+	if (size != len)
-+		mlx4_err(priv->mdev, "Bad LRO size:%d expected:%d\n", size, len);
-+}
-+#endif /* MLX4_EN_DEBUG_LRO */
-+
-+static void mlx4_en_lro_flush_single(struct mlx4_en_priv* priv,
-+		   struct mlx4_en_rx_ring* ring, struct mlx4_en_lro *lro)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct sk_buff *skb = lro->skb;
-+	struct iphdr *iph = (struct iphdr *) skb->data;
-+	struct tcphdr *th = (struct tcphdr *)(iph + 1);
-+	unsigned int headlen = skb_headlen(skb);
-+	__wsum tcp_hdr_csum;
-+	u32 *ts;
-+
-+	/* Update IP length and checksum */
-+	iph->tot_len = htons(lro->tot_len);
-+	iph->check = 0;
-+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-+
-+	/* Update latest TCP ack, window, psh, and timestamp */
-+	th->ack_seq = lro->ack_seq;
-+	th->window = lro->window;
-+	th->psh = !!lro->psh;
-+	if (lro->has_timestamp) {
-+		ts = (u32 *) (th + 1);
-+		ts[1] = htonl(lro->tsval);
-+		ts[2] = lro->tsecr;
-+	}
-+	th->check = 0;
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, 0);
-+	lro->data_csum = csum_add(lro->data_csum, tcp_hdr_csum);
-+	th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
-+				      lro->tot_len - (iph->ihl << 2),
-+				      IPPROTO_TCP, lro->data_csum);
-+
-+	/* Update skb */
-+	skb->len = lro->tot_len;
-+	skb->data_len = lro->tot_len - headlen;
-+	skb->truesize = skb->len + sizeof(struct sk_buff);
-+	skb_shinfo(skb)->gso_size = lro->mss;
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+	mlx4_en_lro_validate(priv, lro);
-+#endif /* CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+	/* Push it up the stack */
-+	if (priv->vlgrp && lro->has_vlan)
-+		vlan_hwaccel_receive_skb(skb, priv->vlgrp,
-+					be16_to_cpu(lro->vlan_prio));
-+	else
-+		netif_receive_skb(skb);
-+	priv->dev->last_rx = jiffies;
-+
-+	/* Increment stats */
-+	priv->port_stats.lro_flushed++;
-+
-+	/* Move session back to the free list */
-+	hlist_del(&lro->node);
-+	hlist_del(&lro->flush_node);
-+	hlist_add_head(&lro->node, &ring->lro_free);
-+}
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_flush, flush_node) {
-+		if (all || time_after(jiffies, lro->expires))
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+	}
-+}
-+
-+static inline int mlx4_en_lro_append(struct mlx4_en_priv *priv,
-+				   struct mlx4_en_lro *lro,
-+				   struct mlx4_en_rx_desc *rx_desc,
-+				   struct skb_frag_struct *skb_frags,
-+				   struct mlx4_en_rx_alloc *page_alloc,
-+				   unsigned int data_len,
-+				   int hlen)
-+{
-+	struct sk_buff *skb = lro->skb_last;
-+	struct skb_shared_info *info;
-+	struct skb_frag_struct *frags_copy;
-+	int nr_frags;
-+
-+	if (skb_shinfo(skb)->nr_frags + priv->num_frags > MAX_SKB_FRAGS)
-+		return -ENOMEM;
-+
-+	info = skb_shinfo(skb);
-+
-+	/* Copy fragments from descriptor ring to skb */
-+	frags_copy = info->frags + info->nr_frags;
-+	nr_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
-+						frags_copy,
-+						page_alloc,
-+						data_len + hlen);
-+	if (!nr_frags) {
-+		mlx4_dbg(DRV, priv, "Failed completing rx desc during LRO append\n");
-+		return -ENOMEM;
-+	}
-+
-+	/* Skip over headers */
-+	frags_copy[0].page_offset += hlen;
-+
-+	if (nr_frags == 1)
-+		frags_copy[0].size = data_len;
-+	else {
-+		/* Adjust size of last fragment to match packet length.
-+		 * Note: if this fragment is also the first one, the
-+		 *       operation is completed in the next line */
-+		frags_copy[nr_frags - 1].size = hlen + data_len -
-+				priv->frag_info[nr_frags - 1].frag_prefix_size;
-+
-+		/* Adjust size of first fragment */
-+		frags_copy[0].size -= hlen;
-+	}
-+
-+	/* Update skb bookkeeping */
-+	skb->len += data_len;
-+	skb->data_len += data_len;
-+	info->nr_frags += nr_frags;
-+	return 0;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_find_session(struct mlx4_en_dev *mdev,
-+						       struct mlx4_en_rx_ring *ring,
-+						       struct iphdr *iph,
-+						       struct tcphdr *th)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node;
-+	int index = LRO_INDEX(th, mdev->profile.num_lro);
-+	struct hlist_head *list = &ring->lro_hash[index];
-+
-+	hlist_for_each_entry(lro, node, list, node) {
-+		if (lro->sport_dport == *((u32*) &th->source) &&
-+		    lro->saddr == iph->saddr &&
-+		    lro->daddr == iph->daddr)
-+			return lro;
-+	}
-+	return NULL;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_alloc_session(struct mlx4_en_priv *priv,
-+							struct mlx4_en_rx_ring *ring)
-+{
-+	return hlist_empty(&ring->lro_free) ? NULL :
-+		hlist_entry(ring->lro_free.first, struct mlx4_en_lro, node);
-+}
-+
-+static __wsum mlx4_en_lro_tcp_data_csum(struct iphdr *iph,
-+					struct tcphdr *th, int len)
-+{
-+	__wsum tcp_csum;
-+	__wsum tcp_hdr_csum;
-+	__wsum tcp_ps_hdr_csum;
-+
-+	tcp_csum = ~csum_unfold(th->check);
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, tcp_csum);
-+
-+	tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-+					     len + (th->doff << 2),
-+					     IPPROTO_TCP, 0);
-+
-+	return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
-+			tcp_ps_hdr_csum);
-+}
-+
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+					  struct mlx4_en_rx_desc *rx_desc,
-+					  struct skb_frag_struct *skb_frags,
-+					  unsigned int length,
-+					  struct mlx4_cqe *cqe)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct mlx4_en_lro *lro;
-+	struct sk_buff *skb;
-+	struct iphdr *iph;
-+	struct tcphdr *th;
-+	dma_addr_t dma;
-+	int tcp_hlen;
-+	int tcp_data_len;
-+	int hlen;
-+	u16 ip_len;
-+	void *va;
-+	u32 *ts;
-+	u32 seq;
-+	u32 tsval = (u32) ~0UL;
-+	u32 tsecr = 0;
-+	u32 ack_seq;
-+	u16 window;
-+
-+	/* This packet is eligible for LRO if it is:
-+	 * - DIX Ethernet (type interpretation)
-+	 * - TCP/IP (v4)
-+	 * - without IP options
-+	 * - not an IP fragment */
-+	if (!mlx4_en_can_lro(cqe->status))
-+			return -1;
-+
-+	/* Get pointer to TCP header. We already know that the packet is DIX Ethernet/IPv4/TCP
-+	 * with no VLAN (HW stripped it) and no IP options */
-+	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
-+	iph = va + ETH_HLEN;
-+	th = (struct tcphdr *)(iph + 1);
-+
-+	/* Synchronsize headers for processing */
-+	dma = be64_to_cpu(rx_desc->data[0].addr);
-+#define MAX_LRO_HEADER		(ETH_HLEN + \
-+				 sizeof(*iph) + \
-+				 sizeof(*th) + \
-+				 TCPOLEN_TSTAMP_ALIGNED)
-+	dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
-+				      MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+
-+	/* We only handle aligned timestamp options */
-+	tcp_hlen = (th->doff << 2);
-+	if (tcp_hlen == sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) {
-+		ts = (u32*) (th + 1);
-+		if (unlikely(*ts != htonl((TCPOPT_NOP << 24) |
-+					  (TCPOPT_NOP << 16) |
-+					  (TCPOPT_TIMESTAMP << 8) |
-+					  TCPOLEN_TIMESTAMP)))
-+			goto sync_device;
-+		tsval = ntohl(ts[1]);
-+		tsecr = ts[2];
-+	} else if (tcp_hlen != sizeof(*th))
-+		goto sync_device;
-+	
-+
-+	/* At this point we know we have a TCP packet that is likely to be
-+	 * eligible for LRO. Therefore, see now if we have an oustanding
-+	 * session that corresponds to this packet so we could flush it if
-+	 * something still prevents LRO */
-+	lro = mlx4_en_lro_find_session(mdev, ring, iph, th);
-+
-+	/* ensure no bits set besides ack or psh */
-+	if (th->fin || th->syn || th->rst || th->urg || th->ece ||
-+	    th->cwr || !th->ack) {
-+		if (lro) {
-+			/* First flush session to keep packets in-order */
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		}
-+		goto sync_device;
-+	}
-+
-+	/* Get ip length and verify that the frame is big enough */
-+	ip_len = ntohs(iph->tot_len);
-+	if (unlikely(length < ETH_HLEN + ip_len)) {
-+		mlx4_warn(mdev, "Cannot LRO - ip payload exceeds frame!\n");
-+		goto sync_device;
-+	}
-+
-+	/* Get TCP payload length */
-+	tcp_data_len = ip_len - tcp_hlen - sizeof(struct iphdr);
-+	seq = ntohl(th->seq);
-+	if (!tcp_data_len)
-+		goto flush_session;
-+
-+	if (lro) {
-+		/* Check VLAN tag */
-+		if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+			if (cqe->sl_vid != lro->vlan_prio || !lro->has_vlan) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else if (lro->has_vlan) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* Check sequence number */
-+		if (unlikely(seq != lro->next_seq)) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* If the cummulative IP length is over 64K, flush and start
-+		 * a new session */
-+		if (lro->tot_len + tcp_data_len > 0xffff) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto new_session;
-+		}
-+
-+		/* Check timestamps */
-+		if (tcp_hlen != sizeof(*th)) {
-+			if (unlikely(lro->tsval > tsval || !tsecr))
-+				goto sync_device;
-+		}
-+
-+		window = th->window;
-+		ack_seq = th->ack_seq;
-+		if (likely(tcp_data_len)) {
-+			/* Append the data! */
-+			hlen = ETH_HLEN + sizeof(struct iphdr) + tcp_hlen;
-+			if (mlx4_en_lro_append(priv, lro, rx_desc, skb_frags,
-+							ring->page_alloc,
-+							tcp_data_len, hlen)) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else {
-+			/* No data */
-+			dma_sync_single_range_for_device(&mdev->dev->pdev->dev, dma,
-+							 0, MAX_LRO_HEADER,
-+							 DMA_FROM_DEVICE);
-+		}
-+
-+		/* Update session */
-+		lro->psh |= th->psh;
-+		lro->next_seq += tcp_data_len;
-+		lro->data_csum = csum_block_add(lro->data_csum,
-+					mlx4_en_lro_tcp_data_csum(iph, th,
-+								  tcp_data_len),
-+					lro->tot_len);
-+		lro->tot_len += tcp_data_len;
-+		lro->tsval = tsval;
-+		lro->tsecr = tsecr;
-+		lro->ack_seq = ack_seq;
-+		lro->window = window;
-+		if (tcp_data_len > lro->mss)
-+			lro->mss = tcp_data_len;
-+		priv->port_stats.lro_aggregated++;
-+		if (th->psh)
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		return 0;
-+	}
-+
-+new_session:
-+	if (th->psh)
-+		goto sync_device;
-+	lro = mlx4_en_lro_alloc_session(priv, ring);
-+	if (lro) {
-+		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags, ring->page_alloc,
-+							     ETH_HLEN + ip_len);
-+		if (skb) {
-+			int index;
-+
-+			/* Add in the skb */
-+			lro->skb = skb;
-+			lro->skb_last = skb;
-+			skb->protocol = eth_type_trans(skb, priv->dev);
-+			skb->ip_summed = CHECKSUM_UNNECESSARY;
-+
-+			/* Initialize session */
-+			lro->saddr = iph->saddr;
-+			lro->daddr = iph->daddr;
-+			lro->sport_dport = *((u32*) &th->source);
-+
-+			lro->next_seq = seq + tcp_data_len;
-+			lro->tot_len = ip_len;
-+			lro->psh = th->psh;
-+			lro->ack_seq = th->ack_seq;
-+			lro->window = th->window;
-+			lro->mss = tcp_data_len;
-+			lro->data_csum = mlx4_en_lro_tcp_data_csum(iph, th,
-+						tcp_data_len);
-+
-+			/* Handle vlans */
-+			if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+				lro->vlan_prio = cqe->sl_vid;
-+				lro->has_vlan = 1;
-+			} else
-+				lro->has_vlan = 0;
-+
-+			/* Handle timestamps */
-+			if (tcp_hlen != sizeof(*th)) {
-+				lro->tsval = tsval;
-+				lro->tsecr = tsecr;
-+				lro->has_timestamp = 1;
-+			} else {
-+				lro->tsval = (u32) ~0UL;
-+				lro->has_timestamp = 0;
-+			}
-+
-+			/* Activate this session */
-+			lro->expires = jiffies + HZ / 25;
-+			hlist_del(&lro->node);
-+			index = LRO_INDEX(th, mdev->profile.num_lro);
-+
-+			hlist_add_head(&lro->node, &ring->lro_hash[index]);
-+			hlist_add_head(&lro->flush_node, &ring->lro_flush);
-+			priv->port_stats.lro_aggregated++;
-+			return 0;
-+		}
-+	} else {
-+		priv->port_stats.lro_no_desc++;
-+	}
-+
-+flush_session:
-+	if (lro)
-+		mlx4_en_lro_flush_single(priv, ring, lro);
-+sync_device:
-+	dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
-+					 MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+	return -1;
-+}
-+
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_free, node) {
-+		hlist_del(&lro->node);
-+		kfree(lro);
-+	}
-+	kfree(ring->lro_hash);
-+}
-+
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro)
-+{
-+	struct mlx4_en_lro *lro;
-+	int i;
-+
-+	INIT_HLIST_HEAD(&ring->lro_free);
-+	INIT_HLIST_HEAD(&ring->lro_flush);
-+	ring->lro_hash = kmalloc(sizeof(struct hlist_head) * num_lro,
-+				 GFP_KERNEL);
-+	if (!ring->lro_hash)
-+		return -ENOMEM;
-+
-+	for (i = 0; i < num_lro; i++) {
-+		INIT_HLIST_HEAD(&ring->lro_hash[i]);
-+		lro = kzalloc(sizeof(struct mlx4_en_lro), GFP_KERNEL);
-+		if (!lro) {
-+			mlx4_en_lro_destroy(ring);
-+			return -ENOMEM;
-+		}
-+		INIT_HLIST_NODE(&lro->node);
-+		INIT_HLIST_NODE(&lro->flush_node);
-+		hlist_add_head(&lro->node, &ring->lro_free);
-+	}
-+	return 0;
-+}
-+
-+
 diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
-index 6199f4c..92f91d2 100644
+index 2d57ae6..3b394d7 100644
 --- a/drivers/net/mlx4/en_netdev.c
 +++ b/drivers/net/mlx4/en_netdev.c
 @@ -333,7 +333,8 @@ void mlx4_en_netpoll(struct net_device *dev)
@@ -625,10 +57,10 @@
 -		napi_synchronize(&cq->napi);
 +		while (test_bit(__LINK_STATE_RX_SCHED, &cq->poll_dev->state))
 +			msleep(1);
- 		mlx4_en_process_rx_cq(dev, cq, 0);
- 		spin_unlock_irqrestore(&cq->lock, flags);
- 	}
-@@ -757,8 +758,6 @@ void mlx4_en_stop_port(struct net_device *dev)
+ 		if (priv->rx_ring[i].use_frags)
+ 			mlx4_en_process_rx_cq(dev, cq, 0);
+ 		else
+@@ -755,8 +756,6 @@ void mlx4_en_stop_port(struct net_device *dev)
  	/* Free RX Rings */
  	for (i = 0; i < priv->rx_ring_num; i++) {
  		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -637,226 +69,11 @@
  		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
  	}
  }
-diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
-index 6e96d0a..2b0ca84 100644
---- a/drivers/net/mlx4/en_params.c
-+++ b/drivers/net/mlx4/en_params.c
-@@ -142,21 +142,6 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
-  * Ethtool support
-  */
- 
--static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
--{
--	int i;
--
--	priv->port_stats.lro_aggregated = 0;
--	priv->port_stats.lro_flushed = 0;
--	priv->port_stats.lro_no_desc = 0;
--
--	for (i = 0; i < priv->rx_ring_num; i++) {
--		priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
--		priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
--		priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
--	}
--}
--
- static void
- mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
- {
-@@ -266,8 +251,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
- 
- 	spin_lock_bh(&priv->stats_lock);
- 
--	mlx4_en_update_lro_stats(priv);
--
- 	for (i = 0; i < NUM_MAIN_STATS; i++)
- 		data[index++] = ((unsigned long *) &priv->stats)[i];
- 	for (i = 0; i < NUM_PORT_STATS; i++)
-@@ -457,7 +440,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
- 	.get_rx_csum = mlx4_en_get_rx_csum,
- 	.set_rx_csum = mlx4_en_set_rx_csum,
- 	.get_tx_csum = ethtool_op_get_tx_csum,
--	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
-+	.set_tx_csum = ethtool_op_set_tx_csum,
- 	.get_strings = mlx4_en_get_strings,
- 	.get_sset_count = mlx4_en_get_sset_count,
- 	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
 diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
-index 5a95d74..16f07bc 100644
+index 9d144cd..14b5faf 100644
 --- a/drivers/net/mlx4/en_rx.c
 +++ b/drivers/net/mlx4/en_rx.c
-@@ -37,6 +37,7 @@
- #include <linux/if_ether.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/etherdevice.h>
- 
- #include "mlx4_en.h"
- 
-@@ -51,18 +52,6 @@ static void mlx4_en_srq_event(struct mlx
- 	return;
- }
- 
--static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
--				   void **ip_hdr, void **tcpudp_hdr,
--				   u64 *hdr_flags, void *priv)
--{
--	*mac_hdr = page_address(frags->page) + frags->page_offset;
--	*ip_hdr = *mac_hdr + ETH_HLEN;
--	*tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
--	*hdr_flags = LRO_IPV4 | LRO_TCP;
--
--	return 0;
--}
--
- static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
- 			      struct mlx4_en_rx_desc *rx_desc,
- 			      struct skb_frag_struct *skb_frags,
-@@ -372,23 +361,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- 	}
- 	ring->buf = ring->wqres.buf.direct.buf;
- 
--	/* Configure lro mngr */
--	memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
--	ring->lro.dev = priv->dev;
--	ring->lro.features = LRO_F_NAPI;
--	ring->lro.frag_align_pad = NET_IP_ALIGN;
--	ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
--	ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
--	ring->lro.max_desc = mdev->profile.num_lro;
--	ring->lro.max_aggr = MAX_SKB_FRAGS;
--	ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
--				    sizeof(struct net_lro_desc),
--				    GFP_KERNEL);
--	if (!ring->lro.lro_arr) {
--		mlx4_err(mdev, "Failed to allocate lro array\n");
-+	/* Allocate LRO sessions */
-+	if (mlx4_en_lro_init(ring, mdev->profile.num_lro)) {
-+		mlx4_err(mdev, "Failed allocating lro sessions\n");
- 		goto err_map;
- 	}
--	ring->lro.get_frag_header = mlx4_en_get_frag_header;
- 
- 	return 0;
- 
-@@ -501,7 +478,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 
--	kfree(ring->lro.lro_arr);
-+	mlx4_en_lro_destroy(ring);
- 	mlx4_en_unmap_buffer(&ring->wqres.buf);
- 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
- 	vfree(ring->rx_info);
-@@ -522,12 +499,12 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- 
- 
- /* Unmap a completed descriptor and free unused pages */
--static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
--				    struct mlx4_en_rx_desc *rx_desc,
--				    struct skb_frag_struct *skb_frags,
--				    struct skb_frag_struct *skb_frags_rx,
--				    struct mlx4_en_rx_alloc *page_alloc,
--				    int length)
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct mlx4_en_frag_info *frag_info;
-@@ -570,11 +547,11 @@ fail:
- }
- 
- 
--static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
--				      struct mlx4_en_rx_desc *rx_desc,
--				      struct skb_frag_struct *skb_frags,
--				      struct mlx4_en_rx_alloc *page_alloc,
--				      unsigned int length)
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct sk_buff *skb;
-@@ -663,11 +640,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 	struct mlx4_cqe *cqe;
- 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
- 	struct skb_frag_struct *skb_frags;
--	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
- 	struct mlx4_en_rx_desc *rx_desc;
- 	struct sk_buff *skb;
- 	int index;
--	int nr;
- 	unsigned int length;
- 	int polled = 0;
- 	int ip_summed;
-@@ -718,38 +693,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 			if ((cqe->status & MLX4_CQE_STATUS_IPOK) &&
- 			    (cqe->checksum == 0xffff)) {
- 				priv->port_stats.rx_chksum_good++;
--				/* This packet is eligible for LRO if it is:
--				 * - DIX Ethernet (type interpretation)
--				 * - TCP/IP (v4)
--				 * - without IP options
--				 * - not an IP fragment */
--				if (mlx4_en_can_lro(cqe->status) &&
--				    dev->features & NETIF_F_LRO) {
--
--					nr = mlx4_en_complete_rx_desc(
--						priv, rx_desc,
--						skb_frags, lro_frags,
--						ring->page_alloc, length);
--					if (!nr)
--						goto next;
--
--					if (priv->vlgrp && (cqe->vlan_my_qpn &
--						MLX4_CQE_VLAN_PRESENT_MASK)) {
--						lro_vlan_hwaccel_receive_frags(
--						       &ring->lro, lro_frags,
--						       length, length,
--						       priv->vlgrp,
--						       be16_to_cpu(cqe->sl_vid),
--						       NULL, 0);
--					} else
--						lro_receive_frags(&ring->lro,
--								  lro_frags,
--								  length,
--								  length,
--								  NULL, 0);
--
-+				if (mdev->profile.num_lro &&
-+				    !mlx4_en_lro_rx(priv, ring, rx_desc,
-+						    skb_frags, length, cqe))
- 					goto next;
--				}
- 
- 				/* LRO not possible, complete processing here */
- 				ip_summed = CHECKSUM_UNNECESSARY;
-@@ -790,13 +737,15 @@ next:
- 		if (++polled == budget) {
- 			/* We are here because we reached the NAPI budget -
- 			 * flush only pending LRO sessions */
--			lro_flush_all(&ring->lro);
-+			if (mdev->profile.num_lro)
-+				mlx4_en_lro_flush(priv, ring, 0);
- 			goto out;
- 		}
- 	}
- 
- 	/* If CQ is empty flush all LRO sessions unconditionally */
--	lro_flush_all(&ring->lro);
-+	if (mdev->profile.num_lro)
-+		mlx4_en_lro_flush(priv, ring, 1);
- 
- out:
- 	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
-@@ -820,30 +769,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
+@@ -1041,30 +1041,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
  	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
  
  	if (priv->port_up)
@@ -877,8 +94,8 @@
  	int done;
 +	int work = min(*budget, poll_dev->quota);
  
--	done = mlx4_en_process_rx_cq(dev, cq, budget);
-+	done = mlx4_en_process_rx_cq(dev, cq, work);
+-	done = cq->process_cq(dev, cq, budget);
++	done = cq->process_cq(dev, cq, work);
 +	dev->quota -= done;
 +	*budget -= done;
  
@@ -901,76 +118,11 @@
  }
  
  
-diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
-index 2297fec..59f8772 100644
---- a/drivers/net/mlx4/en_tx.c
-+++ b/drivers/net/mlx4/en_tx.c
-@@ -37,6 +37,7 @@
- #include <linux/skbuff.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/tcp.h>
- 
- #include "mlx4_en.h"
- 
 diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
-index 1011803..7c8c489 100644
+index ebaed67..456cd91 100644
 --- a/drivers/net/mlx4/mlx4_en.h
 +++ b/drivers/net/mlx4/mlx4_en.h
-@@ -38,7 +38,7 @@
- #include <linux/list.h>
- #include <linux/mutex.h>
- #include <linux/netdevice.h>
--#include <linux/inet_lro.h>
-+#include <net/checksum.h>
- 
- #include <linux/mlx4/device.h>
- #include <linux/mlx4/qp.h>
-@@ -268,11 +268,42 @@ struct mlx4_en_rx_desc {
- 	struct mlx4_wqe_data_seg data[0];
- };
- 
-+struct mlx4_en_lro {
-+	struct hlist_node node;
-+	struct hlist_node flush_node;
-+
-+	/* Id fields come first: */
-+	u32 saddr;
-+	u32 daddr;
-+	u32 sport_dport;
-+	u32 next_seq;
-+	u16 tot_len;
-+	u8 psh;
-+
-+	u32 tsval;
-+	u32 tsecr;
-+	u32 ack_seq;
-+	u16 window;
-+	__be16 vlan_prio;
-+	u16 has_vlan;
-+	u16 has_timestamp;
-+	u16 mss;
-+	__wsum  data_csum;
-+
-+	unsigned long expires;
-+	struct sk_buff *skb;
-+	struct sk_buff *skb_last;
-+};
-+
-+
- struct mlx4_en_rx_ring {
- 	struct mlx4_srq srq;
- 	struct mlx4_hwq_resources wqres;
- 	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
--	struct net_lro_mgr lro;
-+	struct mlx4_en_lro lro;
-+	struct hlist_head *lro_hash;
-+	struct hlist_head lro_free;
-+	struct hlist_head lro_flush;
- 	u32 size ;	/* number of Rx descs*/
- 	u32 actual_size;
- 	u32 size_mask;
-@@ -311,7 +342,7 @@ struct mlx4_en_cq {
+@@ -328,7 +328,7 @@ struct mlx4_en_cq {
  	int                     ring;
  	spinlock_t              lock;
  	struct net_device      *dev;
@@ -979,43 +131,12 @@
  	/* Per-core Tx cq processing support */
  	struct timer_list timer;
  	int size;
-@@ -541,7 +572,7 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- int mlx4_en_process_rx_cq(struct net_device *dev,
- 			  struct mlx4_en_cq *cq,
- 			  int budget);
+@@ -571,7 +571,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
+ int mlx4_en_process_rx_cq_skb(struct net_device *dev,
+ 			      struct mlx4_en_cq *cq,
+ 			      int budget);
 -int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
 +int mlx4_en_poll_rx_cq(struct net_device *poll_dev, int *budget);
  void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
  			     int is_tx, int rss, int qpn, int cqn, int srqn,
  			     struct mlx4_qp_context *context);
-@@ -552,6 +583,27 @@ void mlx4_en_calc_rx_buf(struct net_device *dev);
- void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
- 				 struct mlx4_en_rss_map *rss_map,
- 				 int num_entries, int num_rings);
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all);
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+		   struct mlx4_en_rx_desc *rx_desc,
-+		   struct skb_frag_struct *skb_frags,
-+		   unsigned int length, struct mlx4_cqe *cqe);
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring);
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro);
-+
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv* priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length);
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv* priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length);
-+
- void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
- int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
- void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
--- 
-1.5.4
-

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/mlx4_en_0110_no_set_flags.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/mlx4_en_0110_no_set_flags.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp1/mlx4_en_0110_no_set_flags.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -7,9 +7,9 @@
 --- ofed_kernel.orig/drivers/net/mlx4/en_params.c
 +++ ofed_kernel/drivers/net/mlx4/en_params.c
 @@ -593,8 +593,6 @@ const struct ethtool_ops mlx4_en_ethtool
- 	.get_pauseparam = mlx4_en_get_pauseparam,
  	.set_pauseparam = mlx4_en_set_pauseparam,
  	.get_ringparam = mlx4_en_get_ringparam,
+ 	.set_ringparam = mlx4_en_set_ringparam,
 -	.get_flags = ethtool_op_get_flags,
 -	.set_flags = ethtool_op_set_flags,
  };

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/core_2_z010_sysfs_to_2.6.18.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/core_2_z010_sysfs_to_2.6.18.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/core_2_z010_sysfs_to_2.6.18.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,32 @@
+--- a/drivers/infiniband/core/device.c	2009-02-24 14:00:50.387541000 +0200
++++ b/drivers/infiniband/core/device.c	2009-02-24 14:00:54.891362000 +0200
+@@ -199,7 +199,7 @@ void ib_dealloc_device(struct ib_device 
+ 
+ 	BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);
+ 
+-	kobject_put(&device->dev.kobj);
++	ib_device_unregister_sysfs(device);
+ }
+ EXPORT_SYMBOL(ib_dealloc_device);
+ 
+@@ -356,8 +356,6 @@ void ib_unregister_device(struct ib_devi
+ 
+ 	mutex_unlock(&device_mutex);
+ 
+-	ib_device_unregister_sysfs(device);
+-
+ 	spin_lock_irqsave(&device->client_data_lock, flags);
+ 	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ 		kfree(context);
+--- a/drivers/infiniband/core/sysfs.c	2009-02-24 13:58:42.509754000 +0200
++++ b/drivers/infiniband/core/sysfs.c	2009-02-24 14:00:54.894368000 +0200
+@@ -863,9 +863,6 @@ void ib_device_unregister_sysfs(struct i
+ 	struct kobject *p, *t;
+ 	struct ib_port *port;
+ 
+-	/* Hold kobject until ib_dealloc_device() */
+-	kobject_get(&device->dev.kobj);
+-
+ 	list_for_each_entry_safe(p, t, &device->port_list, entry) {
+ 		list_del(&p->entry);
+ 		port = container_of(p, struct ib_port, kobj);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/core_z0010_sysfs_race.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/core_z0010_sysfs_race.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/core_z0010_sysfs_race.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,496 @@
+ib_core: avoid race condition between sysfs access and low-level module unload. (V2)
+
+In newer kernels, although a low-level module will not be unloaded (code)
+while its sysfs interface is being accessed, it is possible for the module to
+free all its resources (data) during such access.  This almost always causes
+a kernel Oops.
+
+To avoid this, we protect the device reg_state with a mutex, and perform
+all sysfs operations (show, store) atomically within this mutex.
+
+V2: fix thinko bug in sysfs_state_show changes (found by Ralph Campbell).
+
+Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
+Signed-off-by: Ralph Campbell <ralph.campbell at qlogic.com>
+
+Index: ofed_kernel/drivers/infiniband/core/device.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/device.c	2009-02-22 15:36:34.531430000 +0200
++++ ofed_kernel/drivers/infiniband/core/device.c	2009-02-22 15:38:28.650322000 +0200
+@@ -178,9 +178,14 @@ static int end_port(struct ib_device *de
+  */
+ struct ib_device *ib_alloc_device(size_t size)
+ {
++	struct ib_device *ibdev;
++
+ 	BUG_ON(size < sizeof (struct ib_device));
+ 
+-	return kzalloc(size, GFP_KERNEL);
++	ibdev = kzalloc(size, GFP_KERNEL);
++	if (ibdev)
++		mutex_init(&ibdev->sysfs_mutex);
++	return ibdev;
+ }
+ EXPORT_SYMBOL(ib_alloc_device);
+ 
+@@ -313,9 +318,10 @@ int ib_register_device(struct ib_device 
+ 		goto out;
+ 	}
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	list_add_tail(&device->core_list, &device_list);
+-
+ 	device->reg_state = IB_DEV_REGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ 
+ 	{
+ 		struct ib_client *client;
+@@ -361,7 +367,9 @@ void ib_unregister_device(struct ib_devi
+ 		kfree(context);
+ 	spin_unlock_irqrestore(&device->client_data_lock, flags);
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	device->reg_state = IB_DEV_UNREGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ }
+ EXPORT_SYMBOL(ib_unregister_device);
+ 
+Index: ofed_kernel/drivers/infiniband/core/sysfs.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/sysfs.c	2009-02-22 15:36:35.914354000 +0200
++++ ofed_kernel/drivers/infiniband/core/sysfs.c	2009-02-22 16:12:37.485629000 +0200
+@@ -94,7 +94,7 @@ static ssize_t state_show(struct ib_port
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+ 	static const char *state_name[] = {
+ 		[IB_PORT_NOP]		= "NOP",
+@@ -105,26 +105,33 @@ static ssize_t state_show(struct ib_port
+ 		[IB_PORT_ACTIVE_DEFER]	= "ACTIVE_DEFER"
+ 	};
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%d: %s\n", attr.state,
+-		       attr.state >= 0 && attr.state < ARRAY_SIZE(state_name) ?
+-		       state_name[attr.state] : "UNKNOWN");
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d: %s\n", attr.state,
++				      attr.state >= 0 &&
++				      attr.state < ARRAY_SIZE(state_name) ?
++				      state_name[attr.state] : "UNKNOWN");
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "0x%x\n", attr.lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_mask_count_show(struct ib_port *p,
+@@ -132,52 +139,64 @@ static ssize_t lid_mask_count_show(struc
+ 				   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.lmc);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.lmc);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%x\n", attr.sm_lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.sm_lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused,
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.sm_sl);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.sm_sl);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused,
+ 			     char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
+@@ -186,24 +205,33 @@ static ssize_t rate_show(struct ib_port 
+ 	struct ib_port_attr attr;
+ 	char *speed = "";
+ 	int rate;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	switch (attr.active_speed) {
+-	case 2: speed = " DDR"; break;
+-	case 4: speed = " QDR"; break;
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.active_speed) {
++			case 2: speed = " DDR"; break;
++			case 4: speed = " QDR"; break;
++			}
++
++			rate = 25 * ib_width_enum_to_int(attr.active_width) *
++				attr.active_speed;
++			if (rate < 0) {
++				ret = -EINVAL;
++				goto out;
++			}
++
++			ret = sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
++				      rate / 10, rate % 10 ? ".5" : "",
++				      ib_width_enum_to_int(attr.active_width),
++				      speed);
++		}
+ 	}
+-
+-	rate = 25 * ib_width_enum_to_int(attr.active_width) * attr.active_speed;
+-	if (rate < 0)
+-		return -EINVAL;
+-
+-	return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
+-		       rate / 10, rate % 10 ? ".5" : "",
+-		       ib_width_enum_to_int(attr.active_width), speed);
++out:
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
+@@ -211,22 +239,42 @@ static ssize_t phys_state_show(struct ib
+ {
+ 	struct ib_port_attr attr;
+ 
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	switch (attr.phys_state) {
+-	case 1:  return sprintf(buf, "1: Sleep\n");
+-	case 2:  return sprintf(buf, "2: Polling\n");
+-	case 3:  return sprintf(buf, "3: Disabled\n");
+-	case 4:  return sprintf(buf, "4: PortConfigurationTraining\n");
+-	case 5:  return sprintf(buf, "5: LinkUp\n");
+-	case 6:  return sprintf(buf, "6: LinkErrorRecovery\n");
+-	case 7:  return sprintf(buf, "7: Phy Test\n");
+-	default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.phys_state) {
++			case 1:
++				ret = sprintf(buf, "1: Sleep\n");
++				break;
++			case 2:
++				ret = sprintf(buf, "2: Polling\n");
++				break;
++			case 3:
++				ret = sprintf(buf, "3: Disabled\n");
++				break;
++			case 4:
++				ret = sprintf(buf, "4: PortConfigurationTraining\n");
++				break;
++			case 5:
++				ret = sprintf(buf, "5: LinkUp\n");
++				break;
++			case 6:
++				ret = sprintf(buf, "6: LinkErrorRecovery\n");
++				break;
++			case 7:
++				ret = sprintf(buf, "7: Phy Test\n");
++				break;
++			default:
++				ret = sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++				break;
++			}
++		}
+ 	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static PORT_ATTR_RO(state);
+@@ -256,21 +304,24 @@ static ssize_t show_port_gid(struct ib_p
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	union ib_gid gid;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) gid.raw)[0]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[1]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[2]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[3]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[4]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[5]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[6]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[7]));
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) gid.raw)[0]),
++				      be16_to_cpu(((__be16 *) gid.raw)[1]),
++				      be16_to_cpu(((__be16 *) gid.raw)[2]),
++				      be16_to_cpu(((__be16 *) gid.raw)[3]),
++				      be16_to_cpu(((__be16 *) gid.raw)[4]),
++				      be16_to_cpu(((__be16 *) gid.raw)[5]),
++				      be16_to_cpu(((__be16 *) gid.raw)[6]),
++				      be16_to_cpu(((__be16 *) gid.raw)[7]));
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
+@@ -279,13 +330,16 @@ static ssize_t show_port_pkey(struct ib_
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	u16 pkey;
+-	ssize_t ret;
+-
+-	ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%04x\n", pkey);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
++		if (!ret)
++			ret = sprintf(buf, "0x%04x\n", pkey);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ #define PORT_PMA_ATTR(_name, _counter, _width, _offset)			\
+@@ -308,6 +346,12 @@ static ssize_t show_pma_counter(struct i
+ 	if (!p->ibdev->process_mad)
+ 		return sprintf(buf, "N/A (no PMA)\n");
+ 
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (!ibdev_is_alive(p->ibdev)) {
++		ret = -ENODEV;
++		goto out;
++	}
++
+ 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
+ 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ 	if (!in_mad || !out_mad) {
+@@ -354,7 +414,7 @@ static ssize_t show_pma_counter(struct i
+ out:
+ 	kfree(in_mad);
+ 	kfree(out_mad);
+-
++	mutex_unlock(&p->ibdev->sysfs_mutex);
+ 	return ret;
+ }
+ 
+@@ -594,20 +654,20 @@ static ssize_t show_sys_image_guid(struc
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_attr attr;
+-	ssize_t ret;
+-
+-	if (!ibdev_is_alive(dev))
+-		return -ENODEV;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_device(dev, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_query_device(dev, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_node_guid(struct device *device,
+@@ -639,17 +699,20 @@ static ssize_t set_node_desc(struct devi
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_modify desc = {};
+-	int ret;
++	int ret = -ENODEV;
+ 
+ 	if (!dev->modify_device)
+ 		return -EIO;
+ 
+ 	memcpy(desc.node_desc, buf, min_t(int, count, 64));
+-	ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
+-	if (ret)
+-		return ret;
+-
+-	return count;
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
++		if (!ret)
++			ret = count;
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
+@@ -677,14 +740,18 @@ static ssize_t show_protocol_stat(const 
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	union rdma_protocol_stats stats;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = dev->get_protocol_stats(dev, &stats);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%llu\n",
+-		       (unsigned long long) ((u64 *) &stats)[offset]);
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = dev->get_protocol_stats(dev, &stats);
++		if (!ret)
++			ret = sprintf(buf, "%llu\n",
++				      (unsigned long long)
++				      ((u64 *) &stats)[offset]);
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ /* generate a read-only iwarp statistics attribute */
+Index: ofed_kernel/include/rdma/ib_verbs.h
+===================================================================
+--- ofed_kernel.orig/include/rdma/ib_verbs.h	2009-02-22 15:36:40.252210000 +0200
++++ ofed_kernel/include/rdma/ib_verbs.h	2009-02-22 15:38:28.678322000 +0200
+@@ -1205,6 +1205,7 @@ struct ib_device {
+ 		IB_DEV_REGISTERED,
+ 		IB_DEV_UNREGISTERED
+ 	}                            reg_state;
++	struct mutex		     sysfs_mutex;
+ 
+ 	u64			     uverbs_cmd_mask;
+ 	int			     uverbs_abi_ver;

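[Editor's note] The essence of the race fix added above is that ib_register_device()/ib_unregister_device() flip reg_state under the new ibdev->sysfs_mutex, and every sysfs show/store handler takes the same mutex and re-checks ibdev_is_alive() before calling into the driver, returning -ENODEV once the device has been torn down. A minimal user-space analogue of that pattern (pthread mutex standing in for the kernel mutex; the structure and names are illustrative, not ib_core's):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>

/* Toy stand-in for struct ib_device: liveness + data guarded by one mutex. */
struct toy_device {
	pthread_mutex_t sysfs_mutex;
	int registered;          /* stand-in for reg_state / ibdev_is_alive() */
	char *node_desc;         /* resource that goes away at unregister time */
};

/* Analogue of a sysfs show handler: check liveness under the mutex. */
static ssize_t show_node_desc(struct toy_device *dev, char *buf, size_t len)
{
	ssize_t ret = -ENODEV;

	pthread_mutex_lock(&dev->sysfs_mutex);
	if (dev->registered)
		ret = snprintf(buf, len, "%s\n", dev->node_desc);
	pthread_mutex_unlock(&dev->sysfs_mutex);
	return ret;
}

/* Analogue of ib_unregister_device(): mark dead before freeing data. */
static void unregister_device(struct toy_device *dev)
{
	pthread_mutex_lock(&dev->sysfs_mutex);
	dev->registered = 0;
	free(dev->node_desc);
	dev->node_desc = NULL;
	pthread_mutex_unlock(&dev->sysfs_mutex);
}

int main(void)
{
	struct toy_device dev = { .registered = 1 };
	char buf[64];

	pthread_mutex_init(&dev.sysfs_mutex, NULL);
	dev.node_desc = strdup("mlx4_0");

	printf("before unregister: %zd\n", show_node_desc(&dev, buf, sizeof(buf)));
	unregister_device(&dev);
	/* A late reader now gets -ENODEV instead of touching freed memory. */
	printf("after unregister:  %zd\n", show_node_desc(&dev, buf, sizeof(buf)));

	pthread_mutex_destroy(&dev.sysfs_mutex);
	return 0;
}
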
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/cxgb3_0002_undo_250.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/cxgb3_0002_undo_250.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/cxgb3_0002_undo_250.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,8 @@
-commit 204e2f98c2d13f869b8541f3c57c7314f75cab11
-Author: Divy Le Ray <divy at chelsio.com>
-Date:   Tue May 6 19:26:01 2008 -0700
-
-    cxgb3 - fix EEH
-    
-    Reset the chip when the PCI link goes down.
-    Preserve the napi structure when a sge qset's resources are freed.
-    Replay only HW initialization when the chip comes out of reset.
-    
-    Signed-off-by: Divy Le ray <divy at chelsio.com>
-    Signed-off-by: Jeff Garzik <jgarzik at redhat.com>
-
----
- drivers/net/cxgb3/common.h     |    1 -
- drivers/net/cxgb3/cxgb3_main.c |   10 ++++------
- drivers/net/cxgb3/regs.h       |    8 --------
- drivers/net/cxgb3/sge.c        |   31 ++-----------------------------
- drivers/net/cxgb3/t3_hw.c      |   28 ----------------------------
- 5 files changed, 6 insertions(+), 72 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/common.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/common.h
-+++ ofed_kernel/drivers/net/cxgb3/common.h
-@@ -700,7 +700,6 @@ void mac_prep(struct cmac *mac, struct a
+diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
+index d5e9bf7..dc2c79d 100644
+--- a/drivers/net/cxgb3/common.h
++++ b/drivers/net/cxgb3/common.h
+@@ -726,7 +726,6 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
  void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
  int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
  		    int reset);
@@ -31,11 +10,11 @@
  void t3_led_ready(struct adapter *adapter);
  void t3_fatal_err(struct adapter *adapter);
  void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -2449,6 +2449,9 @@ static pci_ers_result_t t3_io_error_dete
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 8de820e..48fbda6 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -2485,6 +2485,9 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
  	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
  		offload_close(&adapter->tdev);
  
@@ -45,7 +24,7 @@
  	adapter->flags &= ~FULL_INIT_DONE;
  
  	pci_disable_device(pdev);
-@@ -2473,12 +2476,8 @@ static pci_ers_result_t t3_io_slot_reset
+@@ -2509,12 +2512,8 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
  		goto err;
  	}
  	pci_set_master(pdev);
@@ -59,7 +38,7 @@
  		goto err;
  
  	return PCI_ERS_RESULT_RECOVERED;
-@@ -2630,7 +2629,6 @@ static int __devinit init_one(struct pci
+@@ -2666,7 +2665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
  	}
  
  	pci_set_master(pdev);
@@ -67,10 +46,10 @@
  
  	mmio_start = pci_resource_start(pdev, 0);
  	mmio_len = pci_resource_len(pdev, 0);
-Index: ofed_kernel/drivers/net/cxgb3/regs.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/regs.h
-+++ ofed_kernel/drivers/net/cxgb3/regs.h
+diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
+index a035d5c..086cd02 100644
+--- a/drivers/net/cxgb3/regs.h
++++ b/drivers/net/cxgb3/regs.h
 @@ -444,14 +444,6 @@
  
  #define A_PCIE_CFG 0x88
@@ -86,11 +65,11 @@
  #define S_PCIE_CLIDECEN    16
  #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
  #define F_PCIE_CLIDECEN    V_PCIE_CLIDECEN(1U)
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index 1b0861d..912f816 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
  }
  
  /**
@@ -124,7 +103,7 @@
   *	free_qset - free the resources of an SGE queue set
   *	@adapter: the adapter owning the queue set
   *	@q: the queue set
-@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter 
+@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -133,20 +112,11 @@
  }
  
  /**
-@@ -1429,7 +1402,7 @@ static void restart_ctrlq(unsigned long 
-  */
- int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
- {
--	int ret;
-+	int ret;
- 	local_bh_disable();
- 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
- 	local_bh_enable();
-Index: ofed_kernel/drivers/net/cxgb3/t3_hw.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/t3_hw.c
-+++ ofed_kernel/drivers/net/cxgb3/t3_hw.c
-@@ -3265,7 +3265,6 @@ static void config_pcie(struct adapter *
+diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
+index bf5c076..533fc74 100644
+--- a/drivers/net/cxgb3/t3_hw.c
++++ b/drivers/net/cxgb3/t3_hw.c
+@@ -3303,7 +3303,6 @@ static void config_pcie(struct adapter *adap)
  
  	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
  	t3_set_reg_field(adap, A_PCIE_CFG, 0,
@@ -154,15 +124,14 @@
  			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
  }
  
-@@ -3657,30 +3656,3 @@ void t3_led_ready(struct adapter *adapte
- 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+@@ -3706,31 +3705,3 @@ void t3_led_ready(struct adapter *adapter)
  			 F_GPIO0_OUT_VAL);
  }
--
+ 
 -int t3_replay_prep_adapter(struct adapter *adapter)
 -{
 -	const struct adapter_info *ai = adapter->params.info;
--	unsigned int i, j = 0;
+-	unsigned int i, j = -1;
 -	int ret;
 -
 -	early_hw_init(adapter, ai);
@@ -171,15 +140,17 @@
 -		return ret;
 -
 -	for_each_port(adapter, i) {
+-		const struct port_type_info *pti;
 -		struct port_info *p = adap2pinfo(adapter, i);
--		while (!adapter->params.vpd.port_type[j])
--			++j;
 -
--		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
--					ai->mdio_ops);
+-		while (!adapter->params.vpd.port_type[++j])
+-			;
 -
+-		pti = &port_types[adapter->params.vpd.port_type[j]];
+-		ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
+-		if (ret)
+-			return ret;
 -		p->phy.ops->power_down(&p->phy, 1);
--		++j;
 -	}
 -
 -return 0;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/cxgb3_0010_napi.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/cxgb3_0010_napi.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/cxgb3_0010_napi.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,21 @@
----
- drivers/net/cxgb3/adapter.h    |   22 +++--
- drivers/net/cxgb3/cxgb3_main.c |   94 ++++++++++++++++------
- drivers/net/cxgb3/sge.c        |  170 ++++++++++++++++++++++++-----------------
- 3 files changed, 185 insertions(+), 101 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/adapter.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/adapter.h
-+++ ofed_kernel/drivers/net/cxgb3/adapter.h
-@@ -47,13 +47,11 @@
- #include <asm/io.h>
+diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
+index 3b33ecb..21dad82 100644
+--- a/drivers/net/cxgb3/adapter.h
++++ b/drivers/net/cxgb3/adapter.h
+@@ -48,12 +48,10 @@
  
  struct vlan_group;
--struct adapter;
+ struct adapter;
 -struct sge_qset;
  
-+struct adapter;
  struct port_info {
  	struct adapter *adapter;
  	struct vlan_group *vlan_grp;
 -	struct sge_qset *qs;
- 	const struct port_type_info *port_type;
  	u8 port_id;
  	u8 rx_csum_offload;
-@@ -184,8 +182,6 @@ enum {				/* per port SGE statistics */
+ 	u8 nqsets;
+@@ -183,8 +181,6 @@ enum {				/* per port SGE statistics */
  #define T3_MAX_LRO_MAX_PKTS 64
  
  struct sge_qset {		/* an SGE queue set */
@@ -32,7 +24,7 @@
  	struct sge_rspq rspq;
  	struct sge_fl fl[SGE_RXQ_PER_SET];
  	struct sge_txq txq[SGE_TXQ_PER_SET];
-@@ -196,7 +192,7 @@ struct sge_qset {		/* an SGE queue set *
+@@ -195,7 +191,7 @@ struct sge_qset {		/* an SGE queue set */
  	int lro_enabled;
  	int lro_frag_len;
  	void *lro_va;
@@ -41,7 +33,7 @@
  	unsigned long txq_stopped;	/* which Tx queues are stopped */
  	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
  	unsigned long port_stats[SGE_PSTAT_MAX];
-@@ -241,6 +237,12 @@ struct adapter {
+@@ -240,6 +236,12 @@ struct adapter {
  	struct delayed_work adap_check_task;
  	struct work_struct ext_intr_handler_task;
  
@@ -54,7 +46,7 @@
  	struct dentry *debugfs_root;
  
  	struct mutex mdio_lock;
-@@ -267,6 +269,12 @@ static inline struct port_info *adap2pin
+@@ -266,6 +268,12 @@ static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
  	return netdev_priv(adap->port[idx]);
  }
  
@@ -67,7 +59,7 @@
  #define OFFLOAD_DEVMAP_BIT 15
  
  #define tdev2adap(d) container_of(d, struct adapter, tdev)
-@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, str
+@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -76,11 +68,11 @@
  int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
  		unsigned char *data);
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -410,17 +410,49 @@ static void setup_rss(struct adapter *ad
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 0f4c694..342d441 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -435,17 +435,49 @@ static void setup_rss(struct adapter *adap)
  		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
  }
  
@@ -98,9 +90,7 @@
 -	int i;
 +	int i, j, dummy_idx = 0;
 +	struct net_device *nd;
- 
--	for (i = 0; i < SGE_QSETS; i++) {
--		struct sge_qset *qs = &adap->sge.qs[i];
++
 +	for_each_port(adap, i) {
 +		struct net_device *dev = adap->port[i];
 +		const struct port_info *pi = netdev_priv(dev);
@@ -112,7 +102,9 @@
 +				nd = alloc_netdev(sizeof(*p), "", ether_setup);
 +				if (!nd)
 +					goto free_all;
-+
+ 
+-	for (i = 0; i < SGE_QSETS; i++) {
+-		struct sge_qset *qs = &adap->sge.qs[i];
 +				p = netdev_priv(nd);
 +				p->adapter = adap;
 +				nd->weight = 64;
@@ -137,7 +129,7 @@
  }
  
  /*
-@@ -431,18 +463,20 @@ static void init_napi(struct adapter *ad
+@@ -456,18 +488,20 @@ static void init_napi(struct adapter *adap)
  static void quiesce_rx(struct adapter *adap)
  {
  	int i;
@@ -168,7 +160,7 @@
  }
  
  /**
-@@ -455,7 +489,7 @@ static void enable_all_napi(struct adapt
+@@ -480,7 +514,7 @@ static void enable_all_napi(struct adapter *adap)
   */
  static int setup_sge_qsets(struct adapter *adap)
  {
@@ -177,7 +169,7 @@
  	unsigned int ntxq = SGE_TXQ_PER_SET;
  
  	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
-@@ -463,14 +497,15 @@ static int setup_sge_qsets(struct adapte
+@@ -488,14 +522,15 @@ static int setup_sge_qsets(struct adapter *adap)
  
  	for_each_port(adap, i) {
  		struct net_device *dev = adap->port[i];
@@ -196,7 +188,7 @@
  			if (err) {
  				t3_free_sge_resources(adap);
  				return err;
-@@ -884,6 +919,10 @@ static int cxgb_up(struct adapter *adap)
+@@ -909,6 +944,10 @@ static int cxgb_up(struct adapter *adap)
  				goto out;
  		}
  
@@ -207,7 +199,7 @@
  		err = t3_init_hw(adap, 0);
  		if (err)
  			goto out;
-@@ -896,7 +935,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -921,7 +960,6 @@ static int cxgb_up(struct adapter *adap)
  			goto out;
  
  		setup_rss(adap);
@@ -215,7 +207,7 @@
  		adap->flags |= FULL_INIT_DONE;
  	}
  
-@@ -924,7 +962,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -949,7 +987,6 @@ static int cxgb_up(struct adapter *adap)
  				      adap->name, adap)))
  		goto irq_err;
  
@@ -223,7 +215,7 @@
  	t3_sge_start(adap);
  	t3_intr_enable(adap);
  
-@@ -1061,10 +1098,8 @@ static int cxgb_open(struct net_device *
+@@ -1086,10 +1123,8 @@ static int cxgb_open(struct net_device *dev)
  	int other_ports = adapter->open_device_map & PORT_MASK;
  	int err;
  
@@ -235,7 +227,7 @@
  
  	set_bit(pi->port_id, &adapter->open_device_map);
  	if (is_offload(adapter) && !ofld_disable) {
-@@ -2700,6 +2735,7 @@ static int __devinit init_one(struct pci
+@@ -2736,6 +2771,7 @@ static int __devinit init_one(struct pci_dev *pdev,
  #ifdef CONFIG_NET_POLL_CONTROLLER
  		netdev->poll_controller = cxgb_netpoll;
  #endif
@@ -243,7 +235,7 @@
  
  		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
  	}
-@@ -2800,6 +2836,12 @@ static void __devexit remove_one(struct 
+@@ -2836,6 +2872,12 @@ static void __devexit remove_one(struct pci_dev *pdev)
  		t3_free_sge_resources(adapter);
  		cxgb_disable_msi(adapter);
  
@@ -256,11 +248,11 @@
  		for_each_port(adapter, i)
  			if (adapter->port[i])
  				free_netdev(adapter->port[i]);
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter 
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index f6bc6fe..3bbf626 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -270,7 +262,7 @@
  	memset(q, 0, sizeof(*q));
  }
  
-@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, str
+@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
  	unsigned int ndesc, pidx, credits, gen, compl;
  	const struct port_info *pi = netdev_priv(dev);
  	struct adapter *adap = pi->adapter;
@@ -279,7 +271,7 @@
  	struct sge_txq *q = &qs->txq[TXQ_ETH];
  
  	/*
-@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long 
+@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long data)
  	struct sk_buff *skb;
  	struct sge_qset *qs = (struct sge_qset *)data;
  	struct sge_txq *q = &qs->txq[TXQ_CTRL];
@@ -295,7 +287,7 @@
  
  		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
  
-@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long 
+@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long data)
  
  	spin_unlock(&q->lock);
  	wmb();
@@ -304,7 +296,7 @@
  		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
  }
  
-@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struc
+@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
  	else {
  		struct sge_qset *qs = rspq_to_qset(q);
  
@@ -314,7 +306,7 @@
  		q->rx_head = skb;
  	}
  	q->rx_tail = skb;
-@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundl
+@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev,
   *	receive handler.  Batches need to be of modest size as we do prefetches
   *	on the packets in each.
   */
@@ -357,7 +349,7 @@
  			prefetch(head->data);
  			skbs[ngathered] = head;
  			head = head->next;
-@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct 
+@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct *napi, int budget)
  		}
  		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
  	}
@@ -370,7 +362,7 @@
  }
  
  /**
-@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const
+@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const struct rsp_desc *r)
  
  /**
   *	napi_rx_handler - the NAPI handler for Rx processing
@@ -449,7 +441,7 @@
  }
  
  /**
-@@ -2448,7 +2462,8 @@ static inline int handle_responses(struc
+@@ -2448,7 +2462,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
  		return 0;
  	}
@@ -459,7 +451,7 @@
  	return 1;
  }
  
-@@ -2459,7 +2474,8 @@ static inline int handle_responses(struc
+@@ -2459,7 +2474,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -469,7 +461,7 @@
  	struct sge_rspq *q = &qs->rspq;
  
  	spin_lock(&q->lock);
-@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, vo
+@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -484,7 +476,7 @@
  		q->unhandled_irqs++;
  	spin_unlock(&q->lock);
  	return IRQ_HANDLED;
-@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, 
+@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, void *cookie)
  	return IRQ_HANDLED;
  }
  
@@ -502,7 +494,7 @@
  		return 1;
  	}
  	return 0;
-@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int 
+@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
  
  	spin_lock(&q->lock);
  
@@ -515,7 +507,7 @@
  	if (!new_packets && t3_slow_intr_handler(adap) == 0)
  		q->unhandled_irqs++;
  
-@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, voi
+@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, void *cookie)
  static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  {
  	u32 map;
@@ -527,7 +519,7 @@
  
  	t3_write_reg(adap, A_PL_CLI, 0);
  	map = t3_read_reg(adap, A_SG_DATA_INTR);
-@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq
+@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  	if (unlikely(map & F_ERRINTR))
  		t3_slow_intr_handler(adap);
  
@@ -550,7 +542,7 @@
  
  	spin_unlock(&q0->lock);
  	return IRQ_HANDLED;
-@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long d
+@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long data)
  {
  	spinlock_t *lock;
  	struct sge_qset *qs = (struct sge_qset *)data;
@@ -560,7 +552,7 @@
  
  	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
  		reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
-@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long d
+@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long data)
  		spin_unlock(&qs->txq[TXQ_OFLD].lock);
  	}
  	lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
@@ -572,7 +564,7 @@
  			u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
  
  			if (qs->fl[0].credits < qs->fl[0].size)
-@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long d
+@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long data)
   */
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
  {
@@ -586,7 +578,7 @@
  }
  
  /**
-@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_
+@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
   */
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -595,7 +587,7 @@
  {
  	int i, avail, ret = -ENOMEM;
  	struct sge_qset *q = &adapter->sge.qs[id];
-@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *ad
+@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  	}
  
  	spin_unlock_irq(&adapter->sge.reg_lock);
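
The cxgb3_0010_napi backport above moves the driver from the struct napi_struct interface back to the pre-2.6.24 per-net_device polling interface (dummy netdevs, dev->weight, netif_poll_enable/disable). A rough sketch of the two poll-handler shapes involved; the handler names and bodies are illustrative, not from the patch:

    #include <linux/kernel.h>
    #include <linux/netdevice.h>

    /* New-style NAPI handler (2.6.24 and later): the budget is passed in. */
    static int example_napi_poll(struct napi_struct *napi, int budget)
    {
            int work_done = 0;
            /* ... process up to 'budget' received packets ... */
            /* when work_done < budget, signal completion with the NAPI
             * completion helper of that kernel version */
            return work_done;
    }

    /* Old-style handler (before 2.6.24): hangs off net_device, quota in *budget. */
    static int example_netdev_poll(struct net_device *dev, int *budget)
    {
            int quota = min(*budget, dev->quota);
            int work_done = 0;
            /* ... process up to 'quota' received packets ... */
            *budget -= work_done;
            dev->quota -= work_done;
            if (work_done < quota) {
                    netif_rx_complete(dev);
                    return 0;       /* done, stay off the poll list */
            }
            return 1;               /* more work pending, keep polling */
    }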

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,23 +3,32 @@
  drivers/infiniband/hw/ipath/ipath_user_sdma.c |    6 +++---
  2 files changed, 4 insertions(+), 4 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:37:35.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:41:22.000000000 -0800
 @@ -698,7 +698,7 @@ retry:
  
  	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
  			      tx->map_len, DMA_TO_DEVICE);
--	if (dma_mapping_error(&dd->pcidev->dev, addr)) {
-+	if (dma_mapping_error(addr)) {
- 		ret = -EIO;
- 		goto unlock;
- 	}
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+-	if (dma_mapping_error(&dd->pcidev->dev, addr))
++	if (dma_mapping_error(addr))
+ 		goto ioerr;
+ 
+ 	dwoffset = tx->map_len >> 2;
+@@ -739,7 +739,7 @@ retry:
+ 		dw = (len + 3) >> 2;
+ 		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
+ 				      DMA_TO_DEVICE);
+-		if (dma_mapping_error(&dd->pcidev->dev, addr))
++		if (dma_mapping_error(addr))
+ 			goto unmap;
+ 		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
+ 		/* SDmaUseLargeBuf has to be set in every descriptor */
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:18.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:54.000000000 -0800
 @@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(cons
  
  	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
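
The ipath hunks above drop the struct device argument from dma_mapping_error() because that argument only exists on newer kernels (it was added around 2.6.27, which is why this backport targets 2.6.26 and older). Backports often hide the difference behind a small compat macro instead of editing every call site; a sketch, with a hypothetical macro name:

    #include <linux/version.h>
    #include <linux/dma-mapping.h>

    #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
    /* Older kernels: dma_mapping_error() takes only the DMA address. */
    #define compat_dma_mapping_error(dev, addr)  dma_mapping_error(addr)
    #else
    #define compat_dma_mapping_error(dev, addr)  dma_mapping_error(dev, addr)
    #endif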

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/ipoib_0100_to_2.6.21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/ipoib_0100_to_2.6.21.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/ipoib_0100_to_2.6.21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -12,11 +12,11 @@
  drivers/infiniband/ulp/ipoib/ipoib_multicast.c |   10 ++--
  5 files changed, 61 insertions(+), 64 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:56:08.000000000 +0200
-@@ -279,8 +279,6 @@ struct ipoib_dev_priv {
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:46:20.000000000 +0300
+@@ -277,8 +277,6 @@ struct ipoib_dev_priv {
  
  	struct net_device *dev;
  
@@ -25,7 +25,7 @@
  	unsigned long flags;
  
  	struct mutex vlan_mutex;
-@@ -338,6 +336,8 @@ struct ipoib_dev_priv {
+@@ -336,6 +334,8 @@ struct ipoib_dev_priv {
  
  	struct ib_event_handler event_handler;
  
@@ -34,7 +34,7 @@
  	struct net_device *parent;
  	struct list_head child_intfs;
  	struct list_head list;
-@@ -425,7 +425,7 @@ extern struct workqueue_struct *ipoib_wo
+@@ -423,7 +423,7 @@ extern struct workqueue_struct *ipoib_wo
  
  /* functions */
  
@@ -43,10 +43,10 @@
  void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
  void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:46:20.000000000 +0300
 @@ -593,7 +593,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		ipoib_dbg(priv, "cm recv error "
  			   "(status=%d, wrid=%d vend_err %x)\n",
@@ -56,7 +56,7 @@
  		if (has_srq)
  			goto repost;
  		else {
-@@ -658,7 +658,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
+@@ -646,7 +646,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		 * this packet and reuse the old buffer.
  		 */
  		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
@@ -65,7 +65,7 @@
  		goto repost;
  	}
  
-@@ -676,8 +676,8 @@ copied:
+@@ -664,8 +664,8 @@ copied:
  	skb_pull(skb, IPOIB_ENCAP_LEN);
  
  	dev->last_rx = jiffies;
@@ -76,7 +76,7 @@
  
  	skb->dev = dev;
  	/* XXX get correct PACKET_ type here */
-@@ -726,8 +726,8 @@ void ipoib_cm_send(struct net_device *de
+@@ -714,8 +714,8 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(skb->len > tx->mtu)) {
  		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
  			   skb->len, tx->mtu);
@@ -87,7 +87,7 @@
  		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
  		return;
  	}
-@@ -746,7 +746,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -734,7 +734,7 @@ void ipoib_cm_send(struct net_device *de
  	tx_req->skb = skb;
  	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
  	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
@@ -96,7 +96,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -756,7 +756,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -744,7 +744,7 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
  			       addr, skb->len))) {
  		ipoib_warn(priv, "post_send failed\n");
@@ -105,7 +105,7 @@
  		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
  		dev_kfree_skb_any(skb);
  	} else {
-@@ -793,8 +793,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
+@@ -781,8 +781,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
  	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
  
  	/* FIXME: is this right? Shouldn't we only increment on success? */
@@ -116,10 +116,10 @@
  
  	dev_kfree_skb_any(tx_req->skb);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:43:16.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:49:04.000000000 +0300
 @@ -261,7 +261,7 @@ static void ipoib_ib_handle_rx_wc(struct
  	 * this packet and reuse the old buffer.
  	 */
@@ -263,60 +263,41 @@
  		--priv->tx_outstanding;
  		ipoib_dma_unmap_tx(priv->ca, tx_req);
  		dev_kfree_skb_any(skb);
-@@ -809,6 +812,7 @@ int ipoib_ib_dev_stop(struct net_device 
+@@ -708,8 +711,7 @@ int ipoib_ib_dev_open(struct net_device 
+ 	priv->poll_timer.function = ipoib_ib_tx_timer_func;
+ 	priv->poll_timer.data = (unsigned long)dev;
+ 
+-	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_enable(&priv->napi);
++	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+ 
+ 	return 0;
+ }
+@@ -828,8 +830,8 @@ int ipoib_ib_dev_stop(struct net_device 
+ 	struct ipoib_tx_buf *tx_req;
  	int i;
  
- 	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+-	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_disable(&priv->napi);
++	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 +	netif_poll_disable(dev);
  
  	ipoib_cm_dev_stop(dev);
  
-@@ -893,6 +897,7 @@ timeout:
- 		msleep(1);
- 	}
+@@ -903,6 +905,7 @@ timeout:
  
+ 	ipoib_ah_dev_cleanup(dev);
+ 
 +	netif_poll_enable(dev);
  	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
  
  	return 0;
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:55:35.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:58:18.000000000 +0200
-@@ -106,20 +106,16 @@ int ipoib_open(struct net_device *dev)
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:44:02.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:46:20.000000000 +0300
+@@ -585,7 +585,7 @@ static void neigh_add_path(struct sk_buf
  
- 	ipoib_dbg(priv, "bringing up interface\n");
- 
--	if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
--		napi_enable(&priv->napi);
-+	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- 
- 	if (ipoib_pkey_dev_delay_open(dev))
- 		return 0;
- 
--	if (ipoib_ib_dev_open(dev)) {
--		napi_disable(&priv->napi);
--		return -EINVAL;
--	}
-+	if (ipoib_ib_dev_open(dev))
-+  		return -EINVAL;
- 
- 	if (ipoib_ib_dev_up(dev)) {
- 		ipoib_ib_dev_stop(dev, 1);
--		napi_disable(&priv->napi);
- 		return -EINVAL;
- 	}
- 
-@@ -152,7 +148,6 @@ static int ipoib_stop(struct net_device 
- 	ipoib_dbg(priv, "stopping interface\n");
- 
- 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
--	napi_disable(&priv->napi);
- 
- 	netif_stop_queue(dev);
- 
-@@ -604,7 +599,7 @@ static void neigh_add_path(struct sk_buf
- 
  	neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
  	if (!neigh) {
 -		++dev->stats.tx_dropped;
@@ -324,7 +305,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -667,7 +662,7 @@ err_list:
+@@ -646,7 +646,7 @@ err_list:
  err_path:
  	ipoib_neigh_free(dev, neigh);
  err_drop:
@@ -333,7 +314,7 @@
  	dev_kfree_skb_any(skb);
  
  	spin_unlock_irqrestore(&priv->lock, flags);
-@@ -714,7 +709,7 @@ static void unicast_arp_send(struct sk_b
+@@ -697,7 +697,7 @@ static void unicast_arp_send(struct sk_b
  			} else
  				__path_add(dev, path);
  		} else {
@@ -342,7 +323,7 @@
  			dev_kfree_skb_any(skb);
  		}
  
-@@ -733,7 +728,7 @@ static void unicast_arp_send(struct sk_b
+@@ -716,7 +716,7 @@ static void unicast_arp_send(struct sk_b
  		skb_push(skb, sizeof *phdr);
  		__skb_queue_tail(&path->queue, skb);
  	} else {
@@ -351,7 +332,7 @@
  		dev_kfree_skb_any(skb);
  	}
  
-@@ -790,7 +785,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -773,7 +773,7 @@ static int ipoib_start_xmit(struct sk_bu
  			__skb_queue_tail(&neigh->queue, skb);
  			spin_unlock_irqrestore(&priv->lock, flags);
  		} else {
@@ -360,7 +341,7 @@
  			dev_kfree_skb_any(skb);
  		}
  	} else {
-@@ -816,7 +811,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -799,7 +799,7 @@ static int ipoib_start_xmit(struct sk_bu
  					   IPOIB_QPN(phdr->hwaddr),
  					   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
  				dev_kfree_skb_any(skb);
@@ -369,7 +350,7 @@
  				return NETDEV_TX_OK;
  			}
  
-@@ -842,7 +837,7 @@ static void ipoib_timeout(struct net_dev
+@@ -825,7 +825,7 @@ static void ipoib_timeout(struct net_dev
  static int ipoib_hard_header(struct sk_buff *skb,
  			     struct net_device *dev,
  			     unsigned short type,
@@ -378,7 +359,7 @@
  {
  	struct ipoib_header *header;
  
-@@ -931,9 +926,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
+@@ -914,9 +914,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
  void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
  {
  	struct sk_buff *skb;
@@ -390,7 +371,7 @@
  		dev_kfree_skb_any(skb);
  	}
  	if (ipoib_cm_get(neigh))
-@@ -1008,10 +1004,6 @@ void ipoib_dev_cleanup(struct net_device
+@@ -991,10 +992,6 @@ void ipoib_dev_cleanup(struct net_device
  	priv->tx_ring = NULL;
  }
  
@@ -401,7 +382,7 @@
  static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
  		       void **tcph, u64 *hdr_flags, void *priv)
  {
-@@ -1069,13 +1061,13 @@ static void ipoib_setup(struct net_devic
+@@ -1052,13 +1049,13 @@ static void ipoib_setup(struct net_devic
  	dev->change_mtu		 = ipoib_change_mtu;
  	dev->hard_start_xmit	 = ipoib_start_xmit;
  	dev->tx_timeout		 = ipoib_timeout;
@@ -418,10 +399,10 @@
  
  	dev->watchdog_timeo	 = HZ;
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:46:20.000000000 +0300
 @@ -100,7 +100,7 @@ static void ipoib_mcast_free(struct ipoi
  	}
  
@@ -448,7 +429,7 @@
  			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
  		}
  		netif_tx_unlock_bh(dev);
-@@ -667,7 +668,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -677,7 +678,7 @@ void ipoib_mcast_send(struct net_device 
  	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)		||
  	    !priv->broadcast					||
  	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
@@ -457,7 +438,7 @@
  		dev_kfree_skb_any(skb);
  		goto unlock;
  	}
-@@ -682,7 +683,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -692,7 +693,7 @@ void ipoib_mcast_send(struct net_device 
  		if (!mcast) {
  			ipoib_warn(priv, "unable to allocate memory for "
  				   "multicast structure\n");
@@ -466,7 +447,7 @@
  			dev_kfree_skb_any(skb);
  			goto out;
  		}
-@@ -697,7 +698,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -707,7 +708,7 @@ void ipoib_mcast_send(struct net_device 
  		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
  			skb_queue_tail(&mcast->pkt_queue, skb);
  		else {

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,26 @@
+---
+ drivers/infiniband/ulp/ipoib/ipoib_fs.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+===================================================================
+--- ofa_kernel-1.4.orig/drivers/infiniband/ulp/ipoib/ipoib_fs.c
++++ ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+@@ -134,7 +134,7 @@ static int ipoib_mcg_open(struct inode *
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_mcg_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_mcg_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -240,7 +240,7 @@ static int ipoib_path_open(struct inode 
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_path_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_path_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -5,13 +5,13 @@
 
 Signed-off-by: Doron Shoham <dorons at voltaire.com>
 ---
- drivers/scsi/scsi_transport_iscsi.c |   95 ++++++++++++++++++++----------------
- 1 file changed, 55 insertions(+), 40 deletions(-)
+ drivers/scsi/scsi_transport_iscsi.c |   97 +++++++++++++++++++++---------------
+ 1 file changed, 57 insertions(+), 40 deletions(-)
 
-Index: ofed_kernel/drivers/scsi/scsi_transport_iscsi.c
+Index: ofa_kernel-1.4/drivers/scsi/scsi_transport_iscsi.c
 ===================================================================
---- ofed_kernel.orig/drivers/scsi/scsi_transport_iscsi.c
-+++ ofed_kernel/drivers/scsi/scsi_transport_iscsi.c
+--- ofa_kernel-1.4.orig/drivers/scsi/scsi_transport_iscsi.c
++++ ofa_kernel-1.4/drivers/scsi/scsi_transport_iscsi.c
 @@ -20,6 +20,8 @@
   * along with this program; if not, write to the Free Software
   * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
@@ -21,7 +21,18 @@
  #include <linux/module.h>
  #include <linux/mutex.h>
  #include <net/tcp.h>
-@@ -397,10 +399,12 @@ static void __iscsi_unblock_session(stru
+@@ -378,8 +380,10 @@ static void __iscsi_unblock_session(stru
+ 	struct iscsi_cls_session *session =
+ 			container_of(work, struct iscsi_cls_session,
+ 				     unblock_work);
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
+ 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
+ 	struct iscsi_host *ihost = shost->shost_data;
++#endif
+ 	unsigned long flags;
+ 
+ 	/*
+@@ -397,10 +401,12 @@ static void __iscsi_unblock_session(stru
  	 * the async scanning code (drivers like iscsi_tcp do login and
  	 * scanning from userspace).
  	 */
@@ -38,7 +49,7 @@
  }
  
  /**
-@@ -1294,45 +1298,56 @@ iscsi_if_recv_msg(struct sk_buff *skb, s
+@@ -1294,45 +1300,56 @@ iscsi_if_recv_msg(struct sk_buff *skb, s
   * Malformed skbs with wrong lengths or invalid creds are not processed.
   */
  static void
@@ -129,7 +140,7 @@
  	}
  	mutex_unlock(&rx_queue_mutex);
  }
-@@ -1738,7 +1753,7 @@ static __init int iscsi_transport_init(v
+@@ -1738,7 +1755,7 @@ static __init int iscsi_transport_init(v
  	return 0;
  
  release_nls:
@@ -138,7 +149,7 @@
  unregister_session_class:
  	transport_class_unregister(&iscsi_session_class);
  unregister_conn_class:
-@@ -1753,7 +1768,7 @@ unregister_transport_class:
+@@ -1753,7 +1770,7 @@ unregister_transport_class:
  static void __exit iscsi_transport_exit(void)
  {
  	destroy_workqueue(iscsi_eh_timer_workq);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/iw_nes_050_to_2_6_24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/iw_nes_050_to_2_6_24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/iw_nes_050_to_2_6_24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:23:23.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -1118,19 +1118,19 @@ static ssize_t nes_show_wqm_quanta(struc
  static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
  					const char *buf, size_t count)

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/iw_nes_100_to_2_6_23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/iw_nes_100_to_2_6_23.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/iw_nes_100_to_2_6_23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/Kconfig nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig
---- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:38:45.000000000 -0500
 @@ -2,7 +2,6 @@ config INFINIBAND_NES
  	tristate "NetEffect RNIC Driver"
  	depends on PCI && INET && INFINIBAND
@@ -10,8 +10,8 @@
  	  This is a low-level driver for NetEffect RDMA enabled
  	  Network Interface Cards (RNIC).
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -360,10 +360,11 @@ struct ib_qp *nes_get_qp(struct ib_devic
   */
  static void nes_print_macaddr(struct net_device *netdev)
@@ -29,9 +29,9 @@
  
  /**
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_cm.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2008-11-07 15:26:56.000000000 -0600
-@@ -998,7 +998,6 @@ static int nes_addr_resolve_neigh(struct
+--- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1086,7 +1086,6 @@ static int nes_addr_resolve_neigh(struct
  	struct flowi fl;
  	struct neighbour *neigh;
  	int rc = -1;
@@ -39,7 +39,7 @@
  
  	memset(&fl, 0, sizeof fl);
  	fl.nl_u.ip4_u.daddr = htonl(dst_ip);
-@@ -1012,8 +1011,11 @@ static int nes_addr_resolve_neigh(struct
+@@ -1100,8 +1099,11 @@ static int nes_addr_resolve_neigh(struct
  	if (neigh) {
  		if (neigh->nud_state & NUD_VALID) {
  			nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -53,7 +53,7 @@
  			nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
  					     dst_ip, NES_ARP_ADD);
  			rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
-@@ -1042,7 +1044,6 @@ static struct nes_cm_node *make_cm_node(
+@@ -1130,7 +1132,6 @@ static struct nes_cm_node *make_cm_node(
  	int arpindex = 0;
  	struct nes_device *nesdev;
  	struct nes_adapter *nesadapter;
@@ -61,7 +61,7 @@
  
  	/* create an hte and cm_node for this instance */
  	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
-@@ -1111,8 +1112,11 @@ static struct nes_cm_node *make_cm_node(
+@@ -1197,8 +1198,11 @@ static struct nes_cm_node *make_cm_node(
  
  	/* copy the mac addr to node context */
  	memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
@@ -76,8 +76,8 @@
  	add_hte_node(cm_core, cm_node);
  	atomic_inc(&cm_nodes_created);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2008-11-07 15:38:18.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:38:45.000000000 -0500
 @@ -38,14 +38,9 @@
  #include <linux/ip.h>
  #include <linux/tcp.h>
@@ -90,10 +90,10 @@
 -module_param(nes_lro_max_aggr, uint, 0444);
 -MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
 -
- static u32 crit_err_count;
- u32 int_mod_timer_init;
- u32 int_mod_cq_depth_256;
-@@ -1540,25 +1535,6 @@ static void nes_rq_wqes_timeout(unsigned
+ static int wide_ppm_offset;
+ module_param(wide_ppm_offset, int, 0644);
+ MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm");
+@@ -1531,25 +1526,6 @@ static void nes_rq_wqes_timeout(unsigned
  }
  
  
@@ -119,7 +119,7 @@
  /**
   * nes_init_nic_qp
   */
-@@ -1783,14 +1759,6 @@ int nes_init_nic_qp(struct nes_device *n
+@@ -1773,14 +1749,6 @@ int nes_init_nic_qp(struct nes_device *n
  			jumbomode = 1;
  		nes_nic_init_timer_defaults(nesdev, jumbomode);
  	}
@@ -134,7 +134,7 @@
  	return 0;
  }
  
-@@ -2541,7 +2509,7 @@ static void nes_nic_napi_ce_handler(stru
+@@ -2556,7 +2524,7 @@ static void nes_nic_napi_ce_handler(stru
  {
  	struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
  
@@ -143,7 +143,7 @@
  }
  
  
-@@ -2576,13 +2544,10 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2591,13 +2559,10 @@ void nes_nic_ce_handler(struct nes_devic
  	u16 pkt_type;
  	u16 rqes_processed = 0;
  	u8 sq_cqes = 0;
@@ -157,7 +157,7 @@
  	do {
  		if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
  				NES_NIC_CQE_VALID) {
-@@ -2714,17 +2679,9 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2729,17 +2694,9 @@ void nes_nic_ce_handler(struct nes_devic
  							>> 16);
  					nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
  							nesvnic->netdev->name, vlan_tag);
@@ -178,7 +178,7 @@
  
  skip_rx_indicate0:
  				nesvnic->netdev->last_rx = jiffies;
-@@ -2755,8 +2712,6 @@ skip_rx_indicate0:
+@@ -2770,8 +2727,6 @@ skip_rx_indicate0:
  
  	} while (1);
  
@@ -188,18 +188,18 @@
  		barrier();
  		/* restart the queue if it had been stopped */
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.h nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:38:45.000000000 -0500
 @@ -33,8 +33,6 @@
  #ifndef __NES_HW_H
  #define __NES_HW_H
  
 -#include <linux/inet_lro.h>
 -
+ #define NES_PHY_TYPE_CX4       1
  #define NES_PHY_TYPE_1G        2
  #define NES_PHY_TYPE_IRIS      3
- #define NES_PHY_TYPE_ARGUS     4
-@@ -991,8 +989,6 @@ struct nes_hw_tune_timer {
+@@ -1005,8 +1003,6 @@ struct nes_hw_tune_timer {
  #define NES_TIMER_ENABLE_LIMIT      4
  #define NES_MAX_LINK_INTERRUPTS     128
  #define NES_MAX_LINK_CHECK          200
@@ -208,7 +208,7 @@
  
  struct nes_adapter {
  	u64              fw_ver;
-@@ -1168,7 +1164,6 @@ struct nes_vnic {
+@@ -1194,7 +1190,6 @@ struct nes_vnic {
  	u32               msg_enable;
  	/* u32 tx_avail; */
  	__be32            local_ipaddr;
@@ -216,7 +216,7 @@
  	spinlock_t           tx_lock;	/* could use netdev tx lock? */
  	struct timer_list    rq_wqes_timer;
  	u32                  nic_mem_size;
-@@ -1196,9 +1191,6 @@ struct nes_vnic {
+@@ -1222,9 +1217,6 @@ struct nes_vnic {
  	u8  of_device_registered;
  	u8  rdma_enabled;
  	u8  rx_checksum_disabled;
@@ -227,8 +227,8 @@
  
  struct nes_ib_device {
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
 @@ -96,35 +96,38 @@ static int nics_per_function = 1;
  /**
   * nes_netdev_poll
@@ -292,7 +292,7 @@
  	netif_stop_queue(netdev);
  	list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
  		first_nesvnic = container_of(list_pos, struct nes_vnic, list);
-@@ -797,14 +798,16 @@ static int nes_netdev_set_mac_address(st
+@@ -775,14 +776,16 @@ static int nes_netdev_set_mac_address(st
  	int i;
  	u32 macaddr_low;
  	u16 macaddr_high;
@@ -312,7 +312,7 @@
  	macaddr_high  = ((u16)netdev->dev_addr[0]) << 8;
  	macaddr_high += (u16)netdev->dev_addr[1];
  	macaddr_low   = ((u32)netdev->dev_addr[2]) << 24;
-@@ -909,11 +912,11 @@ static void nes_netdev_set_multicast_lis
+@@ -887,11 +890,11 @@ static void nes_netdev_set_multicast_lis
  			if (mc_index >= max_pft_entries_avaiable)
  				break;
  			if (multicast_addr) {
@@ -329,7 +329,7 @@
  				macaddr_high  = ((u16)multicast_addr->dmi_addr[0]) << 8;
  				macaddr_high += (u16)multicast_addr->dmi_addr[1];
  				macaddr_low   = ((u32)multicast_addr->dmi_addr[2]) << 24;
-@@ -1063,9 +1066,6 @@ static const char nes_ethtool_stringset[
+@@ -1040,9 +1043,6 @@ static const char nes_ethtool_stringset[
  	"CQ Depth 32",
  	"CQ Depth 128",
  	"CQ Depth 256",
@@ -339,17 +339,17 @@
  };
  
  #define NES_ETHTOOL_STAT_COUNT  ARRAY_SIZE(nes_ethtool_stringset)
-@@ -1263,9 +1263,6 @@ static void nes_netdev_get_ethtool_stats
- 	target_stat_values[52] = int_mod_cq_depth_32;
- 	target_stat_values[53] = int_mod_cq_depth_128;
- 	target_stat_values[54] = int_mod_cq_depth_256;
--	target_stat_values[55] = nesvnic->lro_mgr.stats.aggregated;
--	target_stat_values[56] = nesvnic->lro_mgr.stats.flushed;
--	target_stat_values[57] = nesvnic->lro_mgr.stats.no_desc;
+@@ -1240,9 +1240,6 @@ static void nes_netdev_get_ethtool_stats
+ 	target_stat_values[++index] = int_mod_cq_depth_32;
+ 	target_stat_values[++index] = int_mod_cq_depth_128;
+ 	target_stat_values[++index] = int_mod_cq_depth_256;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
  
  }
  
-@@ -1542,8 +1539,6 @@ static struct ethtool_ops nes_ethtool_op
+@@ -1525,8 +1522,6 @@ static struct ethtool_ops nes_ethtool_op
  	.set_sg = ethtool_op_set_sg,
  	.get_tso = ethtool_op_get_tso,
  	.set_tso = ethtool_op_set_tso,
@@ -358,7 +358,7 @@
  };
  
  
-@@ -1615,7 +1610,8 @@ struct net_device *nes_netdev_init(struc
+@@ -1598,7 +1593,8 @@ struct net_device *nes_netdev_init(struc
  	netdev->type = ARPHRD_ETHER;
  	netdev->features = NETIF_F_HIGHDMA;
  	netdev->ethtool_ops = &nes_ethtool_ops;
@@ -368,7 +368,7 @@
  	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
  	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
  	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
-@@ -1648,7 +1644,6 @@ struct net_device *nes_netdev_init(struc
+@@ -1630,7 +1626,6 @@ struct net_device *nes_netdev_init(struc
  
  	if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
  		netdev->features |= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/iw_nes_200_to_2_6_22.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/iw_nes_200_to_2_6_22.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/iw_nes_200_to_2_6_22.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:52:27.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -593,16 +593,22 @@ static int __devinit nes_probe(struct pc
  						nesdev->nesadapter->port_count;
  	}
@@ -35,9 +35,9 @@
  
  	tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.h nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h
---- linux-2.6/drivers/infiniband/hw/nes/nes.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2008-11-07 15:52:27.000000000 -0600
-@@ -276,14 +276,7 @@ static inline __le32 get_crc_value(struc
+--- linux-2.6/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:38:45.000000000 -0500
+@@ -274,14 +274,7 @@ static inline __le32 get_crc_value(struc
  	u32 crc_value;
  	crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad));
  

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/iw_nes_210_to_2_6_21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/iw_nes_210_to_2_6_21.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/iw_nes_210_to_2_6_21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,23 @@
+diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1549,6 +1549,11 @@ static void nes_netdev_vlan_rx_register(
+ 	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ }
+ 
++static void nes_netdev_vlan_rx_kill_vid(struct net_device *netdev, 
++					unsigned short vid)
++{
++}
++
+ 
+ /**
+  * nes_netdev_init - initialize network device
+@@ -1598,6 +1603,7 @@ struct net_device *nes_netdev_init(struc
+ 	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
+ 	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ 	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
++	netdev->vlan_rx_kill_vid = nes_netdev_vlan_rx_kill_vid;
+ 
+ 	/* Fill in the port structure */
+ 	nesvnic->netdev = netdev;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/mlx4_en_0099_no_multiqueue.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/mlx4_en_0099_no_multiqueue.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/mlx4_en_0099_no_multiqueue.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,73 @@
+diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
+index bce700a..a9ee0d7 100644
+--- a/drivers/net/mlx4/en_netdev.c
++++ b/drivers/net/mlx4/en_netdev.c
+@@ -963,7 +963,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	int i;
+ 	int err;
+ 
+-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
++	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+ 	if (dev == NULL) {
+ 		mlx4_err(mdev, "Net device allocation failed\n");
+ 		return -ENOMEM;
+@@ -1036,7 +1036,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	dev->open = &mlx4_en_open;
+ 	dev->stop = &mlx4_en_close;
+ 	dev->hard_start_xmit = &mlx4_en_xmit;
+-	dev->select_queue = &mlx4_en_select_queue;
+ 	dev->get_stats = &mlx4_en_get_stats;
+ 	dev->set_multicast_list = &mlx4_en_set_multicast;
+ 	dev->set_mac_address = &mlx4_en_set_mac;
+diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
+index 3d8246f..72e166f 100644
+--- a/drivers/net/mlx4/en_tx.c
++++ b/drivers/net/mlx4/en_tx.c
+@@ -392,7 +392,7 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+ 			 *   transmission on that ring would stop the queue.
+ 			 */
+ 			ring->blocked = 0;
+-			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
++			netif_wake_queue(dev);
+ 			priv->port_stats.wake_queue++;
+ 		}
+ 	}
+@@ -612,7 +612,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
+ 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ }
+ 
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
++static int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	u16 vlan_tag = 0;
+@@ -703,7 +703,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+-	tx_ind = skb->queue_mapping;
++	tx_ind = mlx4_en_select_queue(dev, skb);
+ 	ring = &priv->tx_ring[tx_ind];
+ 	if (priv->vlgrp && vlan_tx_tag_present(skb))
+ 		vlan_tag = vlan_tx_tag_get(skb);
+@@ -713,7 +713,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ 		/* every full Tx ring stops queue.
+ 		 * TODO: implement multi-queue support (per-queue stop) */
+-		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
++		netif_stop_queue(dev);
+ 		ring->blocked = 1;
+ 		priv->port_stats.queue_stopped++;
+ 
+diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
+index adef17c..995e318 100644
+--- a/drivers/net/mlx4/mlx4_en.h
++++ b/drivers/net/mlx4/mlx4_en.h
+@@ -521,7 +521,6 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq);
+ void mlx4_en_poll_tx_cq(unsigned long data);
+ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+ 
+ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+ 			   u32 size, u16 stride);
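
The mlx4_en_0099 patch above strips the multiqueue TX API (alloc_etherdev_mq(), and netif_tx_wake_queue()/netif_tx_stop_queue() on a per-ring struct netdev_queue) and falls back to the single-queue net_device helpers, since the 2.6.16 SLES10 SP2 kernel predates multiqueue support. The same fallback is sometimes expressed as compat wrappers rather than by editing call sites; a sketch with hypothetical wrapper names (the per-ring TX-queue helpers appeared around 2.6.27 and alloc_etherdev_mq() somewhat earlier, so a real backport might split the version checks):

    #include <linux/version.h>
    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>

    #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
    #define compat_alloc_etherdev_mq(priv_sz, nq)  alloc_etherdev(priv_sz)
    #define compat_wake_tx_queue(dev, ring)        netif_wake_queue(dev)
    #define compat_stop_tx_queue(dev, ring)        netif_stop_queue(dev)
    #else
    #define compat_alloc_etherdev_mq(priv_sz, nq)  alloc_etherdev_mq(priv_sz, nq)
    #define compat_wake_tx_queue(dev, ring) \
            netif_tx_wake_queue(netdev_get_tx_queue(dev, ring))
    #define compat_stop_tx_queue(dev, ring) \
            netif_tx_stop_queue(netdev_get_tx_queue(dev, ring))
    #endif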

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/mlx4_en_0100_to_2.6.24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/mlx4_en_0100_to_2.6.24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/mlx4_en_0100_to_2.6.24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,33 +1,5 @@
-From 6d0a6cc6981404e95297924c9a8f169b3447a98e Mon Sep 17 00:00:00 2001
-From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
-Date: Thu, 2 Oct 2008 11:26:30 +0300
-Subject: [PATCH] mlx4_en: LRO backport patch
-
-Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
----
- drivers/net/mlx4/Makefile    |    2 +-
- drivers/net/mlx4/en_cq.c     |   23 ++-
- drivers/net/mlx4/en_lro.c    |  529 ++++++++++++++++++++++++++++++++++++++++++
- drivers/net/mlx4/en_netdev.c |    5 +-
- drivers/net/mlx4/en_params.c |   19 +--
- drivers/net/mlx4/en_rx.c     |  121 +++-------
- drivers/net/mlx4/en_tx.c     |    1 +
- drivers/net/mlx4/mlx4_en.h   |   60 +++++-
- 8 files changed, 646 insertions(+), 114 deletions(-)
- create mode 100644 drivers/net/mlx4/en_lro.c
-
-diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
-index acb1b91..e6bfdec 100644
---- a/drivers/net/mlx4/Makefile
-+++ b/drivers/net/mlx4/Makefile
-@@ -5,4 +5,4 @@ mlx4_core-y :=  alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-
- obj-$(CONFIG_MLX4_EN)                  += mlx4_en.o
-
--mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o
-+mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o en_lro.o
 diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
-index cf9c5df..2348bfc 100644
+index a4d6e46..440633a 100644
 --- a/drivers/net/mlx4/en_cq.c
 +++ b/drivers/net/mlx4/en_cq.c
 @@ -34,6 +34,7 @@
@@ -38,7 +10,7 @@
  
  #include "mlx4_en.h"
  
-@@ -100,8 +101,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -106,8 +107,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  		cq->timer.function = mlx4_en_poll_tx_cq;
  		cq->timer.data = (unsigned long) cq;
  	} else {
@@ -58,7 +30,7 @@
  	}
  
  	return 0;
-@@ -123,8 +133,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -129,8 +139,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  
  	if (cq->is_tx)
  		del_timer(&cq->timer);
@@ -74,548 +46,8 @@
  
  	mlx4_cq_free(mdev->dev, &cq->mcq);
  }
-diff --git a/drivers/net/mlx4/en_lro.c b/drivers/net/mlx4/en_lro.c
-new file mode 100644
-index 0000000..9d17376
---- /dev/null
-+++ b/drivers/net/mlx4/en_lro.c
-@@ -0,0 +1,534 @@
-+/*
-+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses.  You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ *     Redistribution and use in source and binary forms, with or
-+ *     without modification, are permitted provided that the following
-+ *     conditions are met:
-+ *
-+ *      - Redistributions of source code must retain the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer.
-+ *
-+ *      - Redistributions in binary form must reproduce the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer in the documentation and/or other materials
-+ *        provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ */
-+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <net/tcp.h>
-+#include <linux/if_vlan.h>
-+#include <linux/delay.h>
-+
-+#include "mlx4_en.h"
-+
-+/* LRO hash function - using sum of source and destination port LSBs is
-+ * good enough */
-+#define LRO_INDEX(th, size) \
-+	((*((u8*) &th->source + 1) + *((u8*) &th->dest + 1)) & (size - 1))
-+
-+/* #define CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+static void mlx4_en_lro_validate(struct mlx4_en_priv* priv, struct mlx4_en_lro *lro)
-+{
-+	int i;
-+	int size, size2;
-+	struct sk_buff *skb = lro->skb;
-+	skb_frag_t *frags;
-+	int len, len2;
-+	int cur_skb = 0;
-+
-+	/* Sum fragment sizes of first skb */
-+	len = skb->len;
-+	size = skb_headlen(skb);
-+	frags = skb_shinfo(skb)->frags;
-+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+		size += frags[i].size;
-+	}
-+
-+	/* Add in fragments of linked skb's */
-+	skb = skb_shinfo(skb)->frag_list;
-+	while (skb) {
-+		cur_skb++;
-+		len2 = skb->len;
-+		if (skb_headlen(skb)) {
-+			mlx4_err(priv->mdev, "Bad LRO format: non-zero headlen "
-+				  "in fraglist (skb:%d)\n", cur_skb);
-+			return;
-+		}
-+
-+		size2 = 0;
-+		frags = skb_shinfo(skb)->frags;
-+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+			size2 += frags[i].size;
-+		}
-+
-+		if (size2 != len2) {
-+			mlx4_err(priv->mdev, "Bad skb size:%d in LRO fraglist. "
-+			          "Expected:%d (skb:%d)\n", size2, len2, cur_skb);
-+			return;
-+		}
-+		size += size2;
-+		skb = skb->next;
-+	}
-+
-+	if (size != len)
-+		mlx4_err(priv->mdev, "Bad LRO size:%d expected:%d\n", size, len);
-+}
-+#endif /* MLX4_EN_DEBUG_LRO */
-+
-+static void mlx4_en_lro_flush_single(struct mlx4_en_priv* priv,
-+		   struct mlx4_en_rx_ring* ring, struct mlx4_en_lro *lro)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct sk_buff *skb = lro->skb;
-+	struct iphdr *iph = (struct iphdr *) skb->data;
-+	struct tcphdr *th = (struct tcphdr *)(iph + 1);
-+	unsigned int headlen = skb_headlen(skb);
-+	__wsum tcp_hdr_csum;
-+	u32 *ts;
-+
-+	/* Update IP length and checksum */
-+	iph->tot_len = htons(lro->tot_len);
-+	iph->check = 0;
-+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-+
-+	/* Update latest TCP ack, window, psh, and timestamp */
-+	th->ack_seq = lro->ack_seq;
-+	th->window = lro->window;
-+	th->psh = !!lro->psh;
-+	if (lro->has_timestamp) {
-+		ts = (u32 *) (th + 1);
-+		ts[1] = htonl(lro->tsval);
-+		ts[2] = lro->tsecr;
-+	}
-+	th->check = 0;
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, 0);
-+	lro->data_csum = csum_add(lro->data_csum, tcp_hdr_csum);
-+	th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
-+				      lro->tot_len - (iph->ihl << 2),
-+				      IPPROTO_TCP, lro->data_csum);
-+
-+	/* Update skb */
-+	skb->len = lro->tot_len;
-+	skb->data_len = lro->tot_len - headlen;
-+	skb->truesize = skb->len + sizeof(struct sk_buff);
-+	skb_shinfo(skb)->gso_size = lro->mss;
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+	mlx4_en_lro_validate(priv, lro);
-+#endif /* CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+	/* Push it up the stack */
-+	if (priv->vlgrp && lro->has_vlan)
-+		vlan_hwaccel_receive_skb(skb, priv->vlgrp,
-+					be16_to_cpu(lro->vlan_prio));
-+	else
-+		netif_receive_skb(skb);
-+	priv->dev->last_rx = jiffies;
-+
-+	/* Increment stats */
-+	priv->port_stats.lro_flushed++;
-+
-+	/* Move session back to the free list */
-+	hlist_del(&lro->node);
-+	hlist_del(&lro->flush_node);
-+	hlist_add_head(&lro->node, &ring->lro_free);
-+}
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_flush, flush_node) {
-+		if (all || time_after(jiffies, lro->expires))
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+	}
-+}
-+
-+static inline int mlx4_en_lro_append(struct mlx4_en_priv *priv,
-+				   struct mlx4_en_lro *lro,
-+				   struct mlx4_en_rx_desc *rx_desc,
-+				   struct skb_frag_struct *skb_frags,
-+				   struct mlx4_en_rx_alloc *page_alloc,
-+				   unsigned int data_len,
-+				   int hlen)
-+{
-+	struct sk_buff *skb = lro->skb_last;
-+	struct skb_shared_info *info;
-+	struct skb_frag_struct *frags_copy;
-+	int nr_frags;
-+
-+	if (skb_shinfo(skb)->nr_frags + priv->num_frags > MAX_SKB_FRAGS)
-+		return -ENOMEM;
-+
-+	info = skb_shinfo(skb);
-+
-+	/* Copy fragments from descriptor ring to skb */
-+	frags_copy = info->frags + info->nr_frags;
-+	nr_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
-+						frags_copy,
-+						page_alloc,
-+						data_len + hlen);
-+	if (!nr_frags) {
-+		mlx4_dbg(DRV, priv, "Failed completing rx desc during LRO append\n");
-+		return -ENOMEM;
-+	}
-+
-+	/* Skip over headers */
-+	frags_copy[0].page_offset += hlen;
-+
-+	if (nr_frags == 1)
-+		frags_copy[0].size = data_len;
-+	else {
-+		/* Adjust size of last fragment to match packet length.
-+		 * Note: if this fragment is also the first one, the
-+		 *       operation is completed in the next line */
-+		frags_copy[nr_frags - 1].size = hlen + data_len -
-+				priv->frag_info[nr_frags - 1].frag_prefix_size;
-+
-+		/* Adjust size of first fragment */
-+		frags_copy[0].size -= hlen;
-+	}
-+
-+	/* Update skb bookkeeping */
-+	skb->len += data_len;
-+	skb->data_len += data_len;
-+	info->nr_frags += nr_frags;
-+	return 0;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_find_session(struct mlx4_en_dev *mdev,
-+						       struct mlx4_en_rx_ring *ring,
-+						       struct iphdr *iph,
-+						       struct tcphdr *th)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node;
-+	int index = LRO_INDEX(th, mdev->profile.num_lro);
-+	struct hlist_head *list = &ring->lro_hash[index];
-+
-+	hlist_for_each_entry(lro, node, list, node) {
-+		if (lro->sport_dport == *((u32*) &th->source) &&
-+		    lro->saddr == iph->saddr &&
-+		    lro->daddr == iph->daddr)
-+			return lro;
-+	}
-+	return NULL;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_alloc_session(struct mlx4_en_priv *priv,
-+							struct mlx4_en_rx_ring *ring)
-+{
-+	return hlist_empty(&ring->lro_free) ? NULL :
-+		hlist_entry(ring->lro_free.first, struct mlx4_en_lro, node);
-+}
-+
-+static __wsum mlx4_en_lro_tcp_data_csum(struct iphdr *iph,
-+					struct tcphdr *th, int len)
-+{
-+	__wsum tcp_csum;
-+	__wsum tcp_hdr_csum;
-+	__wsum tcp_ps_hdr_csum;
-+
-+	tcp_csum = ~csum_unfold(th->check);
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, tcp_csum);
-+
-+	tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-+					     len + (th->doff << 2),
-+					     IPPROTO_TCP, 0);
-+
-+	return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
-+			tcp_ps_hdr_csum);
-+}
-+
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+					  struct mlx4_en_rx_desc *rx_desc,
-+					  struct skb_frag_struct *skb_frags,
-+					  unsigned int length,
-+					  struct mlx4_cqe *cqe)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct mlx4_en_lro *lro;
-+	struct sk_buff *skb;
-+	struct iphdr *iph;
-+	struct tcphdr *th;
-+	dma_addr_t dma;
-+	int tcp_hlen;
-+	int tcp_data_len;
-+	int hlen;
-+	u16 ip_len;
-+	void *va;
-+	u32 *ts;
-+	u32 seq;
-+	u32 tsval = (u32) ~0UL;
-+	u32 tsecr = 0;
-+	u32 ack_seq;
-+	u16 window;
-+
-+	/* This packet is eligible for LRO if it is:
-+	 * - DIX Ethernet (type interpretation)
-+	 * - TCP/IP (v4)
-+	 * - without IP options
-+	 * - not an IP fragment */
-+	if (!mlx4_en_can_lro(cqe->status))
-+			return -1;
-+
-+	/* Get pointer to TCP header. We already know that the packet is DIX Ethernet/IPv4/TCP
-+	 * with no VLAN (HW stripped it) and no IP options */
-+	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
-+	iph = va + ETH_HLEN;
-+	th = (struct tcphdr *)(iph + 1);
-+
-+	/* Synchronize headers for processing */
-+	dma = be64_to_cpu(rx_desc->data[0].addr);
-+#define MAX_LRO_HEADER		(ETH_HLEN + \
-+				 sizeof(*iph) + \
-+				 sizeof(*th) + \
-+				 TCPOLEN_TSTAMP_ALIGNED)
-+	dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
-+				      MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+
-+	/* We only handle aligned timestamp options */
-+	tcp_hlen = (th->doff << 2);
-+	if (tcp_hlen == sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) {
-+		ts = (u32*) (th + 1);
-+		if (unlikely(*ts != htonl((TCPOPT_NOP << 24) |
-+					  (TCPOPT_NOP << 16) |
-+					  (TCPOPT_TIMESTAMP << 8) |
-+					  TCPOLEN_TIMESTAMP)))
-+			goto sync_device;
-+		tsval = ntohl(ts[1]);
-+		tsecr = ts[2];
-+	} else if (tcp_hlen != sizeof(*th))
-+		goto sync_device;
-+	
-+
-+	/* At this point we know we have a TCP packet that is likely to be
-+	 * eligible for LRO. Therefore, see now if we have an outstanding
-+	 * session that corresponds to this packet so we could flush it if
-+	 * something still prevents LRO */
-+	lro = mlx4_en_lro_find_session(mdev, ring, iph, th);
-+
-+	/* ensure no bits set besides ack or psh */
-+	if (th->fin || th->syn || th->rst || th->urg || th->ece ||
-+	    th->cwr || !th->ack) {
-+		if (lro) {
-+			/* First flush session to keep packets in-order */
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		}
-+		goto sync_device;
-+	}
-+
-+	/* Get ip length and verify that the frame is big enough */
-+	ip_len = ntohs(iph->tot_len);
-+	if (unlikely(length < ETH_HLEN + ip_len)) {
-+		mlx4_warn(mdev, "Cannot LRO - ip payload exceeds frame!\n");
-+		goto sync_device;
-+	}
-+
-+	/* Get TCP payload length */
-+	tcp_data_len = ip_len - tcp_hlen - sizeof(struct iphdr);
-+	seq = ntohl(th->seq);
-+	if (!tcp_data_len)
-+		goto flush_session;
-+
-+	if (lro) {
-+		/* Check VLAN tag */
-+		if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+			if (cqe->sl_vid != lro->vlan_prio || !lro->has_vlan) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else if (lro->has_vlan) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* Check sequence number */
-+		if (unlikely(seq != lro->next_seq)) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* If the cumulative IP length is over 64K, flush and start
-+		 * a new session */
-+		if (lro->tot_len + tcp_data_len > 0xffff) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto new_session;
-+		}
-+
-+		/* Check timestamps */
-+		if (tcp_hlen != sizeof(*th)) {
-+			if (unlikely(lro->tsval > tsval || !tsecr))
-+				goto sync_device;
-+		}
-+
-+		window = th->window;
-+		ack_seq = th->ack_seq;
-+		if (likely(tcp_data_len)) {
-+			/* Append the data! */
-+			hlen = ETH_HLEN + sizeof(struct iphdr) + tcp_hlen;
-+			if (mlx4_en_lro_append(priv, lro, rx_desc, skb_frags,
-+							ring->page_alloc,
-+							tcp_data_len, hlen)) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else {
-+			/* No data */
-+			dma_sync_single_range_for_device(&mdev->dev->pdev->dev, dma,
-+							 0, MAX_LRO_HEADER,
-+							 DMA_FROM_DEVICE);
-+		}
-+
-+		/* Update session */
-+		lro->psh |= th->psh;
-+		lro->next_seq += tcp_data_len;
-+		lro->data_csum = csum_block_add(lro->data_csum,
-+					mlx4_en_lro_tcp_data_csum(iph, th,
-+								  tcp_data_len),
-+					lro->tot_len);
-+		lro->tot_len += tcp_data_len;
-+		lro->tsval = tsval;
-+		lro->tsecr = tsecr;
-+		lro->ack_seq = ack_seq;
-+		lro->window = window;
-+		if (tcp_data_len > lro->mss)
-+			lro->mss = tcp_data_len;
-+		priv->port_stats.lro_aggregated++;
-+		if (th->psh)
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		return 0;
-+	}
-+
-+new_session:
-+	if (th->psh)
-+		goto sync_device;
-+	lro = mlx4_en_lro_alloc_session(priv, ring);
-+	if (lro) {
-+		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags, ring->page_alloc,
-+							     ETH_HLEN + ip_len);
-+		if (skb) {
-+			int index;
-+
-+			/* Add in the skb */
-+			lro->skb = skb;
-+			lro->skb_last = skb;
-+			skb->protocol = eth_type_trans(skb, priv->dev);
-+			skb->ip_summed = CHECKSUM_UNNECESSARY;
-+
-+			/* Initialize session */
-+			lro->saddr = iph->saddr;
-+			lro->daddr = iph->daddr;
-+			lro->sport_dport = *((u32*) &th->source);
-+
-+			lro->next_seq = seq + tcp_data_len;
-+			lro->tot_len = ip_len;
-+			lro->psh = th->psh;
-+			lro->ack_seq = th->ack_seq;
-+			lro->window = th->window;
-+			lro->mss = tcp_data_len;
-+			lro->data_csum = mlx4_en_lro_tcp_data_csum(iph, th,
-+						tcp_data_len);
-+
-+			/* Handle vlans */
-+			if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+				lro->vlan_prio = cqe->sl_vid;
-+				lro->has_vlan = 1;
-+			} else
-+				lro->has_vlan = 0;
-+
-+			/* Handle timestamps */
-+			if (tcp_hlen != sizeof(*th)) {
-+				lro->tsval = tsval;
-+				lro->tsecr = tsecr;
-+				lro->has_timestamp = 1;
-+			} else {
-+				lro->tsval = (u32) ~0UL;
-+				lro->has_timestamp = 0;
-+			}
-+
-+			/* Activate this session */
-+			lro->expires = jiffies + HZ / 25;
-+			hlist_del(&lro->node);
-+			index = LRO_INDEX(th, mdev->profile.num_lro);
-+
-+			hlist_add_head(&lro->node, &ring->lro_hash[index]);
-+			hlist_add_head(&lro->flush_node, &ring->lro_flush);
-+			priv->port_stats.lro_aggregated++;
-+			return 0;
-+		}
-+	} else {
-+		priv->port_stats.lro_no_desc++;
-+	}
-+
-+flush_session:
-+	if (lro)
-+		mlx4_en_lro_flush_single(priv, ring, lro);
-+sync_device:
-+	dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
-+					 MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+	return -1;
-+}
-+
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_free, node) {
-+		hlist_del(&lro->node);
-+		kfree(lro);
-+	}
-+	kfree(ring->lro_hash);
-+}
-+
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro)
-+{
-+	struct mlx4_en_lro *lro;
-+	int i;
-+
-+	INIT_HLIST_HEAD(&ring->lro_free);
-+	INIT_HLIST_HEAD(&ring->lro_flush);
-+	ring->lro_hash = kmalloc(sizeof(struct hlist_head) * num_lro,
-+				 GFP_KERNEL);
-+	if (!ring->lro_hash)
-+		return -ENOMEM;
-+
-+	for (i = 0; i < num_lro; i++) {
-+		INIT_HLIST_HEAD(&ring->lro_hash[i]);
-+		lro = kzalloc(sizeof(struct mlx4_en_lro), GFP_KERNEL);
-+		if (!lro) {
-+			mlx4_en_lro_destroy(ring);
-+			return -ENOMEM;
-+		}
-+		INIT_HLIST_NODE(&lro->node);
-+		INIT_HLIST_NODE(&lro->flush_node);
-+		hlist_add_head(&lro->node, &ring->lro_free);
-+	}
-+	return 0;
-+}
-+
-+
 diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
-index 6199f4c..92f91d2 100644
+index 2d57ae6..3b394d7 100644
 --- a/drivers/net/mlx4/en_netdev.c
 +++ b/drivers/net/mlx4/en_netdev.c
 @@ -333,7 +333,8 @@ void mlx4_en_netpoll(struct net_device *dev)
@@ -625,10 +57,10 @@
 -		napi_synchronize(&cq->napi);
 +		while (test_bit(__LINK_STATE_RX_SCHED, &cq->poll_dev->state))
 +			msleep(1);
- 		mlx4_en_process_rx_cq(dev, cq, 0);
- 		spin_unlock_irqrestore(&cq->lock, flags);
- 	}
-@@ -757,8 +758,6 @@ void mlx4_en_stop_port(struct net_device *dev)
+ 		if (priv->rx_ring[i].use_frags)
+ 			mlx4_en_process_rx_cq(dev, cq, 0);
+ 		else
+@@ -755,8 +756,6 @@ void mlx4_en_stop_port(struct net_device *dev)
  	/* Free RX Rings */
  	for (i = 0; i < priv->rx_ring_num; i++) {
  		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -637,226 +69,11 @@
  		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
  	}
  }
-diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
-index 6e96d0a..2b0ca84 100644
---- a/drivers/net/mlx4/en_params.c
-+++ b/drivers/net/mlx4/en_params.c
-@@ -142,21 +142,6 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
-  * Ethtool support
-  */
- 
--static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
--{
--	int i;
--
--	priv->port_stats.lro_aggregated = 0;
--	priv->port_stats.lro_flushed = 0;
--	priv->port_stats.lro_no_desc = 0;
--
--	for (i = 0; i < priv->rx_ring_num; i++) {
--		priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
--		priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
--		priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
--	}
--}
--
- static void
- mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
- {
-@@ -266,8 +251,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
- 
- 	spin_lock_bh(&priv->stats_lock);
- 
--	mlx4_en_update_lro_stats(priv);
--
- 	for (i = 0; i < NUM_MAIN_STATS; i++)
- 		data[index++] = ((unsigned long *) &priv->stats)[i];
- 	for (i = 0; i < NUM_PORT_STATS; i++)
-@@ -457,7 +440,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
- 	.get_rx_csum = mlx4_en_get_rx_csum,
- 	.set_rx_csum = mlx4_en_set_rx_csum,
- 	.get_tx_csum = ethtool_op_get_tx_csum,
--	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
-+	.set_tx_csum = ethtool_op_set_tx_csum,
- 	.get_strings = mlx4_en_get_strings,
- 	.get_sset_count = mlx4_en_get_sset_count,
- 	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
 diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
-index 5a95d74..16f07bc 100644
+index 9d144cd..14b5faf 100644
 --- a/drivers/net/mlx4/en_rx.c
 +++ b/drivers/net/mlx4/en_rx.c
-@@ -37,6 +37,7 @@
- #include <linux/if_ether.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/etherdevice.h>
- 
- #include "mlx4_en.h"
- 
-@@ -51,18 +52,6 @@ static void mlx4_en_srq_event(struct mlx
- 	return;
- }
- 
--static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
--				   void **ip_hdr, void **tcpudp_hdr,
--				   u64 *hdr_flags, void *priv)
--{
--	*mac_hdr = page_address(frags->page) + frags->page_offset;
--	*ip_hdr = *mac_hdr + ETH_HLEN;
--	*tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
--	*hdr_flags = LRO_IPV4 | LRO_TCP;
--
--	return 0;
--}
--
- static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
- 			      struct mlx4_en_rx_desc *rx_desc,
- 			      struct skb_frag_struct *skb_frags,
-@@ -372,23 +361,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- 	}
- 	ring->buf = ring->wqres.buf.direct.buf;
- 
--	/* Configure lro mngr */
--	memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
--	ring->lro.dev = priv->dev;
--	ring->lro.features = LRO_F_NAPI;
--	ring->lro.frag_align_pad = NET_IP_ALIGN;
--	ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
--	ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
--	ring->lro.max_desc = mdev->profile.num_lro;
--	ring->lro.max_aggr = MAX_SKB_FRAGS;
--	ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
--				    sizeof(struct net_lro_desc),
--				    GFP_KERNEL);
--	if (!ring->lro.lro_arr) {
--		mlx4_err(mdev, "Failed to allocate lro array\n");
-+	/* Allocate LRO sessions */
-+	if (mlx4_en_lro_init(ring, mdev->profile.num_lro)) {
-+		mlx4_err(mdev, "Failed allocating lro sessions\n");
- 		goto err_map;
- 	}
--	ring->lro.get_frag_header = mlx4_en_get_frag_header;
- 
- 	return 0;
- 
-@@ -501,7 +478,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 
--	kfree(ring->lro.lro_arr);
-+	mlx4_en_lro_destroy(ring);
- 	mlx4_en_unmap_buffer(&ring->wqres.buf);
- 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
- 	vfree(ring->rx_info);
-@@ -522,12 +499,12 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- 
- 
- /* Unmap a completed descriptor and free unused pages */
--static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
--				    struct mlx4_en_rx_desc *rx_desc,
--				    struct skb_frag_struct *skb_frags,
--				    struct skb_frag_struct *skb_frags_rx,
--				    struct mlx4_en_rx_alloc *page_alloc,
--				    int length)
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct mlx4_en_frag_info *frag_info;
-@@ -570,11 +547,11 @@ fail:
- }
- 
- 
--static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
--				      struct mlx4_en_rx_desc *rx_desc,
--				      struct skb_frag_struct *skb_frags,
--				      struct mlx4_en_rx_alloc *page_alloc,
--				      unsigned int length)
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct sk_buff *skb;
-@@ -663,11 +640,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 	struct mlx4_cqe *cqe;
- 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
- 	struct skb_frag_struct *skb_frags;
--	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
- 	struct mlx4_en_rx_desc *rx_desc;
- 	struct sk_buff *skb;
- 	int index;
--	int nr;
- 	unsigned int length;
- 	int polled = 0;
- 	int ip_summed;
-@@ -718,38 +693,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 			if ((cqe->status & MLX4_CQE_STATUS_IPOK) &&
- 			    (cqe->checksum == 0xffff)) {
- 				priv->port_stats.rx_chksum_good++;
--				/* This packet is eligible for LRO if it is:
--				 * - DIX Ethernet (type interpretation)
--				 * - TCP/IP (v4)
--				 * - without IP options
--				 * - not an IP fragment */
--				if (mlx4_en_can_lro(cqe->status) &&
--				    dev->features & NETIF_F_LRO) {
--
--					nr = mlx4_en_complete_rx_desc(
--						priv, rx_desc,
--						skb_frags, lro_frags,
--						ring->page_alloc, length);
--					if (!nr)
--						goto next;
--
--					if (priv->vlgrp && (cqe->vlan_my_qpn &
--						MLX4_CQE_VLAN_PRESENT_MASK)) {
--						lro_vlan_hwaccel_receive_frags(
--						       &ring->lro, lro_frags,
--						       length, length,
--						       priv->vlgrp,
--						       be16_to_cpu(cqe->sl_vid),
--						       NULL, 0);
--					} else
--						lro_receive_frags(&ring->lro,
--								  lro_frags,
--								  length,
--								  length,
--								  NULL, 0);
--
-+				if (mdev->profile.num_lro &&
-+				    !mlx4_en_lro_rx(priv, ring, rx_desc,
-+						    skb_frags, length, cqe))
- 					goto next;
--				}
- 
- 				/* LRO not possible, complete processing here */
- 				ip_summed = CHECKSUM_UNNECESSARY;
-@@ -790,13 +737,15 @@ next:
- 		if (++polled == budget) {
- 			/* We are here because we reached the NAPI budget -
- 			 * flush only pending LRO sessions */
--			lro_flush_all(&ring->lro);
-+			if (mdev->profile.num_lro)
-+				mlx4_en_lro_flush(priv, ring, 0);
- 			goto out;
- 		}
- 	}
- 
- 	/* If CQ is empty flush all LRO sessions unconditionally */
--	lro_flush_all(&ring->lro);
-+	if (mdev->profile.num_lro)
-+		mlx4_en_lro_flush(priv, ring, 1);
- 
- out:
- 	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
-@@ -820,30 +769,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
+@@ -1041,30 +1041,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
  	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
  
  	if (priv->port_up)
@@ -877,8 +94,8 @@
  	int done;
 +	int work = min(*budget, poll_dev->quota);
  
--	done = mlx4_en_process_rx_cq(dev, cq, budget);
-+	done = mlx4_en_process_rx_cq(dev, cq, work);
+-	done = cq->process_cq(dev, cq, budget);
++	done = cq->process_cq(dev, cq, work);
 +	dev->quota -= done;
 +	*budget -= done;
  
@@ -901,76 +118,11 @@
  }
  
  
-diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
-index 2297fec..59f8772 100644
---- a/drivers/net/mlx4/en_tx.c
-+++ b/drivers/net/mlx4/en_tx.c
-@@ -37,6 +37,7 @@
- #include <linux/skbuff.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/tcp.h>
- 
- #include "mlx4_en.h"
- 
 diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
-index 1011803..7c8c489 100644
+index ebaed67..456cd91 100644
 --- a/drivers/net/mlx4/mlx4_en.h
 +++ b/drivers/net/mlx4/mlx4_en.h
-@@ -38,7 +38,7 @@
- #include <linux/list.h>
- #include <linux/mutex.h>
- #include <linux/netdevice.h>
--#include <linux/inet_lro.h>
-+#include <net/checksum.h>
- 
- #include <linux/mlx4/device.h>
- #include <linux/mlx4/qp.h>
-@@ -268,11 +268,42 @@ struct mlx4_en_rx_desc {
- 	struct mlx4_wqe_data_seg data[0];
- };
- 
-+struct mlx4_en_lro {
-+	struct hlist_node node;
-+	struct hlist_node flush_node;
-+
-+	/* Id fields come first: */
-+	u32 saddr;
-+	u32 daddr;
-+	u32 sport_dport;
-+	u32 next_seq;
-+	u16 tot_len;
-+	u8 psh;
-+
-+	u32 tsval;
-+	u32 tsecr;
-+	u32 ack_seq;
-+	u16 window;
-+	__be16 vlan_prio;
-+	u16 has_vlan;
-+	u16 has_timestamp;
-+	u16 mss;
-+	__wsum  data_csum;
-+
-+	unsigned long expires;
-+	struct sk_buff *skb;
-+	struct sk_buff *skb_last;
-+};
-+
-+
- struct mlx4_en_rx_ring {
- 	struct mlx4_srq srq;
- 	struct mlx4_hwq_resources wqres;
- 	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
--	struct net_lro_mgr lro;
-+	struct mlx4_en_lro lro;
-+	struct hlist_head *lro_hash;
-+	struct hlist_head lro_free;
-+	struct hlist_head lro_flush;
- 	u32 size ;	/* number of Rx descs*/
- 	u32 actual_size;
- 	u32 size_mask;
-@@ -311,7 +342,7 @@ struct mlx4_en_cq {
+@@ -328,7 +328,7 @@ struct mlx4_en_cq {
  	int                     ring;
  	spinlock_t              lock;
  	struct net_device      *dev;
@@ -979,43 +131,12 @@
  	/* Per-core Tx cq processing support */
  	struct timer_list timer;
  	int size;
-@@ -541,7 +572,7 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- int mlx4_en_process_rx_cq(struct net_device *dev,
- 			  struct mlx4_en_cq *cq,
- 			  int budget);
+@@ -571,7 +571,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
+ int mlx4_en_process_rx_cq_skb(struct net_device *dev,
+ 			      struct mlx4_en_cq *cq,
+ 			      int budget);
 -int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
 +int mlx4_en_poll_rx_cq(struct net_device *poll_dev, int *budget);
  void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
  			     int is_tx, int rss, int qpn, int cqn, int srqn,
  			     struct mlx4_qp_context *context);
-@@ -552,6 +583,27 @@ void mlx4_en_calc_rx_buf(struct net_device *dev);
- void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
- 				 struct mlx4_en_rss_map *rss_map,
- 				 int num_entries, int num_rings);
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all);
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+		   struct mlx4_en_rx_desc *rx_desc,
-+		   struct skb_frag_struct *skb_frags,
-+		   unsigned int length, struct mlx4_cqe *cqe);
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring);
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro);
-+
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv* priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length);
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv* priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length);
-+
- void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
- int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
- void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
--- 
-1.5.4
-

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/mlx4_en_0110_no_set_flags.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/mlx4_en_0110_no_set_flags.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/mlx4_en_0110_no_set_flags.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -7,9 +7,9 @@
 --- ofed_kernel.orig/drivers/net/mlx4/en_params.c
 +++ ofed_kernel/drivers/net/mlx4/en_params.c
 @@ -593,8 +593,6 @@ const struct ethtool_ops mlx4_en_ethtool
- 	.get_pauseparam = mlx4_en_get_pauseparam,
  	.set_pauseparam = mlx4_en_set_pauseparam,
  	.get_ringparam = mlx4_en_get_ringparam,
+ 	.set_ringparam = mlx4_en_set_ringparam,
 -	.get_flags = ethtool_op_get_flags,
 -	.set_flags = ethtool_op_set_flags,
  };

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/rnfs_fs.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/rnfs_fs.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/rnfs_fs.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,5 +1,48 @@
+diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
+index cc91227..3433497 100644
+--- a/fs/exportfs/expfs.c
++++ b/fs/exportfs/expfs.c
+@@ -280,7 +280,7 @@ static int get_name(struct vfsmount *mnt, struct dentry *dentry,
+ 	while (1) {
+ 		int old_seq = buffer.sequence;
+ 
+-		error = vfs_readdir(file, filldir_one, &buffer);
++		error = vfs_readdir(file, (filldir_t)filldir_one, &buffer);
+ 
+ 		if (error < 0)
+ 			break;
+@@ -361,11 +361,14 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
+ 	const struct export_operations *nop = mnt->mnt_sb->s_export_op;
+ 	struct dentry *result, *alias;
+ 	int err;
++	__u32 objp[2];
+ 
++	objp[0] = fid->i32.ino;
++	objp[1] = fid->i32.gen;
+ 	/*
+ 	 * Try to get any dentry for the given file handle from the filesystem.
+ 	 */
+-	result = nop->fh_to_dentry(mnt->mnt_sb, fid, fh_len, fileid_type);
++	result = nop->get_dentry(mnt->mnt_sb, &objp);
+ 	if (!result)
+ 		result = ERR_PTR(-ESTALE);
+ 	if (IS_ERR(result))
+@@ -417,11 +420,10 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
+ 		 * file handle.  If this fails we'll have to give up.
+ 		 */
+ 		err = -ESTALE;
+-		if (!nop->fh_to_parent)
++		if (!nop->get_parent)
+ 			goto err_result;
+ 
+-		target_dir = nop->fh_to_parent(mnt->mnt_sb, fid,
+-				fh_len, fileid_type);
++		target_dir = nop->get_parent(result);
+ 		if (!target_dir)
+ 			goto err_result;
+ 		err = PTR_ERR(target_dir);
 diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
-index 0b45fd3..04761ed 100644
+index 0b45fd3..2c45814 100644
 --- a/fs/lockd/clntlock.c
 +++ b/fs/lockd/clntlock.c
 @@ -168,7 +168,7 @@ __be32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *lock
@@ -7,7 +50,7 @@
  		if (!nlm_cmp_addr(&block->b_host->h_addr, addr))
  			continue;
 -		if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_path.dentry->d_inode) ,fh) != 0)
-+		if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_dentry->d_inode) ,fh) != 0)
++		if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_dentry->d_inode), fh) != 0)
  			continue;
  		/* Alright, we found a lock. Set the return status
  		 * and wake up the caller
@@ -31,21 +74,21 @@
  	lock->svid = fl->fl_u.nfs_fl.owner->pid;
  	lock->fl.fl_start = fl->fl_start;
  	lock->fl.fl_end = fl->fl_end;
-diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
-index 5bd9bf0..e74ecae 100644
---- a/fs/lockd/svc.c
-+++ b/fs/lockd/svc.c
-@@ -506,7 +506,7 @@ module_param(nsm_use_hostnames, bool, 0644);
- static int __init init_nlm(void)
+diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
+index e4d5635..771edc1 100644
+--- a/fs/lockd/mon.c
++++ b/fs/lockd/mon.c
+@@ -194,7 +194,7 @@ static __be32 *xdr_encode_mon_name(__be32 *p, struct nsm_args *argp)
+  */
+ static __be32 *xdr_encode_my_id(__be32 *p, struct nsm_args *argp)
  {
- #ifdef CONFIG_SYSCTL
--	nlm_sysctl_table = register_sysctl_table(nlm_sysctl_root);
-+	nlm_sysctl_table = register_sysctl_table(nlm_sysctl_root,0);
- 	return nlm_sysctl_table ? 0 : -ENOMEM;
- #else
- 	return 0;
+-	p = xdr_encode_nsm_string(p, utsname()->nodename);
++	p = xdr_encode_nsm_string(p, system_utsname.nodename);
+ 	if (!p)
+ 		return ERR_PTR(-EIO);
+ 
 diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
-index cf0d5c2..9d98bfe 100644
+index cf0d5c2..a353cf5 100644
 --- a/fs/lockd/svclock.c
 +++ b/fs/lockd/svclock.c
 @@ -304,7 +304,7 @@ static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
@@ -68,29 +111,38 @@
  				lock->fl.fl_type, lock->fl.fl_pid,
  				(long long)lock->fl.fl_start,
  				(long long)lock->fl.fl_end,
-@@ -418,8 +418,8 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
+@@ -417,11 +417,18 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
+ 			ret = nlm_granted;
  			goto out;
  		case -EAGAIN:
- 			ret = nlm_lck_denied;
+-			ret = nlm_lck_denied;
 -			goto out;
--		case FILE_LOCK_DEFERRED:
-+			break;
-+		case -EINPROGRESS:
- 			if (wait)
++			if (wait) {
++				ret = nlm_lck_blocked;
++				break;
++			} else {
++				ret = nlm_lck_denied;
++				goto out;
++			}
+ 		case FILE_LOCK_DEFERRED:
+-			if (wait)
++			if (wait) {
++				ret = nlm_lck_blocked;
  				break;
++			}
  			/* Filesystem lock operation is in progress
-@@ -434,6 +434,10 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
+ 			   Add it to the queue waiting for callback */
+ 			ret = nlmsvc_defer_lock_rqst(rqstp, block);
+@@ -434,8 +441,6 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
  			goto out;
  	}
  
-+	ret = nlm_lck_denied;
-+	if (!wait)
-+		goto out;
-+
- 	ret = nlm_lck_blocked;
- 
+-	ret = nlm_lck_blocked;
+-
  	/* Append to list of blocked */
-@@ -458,8 +462,8 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
+ 	nlmsvc_insert_block(block, NLM_NEVER);
+ out:
+@@ -458,8 +463,8 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
  	__be32			ret;
  
  	dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
@@ -101,16 +153,7 @@
  				lock->fl.fl_type,
  				(long long)lock->fl.fl_start,
  				(long long)lock->fl.fl_end);
-@@ -503,7 +507,7 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
- 	}
- 
- 	error = vfs_test_lock(file->f_file, &lock->fl);
--	if (error == FILE_LOCK_DEFERRED) {
-+	if (error == -EINPROGRESS) {
- 		ret = nlmsvc_defer_lock_rqst(rqstp, block);
- 		goto out;
- 	}
-@@ -547,8 +551,8 @@ nlmsvc_unlock(struct nlm_file *file, struct nlm_lock *lock)
+@@ -547,8 +552,8 @@ nlmsvc_unlock(struct nlm_file *file, struct nlm_lock *lock)
  	int	error;
  
  	dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
@@ -121,7 +164,7 @@
  				lock->fl.fl_pid,
  				(long long)lock->fl.fl_start,
  				(long long)lock->fl.fl_end);
-@@ -576,8 +580,8 @@ nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
+@@ -576,8 +581,8 @@ nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
  	int status = 0;
  
  	dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
@@ -132,25 +175,6 @@
  				lock->fl.fl_pid,
  				(long long)lock->fl.fl_start,
  				(long long)lock->fl.fl_end);
-@@ -615,7 +619,7 @@ nlmsvc_update_deferred_block(struct nlm_block *block, struct file_lock *conf,
- 		block->b_flags |= B_TIMED_OUT;
- 	if (conf) {
- 		if (block->b_fl)
--			__locks_copy_lock(block->b_fl, conf);
-+			locks_copy_lock(block->b_fl, conf);
- 	}
- }
- 
-@@ -727,7 +731,8 @@ nlmsvc_grant_blocked(struct nlm_block *block)
- 	switch (error) {
- 	case 0:
- 		break;
--	case FILE_LOCK_DEFERRED:
-+	case -EAGAIN:
-+	case -EINPROGRESS:
- 		dprintk("lockd: lock still blocked error %d\n", error);
- 		nlmsvc_insert_block(block, NLM_NEVER);
- 		nlmsvc_release_block(block);
 diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
 index 198b4e5..2109091 100644
 --- a/fs/lockd/svcsubs.c
@@ -173,240 +197,84 @@
  }
  
  /**
-diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
-index e4d5635..771edc1 100644
---- a/fs/lockd/mon.c
-+++ b/fs/lockd/mon.c
-@@ -194,7 +194,7 @@ static __be32 *xdr_encode_mon_name(__be32 *p, struct nsm_args *argp)
-  */
- static __be32 *xdr_encode_my_id(__be32 *p, struct nsm_args *argp)
- {
--	p = xdr_encode_nsm_string(p, utsname()->nodename);
-+	p = xdr_encode_nsm_string(p, system_utsname.nodename);
- 	if (!p)
- 		return ERR_PTR(-EIO);
+diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
+index ac6170c..16d3d17 100644
+--- a/fs/nfs/Makefile
++++ b/fs/nfs/Makefile
+@@ -6,7 +6,8 @@ obj-$(CONFIG_NFS_FS) += nfs.o
  
-diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
-index 24241fc..1a04f19 100644
---- a/fs/nfs/internal.h
-+++ b/fs/nfs/internal.h
-@@ -3,7 +3,6 @@
-  */
- 
- #include <linux/mount.h>
--#include <linux/security.h>
- 
- struct nfs_string;
- 
-@@ -59,8 +58,6 @@ struct nfs_parsed_mount_data {
- 		unsigned short		port;
- 		unsigned short		protocol;
- 	} nfs_server;
--
--	struct security_mnt_opts lsm_opts;
- };
- 
- /* client.c */
-@@ -216,9 +213,9 @@ unsigned long nfs_block_bits(unsigned long bsize, unsigned char *nrbitsp)
- /*
-  * Calculate the number of 512byte blocks used.
-  */
--static inline blkcnt_t nfs_calc_block_size(u64 tsize)
-+static inline unsigned long nfs_calc_block_size(u64 tsize)
- {
--	blkcnt_t used = (tsize + 511) >> 9;
-+	loff_t used = (tsize + 511) >> 9;
- 	return (used > ULONG_MAX) ? ULONG_MAX : used;
- }
- 
+ nfs-y 			:= client.o dir.o file.o getroot.o inode.o super.o nfs2xdr.o \
+ 			   direct.o pagelist.o proc.o read.o symlink.o unlink.o \
+-			   write.o namespace.o mount_clnt.o
++			   write.o namespace.o mount_clnt.o \
++			   backport-namespace.o backport-strndup.o backport-writeback.o
+ nfs-$(CONFIG_ROOT_NFS)	+= nfsroot.o
+ nfs-$(CONFIG_NFS_V3)	+= nfs3proc.o nfs3xdr.o
+ nfs-$(CONFIG_NFS_V3_ACL)	+= nfs3acl.o
+diff --git a/fs/nfs/backport-namespace.c b/fs/nfs/backport-namespace.c
+new file mode 100644
+index 0000000..de57f8b
+--- /dev/null
++++ b/fs/nfs/backport-namespace.c
+@@ -0,0 +1 @@
++#include "src/namespace.c"
+diff --git a/fs/nfs/backport-strndup.c b/fs/nfs/backport-strndup.c
+new file mode 100644
+index 0000000..ec4b330
+--- /dev/null
++++ b/fs/nfs/backport-strndup.c
+@@ -0,0 +1 @@
++#include "src/strndup.c"
+diff --git a/fs/nfs/backport-writeback.c b/fs/nfs/backport-writeback.c
+new file mode 100644
+index 0000000..b838ead
+--- /dev/null
++++ b/fs/nfs/backport-writeback.c
+@@ -0,0 +1 @@
++#include "src/writeback.c"
 diff --git a/fs/nfs/client.c b/fs/nfs/client.c
-index 5ee23e7..4fc5b7c 100644
+index 5ee23e7..afbb834 100644
 --- a/fs/nfs/client.c
 +++ b/fs/nfs/client.c
-@@ -395,7 +395,7 @@ found_client:
- 	if (new)
- 		nfs_free_client(new);
- 
--	error = wait_event_killable(nfs_client_active_wq,
-+	error = wait_event_interruptible(nfs_client_active_wq,
- 				clp->cl_cons_state != NFS_CS_INITING);
- 	if (error < 0) {
- 		nfs_put_client(clp);
-@@ -601,6 +601,10 @@ static int nfs_init_server_rpcclient(struct nfs_server *server,
- 	if (server->flags & NFS_MOUNT_SOFT)
- 		server->client->cl_softrtry = 1;
- 
-+	server->client->cl_intr = 0;
-+	if (server->flags & NFS4_MOUNT_INTR)
-+		server->client->cl_intr = 1;
-+
- 	return 0;
- }
- 
-@@ -796,10 +800,6 @@ static int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *mntfh, str
- 		goto out_error;
- 
- 	nfs_server_set_fsinfo(server, &fsinfo);
--	error = bdi_init(&server->backing_dev_info);
--	if (error)
--		goto out_error;
--
- 
- 	/* Get some general file system info */
- 	if (server->namelen == 0) {
-@@ -885,7 +885,6 @@ void nfs_free_server(struct nfs_server *server)
- 	nfs_put_client(server->nfs_client);
- 
- 	nfs_free_iostats(server->io_stats);
--	bdi_destroy(&server->backing_dev_info);
- 	kfree(server);
- 	nfs_release_automount_timer();
- 	dprintk("<-- nfs_free_server()\n");
-@@ -1324,7 +1323,6 @@ static const struct file_operations nfs_server_list_fops = {
- 	.read		= seq_read,
- 	.llseek		= seq_lseek,
- 	.release	= seq_release,
--	.owner		= THIS_MODULE,
- };
- 
- static int nfs_volume_list_open(struct inode *inode, struct file *file);
-@@ -1345,7 +1343,6 @@ static const struct file_operations nfs_volume_list_fops = {
- 	.read		= seq_read,
- 	.llseek		= seq_lseek,
- 	.release	= seq_release,
--	.owner		= THIS_MODULE,
- };
- 
- /*
-@@ -1372,9 +1369,23 @@ static int nfs_server_list_open(struct inode *inode, struct file *file)
-  */
- static void *nfs_server_list_start(struct seq_file *m, loff_t *_pos)
- {
-+	struct list_head *_p;
-+	loff_t pos = *_pos;
-+
- 	/* lock the list against modification */
- 	spin_lock(&nfs_client_lock);
--	return seq_list_start_head(&nfs_client_list, *_pos);
-+
-+	/* allow for the header line */
-+	if (!pos)
-+		return SEQ_START_TOKEN;
-+	pos--;
-+
-+	/* find the n'th element in the list */
-+	list_for_each(_p, &nfs_client_list)
-+		if (!pos--)
-+			break;
-+
-+	return _p != &nfs_client_list ? _p : NULL;
- }
- 
- /*
-@@ -1382,7 +1393,14 @@ static void *nfs_server_list_start(struct seq_file *m, loff_t *_pos)
-  */
- static void *nfs_server_list_next(struct seq_file *p, void *v, loff_t *pos)
- {
--	return seq_list_next(v, &nfs_client_list, pos);
-+	struct list_head *_p;
-+
-+	(*pos)++;
-+
-+	_p = v;
-+	_p = (v == SEQ_START_TOKEN) ? nfs_client_list.next : _p->next;
-+
-+	return _p != &nfs_client_list ? _p : NULL;
- }
- 
- /*
-@@ -1401,7 +1419,7 @@ static int nfs_server_list_show(struct seq_file *m, void *v)
- 	struct nfs_client *clp;
- 
- 	/* display header on line 1 */
--	if (v == &nfs_client_list) {
-+	if (v == SEQ_START_TOKEN) {
- 		seq_puts(m, "NV SERVER   PORT USE HOSTNAME\n");
- 		return 0;
+@@ -248,6 +248,7 @@ static int nfs_sockaddr_match_ipaddr(const struct sockaddr *sa1,
+ 				(const struct sockaddr_in6 *)sa2);
  	}
-@@ -1442,9 +1460,23 @@ static int nfs_volume_list_open(struct inode *inode, struct file *file)
-  */
- static void *nfs_volume_list_start(struct seq_file *m, loff_t *_pos)
- {
-+	struct list_head *_p;
-+	loff_t pos = *_pos;
-+
- 	/* lock the list against modification */
- 	spin_lock(&nfs_client_lock);
--	return seq_list_start_head(&nfs_volume_list, *_pos);
-+
-+	/* allow for the header line */
-+	if (!pos)
-+		return SEQ_START_TOKEN;
-+	pos--;
-+
-+	/* find the n'th element in the list */
-+	list_for_each(_p, &nfs_volume_list)
-+		if (!pos--)
-+			break;
-+
-+	return _p != &nfs_volume_list ? _p : NULL;
+ 	BUG();
++	return -EINVAL;
  }
  
  /*
-@@ -1452,7 +1484,14 @@ static void *nfs_volume_list_start(struct seq_file *m, loff_t *_pos)
-  */
- static void *nfs_volume_list_next(struct seq_file *p, void *v, loff_t *pos)
- {
--	return seq_list_next(v, &nfs_volume_list, pos);
-+	struct list_head *_p;
-+
-+	(*pos)++;
-+
-+	_p = v;
-+	_p = (v == SEQ_START_TOKEN) ? nfs_volume_list.next : _p->next;
-+
-+	return _p != &nfs_volume_list ? _p : NULL;
- }
- 
- /*
-@@ -1473,7 +1512,7 @@ static int nfs_volume_list_show(struct seq_file *m, void *v)
- 	char dev[8], fsid[17];
- 
- 	/* display header on line 1 */
--	if (v == &nfs_volume_list) {
-+	if (v == SEQ_START_TOKEN) {
- 		seq_puts(m, "NV SERVER   PORT DEV     FSID\n");
- 		return 0;
- 	}
-@@ -1512,16 +1551,20 @@ int __init nfs_fs_proc_init(void)
- 	proc_fs_nfs->owner = THIS_MODULE;
- 
- 	/* a file of servers with which we're dealing */
--	p = proc_create("servers", S_IFREG|S_IRUGO,
--			proc_fs_nfs, &nfs_server_list_fops);
-+	p = create_proc_entry("servers", S_IFREG|S_IRUGO, proc_fs_nfs);
- 	if (!p)
- 		goto error_1;
- 
-+	p->proc_fops = &nfs_server_list_fops;
-+	p->owner = THIS_MODULE;
-+
- 	/* a file of volumes that we have mounted */
--	p = proc_create("volumes", S_IFREG|S_IRUGO,
--			proc_fs_nfs, &nfs_volume_list_fops);
-+	p = create_proc_entry("volumes", S_IFREG|S_IRUGO, proc_fs_nfs);
- 	if (!p)
- 		goto error_2;
-+
-+	p->proc_fops = &nfs_volume_list_fops;
-+	p->owner = THIS_MODULE;
- 	return 0;
- 
- error_2:
 diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
-index 74f92b7..961a8af 100644
+index 74f92b7..90d0a97 100644
 --- a/fs/nfs/dir.c
 +++ b/fs/nfs/dir.c
+@@ -66,7 +66,7 @@ const struct file_operations nfs_dir_operations = {
+ 	.fsync		= nfs_fsync_dir,
+ };
+ 
+-const struct inode_operations nfs_dir_inode_operations = {
++struct inode_operations nfs_dir_inode_operations = {
+ 	.create		= nfs_create,
+ 	.lookup		= nfs_lookup,
+ 	.link		= nfs_link,
+@@ -82,7 +82,7 @@ const struct inode_operations nfs_dir_inode_operations = {
+ };
+ 
+ #ifdef CONFIG_NFS_V3
+-const struct inode_operations nfs3_dir_inode_operations = {
++struct inode_operations nfs3_dir_inode_operations = {
+ 	.create		= nfs_create,
+ 	.lookup		= nfs_lookup,
+ 	.link		= nfs_link,
+@@ -105,7 +105,7 @@ const struct inode_operations nfs3_dir_inode_operations = {
+ #ifdef CONFIG_NFS_V4
+ 
+ static struct dentry *nfs_atomic_lookup(struct inode *, struct dentry *, struct nameidata *);
+-const struct inode_operations nfs4_dir_inode_operations = {
++struct inode_operations nfs4_dir_inode_operations = {
+ 	.create		= nfs_create,
+ 	.lookup		= nfs_atomic_lookup,
+ 	.link		= nfs_link,
 @@ -134,8 +134,8 @@ nfs_opendir(struct inode *inode, struct file *filp)
  	int res;
  
@@ -481,21 +349,12 @@
  	struct inode *inode = dentry->d_inode;
  
  	dfprintk(FILE, "NFS: llseek dir(%s/%s, %lld, %d)\n",
-@@ -939,7 +939,7 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
- 		goto out_unblock_sillyrename;
- 
- no_entry:
--	res = d_materialise_unique(dentry, inode);
-+	res = d_add_unique(dentry, inode);
- 	if (res != NULL) {
- 		if (IS_ERR(res))
- 			goto out_unblock_sillyrename;
 @@ -973,7 +973,7 @@ static int is_atomic_open(struct inode *dir, struct nameidata *nd)
  	if (nd->flags & LOOKUP_DIRECTORY)
  		return 0;
  	/* Are we trying to write to a read only partition? */
 -	if (__mnt_is_readonly(nd->path.mnt) &&
-+	if (IS_RDONLY(dir) &&
++	if (__mnt_is_readonly(nd->mnt) &&
  	    (nd->intent.open.flags & (O_CREAT|O_TRUNC|FMODE_WRITE)))
  		return 0;
  	return 1;
@@ -508,16 +367,7 @@
  	struct inode *dir = parent->d_inode;
  	struct nfs_entry *entry = desc->entry;
  	struct dentry *dentry, *alias;
-@@ -1143,7 +1143,7 @@ static struct dentry *nfs_readdir_lookup(nfs_readdir_descriptor_t *desc)
- 		return NULL;
- 	}
- 
--	alias = d_materialise_unique(dentry, inode);
-+	alias = d_add_unique(dentry, inode);
- 	if (alias != NULL) {
- 		dput(dentry);
- 		if (IS_ERR(alias))
-@@ -1907,17 +1907,17 @@ int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags)
+@@ -1907,7 +1907,7 @@ int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags)
  	return nfs_do_access(inode, cred, nfs_open_permission_mask(openflags));
  }
  
@@ -526,11 +376,8 @@
  {
  	struct rpc_cred *cred;
  	int res = 0;
- 
- 	nfs_inc_stats(inode, NFSIOS_VFSACCESS);
- 
--	if ((mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
-+	if (mask == 0)
+@@ -1917,7 +1917,7 @@ int nfs_permission(struct inode *inode, int mask)
+ 	if ((mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
  		goto out;
  	/* Is this sys_access() ? */
 -	if (mask & MAY_ACCESS)
@@ -548,11 +395,46 @@
  				goto out;
  			break;
  		case S_IFDIR:
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index 08f6b04..91f5069 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -116,7 +116,7 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
+ ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
+ {
+ 	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
+-			iocb->ki_filp->f_path.dentry->d_name.name,
++			iocb->ki_filp->f_dentry->d_name.name,
+ 			(long long) pos, nr_segs);
+ 
+ 	return -EINVAL;
+@@ -891,8 +891,8 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
+ 	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
+ 
+ 	dfprintk(FILE, "NFS: direct read(%s/%s, %zd@%Ld)\n",
+-		file->f_path.dentry->d_parent->d_name.name,
+-		file->f_path.dentry->d_name.name,
++		file->f_dentry->d_parent->d_name.name,
++		file->f_dentry->d_name.name,
+ 		count, (long long) pos);
+ 
+ 	retval = 0;
+@@ -948,8 +948,8 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
+ 	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);
+ 
+ 	dfprintk(FILE, "NFS: direct write(%s/%s, %zd@%Ld)\n",
+-		file->f_path.dentry->d_parent->d_name.name,
+-		file->f_path.dentry->d_name.name,
++		file->f_dentry->d_parent->d_name.name,
++		file->f_dentry->d_name.name,
+ 		count, (long long) pos);
+ 
+ 	retval = generic_write_checks(file, &pos, &count, 0);
 diff --git a/fs/nfs/file.c b/fs/nfs/file.c
-index 7846065..45f7901 100644
+index 7846065..be3318a 100644
 --- a/fs/nfs/file.c
 +++ b/fs/nfs/file.c
-@@ -42,13 +42,9 @@ static int nfs_file_open(struct inode *, struct file *);
+@@ -42,19 +42,14 @@ static int nfs_file_open(struct inode *, struct file *);
  static int nfs_file_release(struct inode *, struct file *);
  static loff_t nfs_file_llseek(struct file *file, loff_t offset, int origin);
  static int  nfs_file_mmap(struct file *, struct vm_area_struct *);
@@ -564,21 +446,41 @@
 -static ssize_t nfs_file_write(struct kiocb *, const struct iovec *iov,
 -				unsigned long nr_segs, loff_t pos);
 +static ssize_t nfs_file_sendfile(struct file *, loff_t *, size_t, read_actor_t, void *);
-+static ssize_t nfs_file_read(struct kiocb *, char __user *, size_t, loff_t);
-+static ssize_t nfs_file_write(struct kiocb *, const char __user *, size_t, loff_t);
++static ssize_t nfs_file_read(struct kiocb *iocb, char __user * buf, size_t count, loff_t pos);
++static ssize_t nfs_file_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos);
  static int  nfs_file_flush(struct file *, fl_owner_t id);
  static int  nfs_file_fsync(struct file *, struct dentry *dentry, int datasync);
  static int nfs_check_flags(int flags);
-@@ -75,7 +71,7 @@ const struct file_operations nfs_file_operations = {
+ static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl);
+ static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl);
+-static int nfs_setlease(struct file *file, long arg, struct file_lock **fl);
+ 
+ static struct vm_operations_struct nfs_file_vm_ops;
+ 
+@@ -75,19 +70,18 @@ const struct file_operations nfs_file_operations = {
  	.fsync		= nfs_file_fsync,
  	.lock		= nfs_lock,
  	.flock		= nfs_flock,
 -	.splice_read	= nfs_file_splice_read,
-+	.sendfile       = nfs_file_sendfile,
++	.sendfile	= nfs_file_sendfile,
  	.check_flags	= nfs_check_flags,
- 	.setlease	= nfs_setlease,
+-	.setlease	= nfs_setlease,
  };
-@@ -120,8 +116,8 @@ nfs_file_open(struct inode *inode, struct file *filp)
+ 
+-const struct inode_operations nfs_file_inode_operations = {
++struct inode_operations nfs_file_inode_operations = {
+ 	.permission	= nfs_permission,
+ 	.getattr	= nfs_getattr,
+ 	.setattr	= nfs_setattr,
+ };
+ 
+ #ifdef CONFIG_NFS_V3
+-const struct inode_operations nfs3_file_inode_operations = {
++struct inode_operations nfs3_file_inode_operations = {
+ 	.permission	= nfs_permission,
+ 	.getattr	= nfs_getattr,
+ 	.setattr	= nfs_setattr,
+@@ -120,8 +114,8 @@ nfs_file_open(struct inode *inode, struct file *filp)
  	int res;
  
  	dprintk("NFS: open file(%s/%s)\n",
@@ -589,7 +491,7 @@
  
  	res = nfs_check_flags(filp->f_flags);
  	if (res)
-@@ -135,7 +131,7 @@ nfs_file_open(struct inode *inode, struct file *filp)
+@@ -135,7 +129,7 @@ nfs_file_open(struct inode *inode, struct file *filp)
  static int
  nfs_file_release(struct inode *inode, struct file *filp)
  {
@@ -598,7 +500,7 @@
  
  	dprintk("NFS: release(%s/%s)\n",
  			dentry->d_parent->d_name.name,
-@@ -178,11 +174,9 @@ force_reval:
+@@ -178,11 +172,9 @@ force_reval:
  
  static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
  {
@@ -612,7 +514,7 @@
  			offset, origin);
  
  	/* origin == SEEK_END => we must revalidate the cached file length */
-@@ -192,10 +186,7 @@ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
+@@ -192,10 +184,7 @@ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
  		if (retval < 0)
  			return (loff_t)retval;
  	}
@@ -624,7 +526,7 @@
  }
  
  /*
-@@ -230,7 +221,7 @@ static int
+@@ -230,7 +219,7 @@ static int
  nfs_file_flush(struct file *file, fl_owner_t id)
  {
  	struct nfs_open_context *ctx = nfs_file_open_context(file);
@@ -633,7 +535,7 @@
  	struct inode	*inode = dentry->d_inode;
  	int		status;
  
-@@ -250,16 +241,14 @@ nfs_file_flush(struct file *file, fl_owner_t id)
+@@ -250,16 +239,15 @@ nfs_file_flush(struct file *file, fl_owner_t id)
  }
  
  static ssize_t
@@ -646,14 +548,15 @@
  	struct inode * inode = dentry->d_inode;
  	ssize_t result;
 -	size_t count = iov_length(iov, nr_segs);
++	struct iovec local_iov = { .iov_base = buf, .iov_len = count };
  
  	if (iocb->ki_filp->f_flags & O_DIRECT)
 -		return nfs_file_direct_read(iocb, iov, nr_segs, pos);
-+		return nfs_file_direct_read(iocb, buf, count, pos);
++		return nfs_file_direct_read(iocb, &local_iov, 1, pos);
  
  	dprintk("NFS: read(%s/%s, %lu@%lu)\n",
  		dentry->d_parent->d_name.name, dentry->d_name.name,
-@@ -268,33 +257,32 @@ nfs_file_read(struct kiocb *iocb, const struct iovec *iov,
+@@ -268,33 +256,32 @@ nfs_file_read(struct kiocb *iocb, const struct iovec *iov,
  	result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
  	nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, count);
  	if (!result)
@@ -694,34 +597,14 @@
  	struct inode *inode = dentry->d_inode;
  	int	status;
  
-@@ -304,7 +292,6 @@ nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
- 	status = nfs_revalidate_mapping(inode, file->f_mapping);
- 	if (!status) {
- 		vma->vm_ops = &nfs_file_vm_ops;
--		vma->vm_flags |= VM_CAN_NONLINEAR;
- 		file_accessed(file);
- 	}
- 	return status;
-@@ -330,80 +317,28 @@ nfs_file_fsync(struct file *file, struct dentry *dentry, int datasync)
- }
- 
- /*
-- * This does the "real" work of the write. We must allocate and lock the
-- * page to be sent back to the generic routine, which then copies the
-- * data from user space.
-+ * This does the "real" work of the write. The generic routine has
-+ * allocated the page, locked it, done all the page alignment stuff
-+ * calculations etc. Now we should just copy the data from user
-+ * space and write it back to the real medium.
-  *
+@@ -337,44 +324,15 @@ nfs_file_fsync(struct file *file, struct dentry *dentry, int datasync)
   * If the writer ends up delaying the write, the writer needs to
   * increment the page use counts until he is done with the page.
   */
 -static int nfs_write_begin(struct file *file, struct address_space *mapping,
 -			loff_t pos, unsigned len, unsigned flags,
 -			struct page **pagep, void **fsdata)
-+static int nfs_prepare_write(struct file *file, struct page *page, 
-+		unsigned offset, unsigned to)
++static int nfs_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
  {
 -	int ret;
 -	pgoff_t index;
@@ -753,59 +636,61 @@
 +static int nfs_commit_write(struct file *file, struct page *page, unsigned offset, unsigned to)
  {
 -	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
--	int status;
+ 	int status;
 -
 -	dfprintk(PAGECACHE, "NFS: write_end(%s/%s(%ld), %u@%lld)\n",
 -		file->f_path.dentry->d_parent->d_name.name,
 -		file->f_path.dentry->d_name.name,
 -		mapping->host->i_ino, len, (long long) pos);
--
--	/*
--	 * Zero any uninitialised parts of the page, and then mark the page
--	 * as up to date if it turns out that we're extending the file.
--	 */
--	if (!PageUptodate(page)) {
--		unsigned pglen = nfs_page_length(page);
++	unsigned copied = to - offset;
+ 
+ 	/*
+ 	 * Zero any uninitialised parts of the page, and then mark the page
+@@ -382,14 +340,13 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
+ 	 */
+ 	if (!PageUptodate(page)) {
+ 		unsigned pglen = nfs_page_length(page);
 -		unsigned end = offset + len;
--
--		if (pglen == 0) {
--			zero_user_segments(page, 0, offset,
+ 
+ 		if (pglen == 0) {
+ 			zero_user_segments(page, 0, offset,
 -					end, PAGE_CACHE_SIZE);
--			SetPageUptodate(page);
++					to, PAGE_CACHE_SIZE);
+ 			SetPageUptodate(page);
 -		} else if (end >= pglen) {
 -			zero_user_segment(page, end, PAGE_CACHE_SIZE);
--			if (offset == 0)
--				SetPageUptodate(page);
--		} else
--			zero_user_segment(page, pglen, PAGE_CACHE_SIZE);
--	}
--
--	status = nfs_updatepage(file, page, offset, copied);
-+	long status;
++		} else if (to >= pglen) {
++			zero_user_segment(page, to, PAGE_CACHE_SIZE);
+ 			if (offset == 0)
+ 				SetPageUptodate(page);
+ 		} else
+@@ -398,22 +355,19 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
  
+ 	status = nfs_updatepage(file, page, offset, copied);
+ 
 -	unlock_page(page);
 -	page_cache_release(page);
 -
--	if (status < 0)
--		return status;
--	return copied;
-+	lock_kernel();
-+	status = nfs_updatepage(file, page, offset, to-offset);
-+	unlock_kernel();
-+	return status;
+ 	if (status < 0)
+ 		return status;
+ 	return copied;
  }
  
- static void nfs_invalidate_page(struct page *page, unsigned long offset)
-@@ -413,7 +348,7 @@ static void nfs_invalidate_page(struct page *page, unsigned long offset)
+-static void nfs_invalidate_page(struct page *page, unsigned long offset)
++static int nfs_invalidate_page(struct page *page, unsigned long offset)
+ {
+ 	dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %lu)\n", page, offset);
+ 
  	if (offset != 0)
- 		return;
+-		return;
++		return 1;
  	/* Cancel any unstarted writes on this page */
 -	nfs_wb_page_cancel(page->mapping->host, page);
-+	nfs_wb_page_priority(page->mapping->host, page, FLUSH_INVALIDATE);
++	return nfs_wb_page_cancel(page->mapping->host, page);
  }
  
  static int nfs_release_page(struct page *page, gfp_t gfp)
-@@ -424,15 +359,6 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
+@@ -424,34 +378,23 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
  	return 0;
  }
  
@@ -818,10 +703,10 @@
 -
 -	return nfs_wb_page(inode, page);
 -}
- 
+-
  const struct address_space_operations nfs_file_aops = {
  	.readpage = nfs_readpage,
-@@ -440,18 +366,17 @@ const struct address_space_operations nfs_file_aops = {
+ 	.readpages = nfs_readpages,
  	.set_page_dirty = __set_page_dirty_nobuffers,
  	.writepage = nfs_writepage,
  	.writepages = nfs_writepages,
@@ -843,36 +728,39 @@
  	unsigned pagelen;
  	int ret = -EINVAL;
  	struct address_space *mapping;
-@@ -484,7 +409,6 @@ out_unlock:
+@@ -484,7 +427,8 @@ out_unlock:
  }
  
  static struct vm_operations_struct nfs_file_vm_ops = {
 -	.fault = filemap_fault,
++	.nopage		= filemap_nopage,
++	.populate	= filemap_populate,
  	.page_mkwrite = nfs_vm_page_mkwrite,
  };
  
-@@ -500,16 +424,15 @@ static int nfs_need_sync_write(struct file *filp, struct inode *inode)
+@@ -500,16 +444,16 @@ static int nfs_need_sync_write(struct file *filp, struct inode *inode)
  	return 0;
  }
  
 -static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
 -				unsigned long nr_segs, loff_t pos)
-+static ssize_t nfs_file_write(struct kiocb *iocb, const char __user *buf, 
-+		size_t count, loff_t pos)
++static ssize_t
++nfs_file_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
  {
 -	struct dentry * dentry = iocb->ki_filp->f_path.dentry;
 +	struct dentry * dentry = iocb->ki_filp->f_dentry;
  	struct inode * inode = dentry->d_inode;
  	ssize_t result;
 -	size_t count = iov_length(iov, nr_segs);
++	struct iovec local_iov = { .iov_base = (void __user *)buf, .iov_len = count };
  
  	if (iocb->ki_filp->f_flags & O_DIRECT)
 -		return nfs_file_direct_write(iocb, iov, nr_segs, pos);
-+		return nfs_file_direct_write(iocb, buf, count, pos);
++		return nfs_file_direct_write(iocb, &local_iov, 1, pos);
  
  	dprintk("NFS: write(%s/%s, %lu@%Ld)\n",
  		dentry->d_parent->d_name.name, dentry->d_name.name,
-@@ -532,7 +455,7 @@ static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
+@@ -532,7 +476,7 @@ static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
  		goto out;
  
  	nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count);
@@ -881,7 +769,7 @@
  	/* Return error values for O_SYNC and IS_SYNC() */
  	if (result >= 0 && nfs_need_sync_write(iocb->ki_filp, inode)) {
  		int err = nfs_do_fsync(nfs_file_open_context(iocb->ki_filp), inode);
-@@ -662,15 +585,16 @@ static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
+@@ -662,8 +606,8 @@ static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
  	int ret = -ENOLCK;
  
  	dprintk("NFS: lock(%s/%s, t=%x, fl=%x, r=%lld:%lld)\n",
@@ -892,16 +780,7 @@
  			fl->fl_type, fl->fl_flags,
  			(long long)fl->fl_start, (long long)fl->fl_end);
  
- 	nfs_inc_stats(inode, NFSIOS_VFSLOCK);
- 
- 	/* No mandatory locks over NFS */
--	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
-+	if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID && 
-+			fl->fl_type != F_UNLCK)
- 		goto out_err;
- 
- 	if (NFS_PROTO(inode)->lock_check_bounds != NULL) {
-@@ -695,8 +619,8 @@ out_err:
+@@ -695,8 +639,8 @@ out_err:
  static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
  {
  	dprintk("NFS: flock(%s/%s, t=%x, fl=%x)\n",
@@ -912,126 +791,70 @@
  			fl->fl_type, fl->fl_flags);
  
  	/*
-@@ -726,8 +650,8 @@ static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
- static int nfs_setlease(struct file *file, long arg, struct file_lock **fl)
- {
- 	dprintk("NFS: setlease(%s/%s, arg=%ld)\n",
+@@ -718,16 +662,3 @@ static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
+ 		return do_unlk(filp, cmd, fl);
+ 	return do_setlk(filp, cmd, fl);
+ }
+-
+-/*
+- * There is no protocol support for leases, so we have no way to implement
+- * them correctly in the face of opens by other clients.
+- */
+-static int nfs_setlease(struct file *file, long arg, struct file_lock **fl)
+-{
+-	dprintk("NFS: setlease(%s/%s, arg=%ld)\n",
 -			file->f_path.dentry->d_parent->d_name.name,
 -			file->f_path.dentry->d_name.name, arg);
-+			file->f_dentry->d_parent->d_name.name,
-+			file->f_dentry->d_name.name, arg);
- 
- 	return -EINVAL;
- }
+-
+-	return -EINVAL;
+-}
 diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
-index fae9719..9abe180 100644
+index fae9719..5bf9b3c 100644
 --- a/fs/nfs/getroot.c
 +++ b/fs/nfs/getroot.c
-@@ -30,7 +30,7 @@
+@@ -30,7 +30,6 @@
  #include <linux/nfs_idmap.h>
  #include <linux/vfs.h>
  #include <linux/namei.h>
 -#include <linux/mnt_namespace.h>
-+#include <linux/namespace.h>
  #include <linux/security.h>
  
  #include <asm/system.h>
-@@ -96,7 +96,7 @@ struct dentry *nfs_get_root(struct super_block *sb, struct nfs_fh *mntfh)
- 	inode = nfs_fhget(sb, mntfh, fsinfo.fattr);
- 	if (IS_ERR(inode)) {
- 		dprintk("nfs_get_root: get root inode failed\n");
--		return ERR_CAST(inode);
-+		return ERR_PTR(PTR_ERR(inode));
- 	}
- 
- 	error = nfs_superblock_set_dummy_root(sb, inode);
-@@ -266,7 +266,7 @@ struct dentry *nfs4_get_root(struct super_block *sb, struct nfs_fh *mntfh)
- 	inode = nfs_fhget(sb, mntfh, &fattr);
- 	if (IS_ERR(inode)) {
- 		dprintk("nfs_get_root: get root inode failed\n");
--		return ERR_CAST(inode);
-+		return ERR_PTR(PTR_ERR(inode));
- 	}
- 
- 	error = nfs_superblock_set_dummy_root(sb, inode);
+diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
+index 86147b0..148aebe 100644
+--- a/fs/nfs/idmap.c
++++ b/fs/nfs/idmap.c
+@@ -376,7 +376,7 @@ idmap_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
+ static ssize_t
+ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
+ {
+-	struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
++	struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode);
+ 	struct idmap *idmap = (struct idmap *)rpci->private;
+ 	struct idmap_msg im_in, *im = &idmap->idmap_im;
+ 	struct idmap_hashtable *h;
 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
-index 52daefa..13cc0e0 100644
+index 52daefa..4db7185 100644
 --- a/fs/nfs/inode.c
 +++ b/fs/nfs/inode.c
-@@ -279,28 +279,36 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
- 		/* Why so? Because we want revalidate for devices/FIFOs, and
- 		 * that's precisely what we have in nfs_file_inode_operations.
+@@ -281,12 +281,12 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
  		 */
--		inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->file_inode_ops;
-+		inode->i_op = (struct inode_operations *)
-+			NFS_SB(sb)->nfs_client->rpc_ops->file_inode_ops;
+ 		inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->file_inode_ops;
  		if (S_ISREG(inode->i_mode)) {
 -			inode->i_fop = &nfs_file_operations;
 -			inode->i_data.a_ops = &nfs_file_aops;
-+			inode->i_fop = (struct file_operations  *)
-+				&nfs_file_operations;
-+			inode->i_data.a_ops = (struct address_space_operations *)
-+				&nfs_file_aops;
++			inode->i_fop = (struct file_operations *)&nfs_file_operations;
++			inode->i_data.a_ops = (struct address_space_operations *)&nfs_file_aops;
  			inode->i_data.backing_dev_info = &NFS_SB(sb)->backing_dev_info;
  		} else if (S_ISDIR(inode->i_mode)) {
--			inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->dir_inode_ops;
+ 			inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->dir_inode_ops;
 -			inode->i_fop = &nfs_dir_operations;
-+			inode->i_op = (struct inode_operations *)
-+				NFS_SB(sb)->nfs_client->rpc_ops->dir_inode_ops;
-+			inode->i_fop = (struct file_operations  *)
-+				&nfs_dir_operations;
++			inode->i_fop = (struct file_operations *)&nfs_dir_operations;
  			if (nfs_server_capable(inode, NFS_CAP_READDIRPLUS)
  			    && fattr->size <= NFS_LIMIT_READDIRPLUS)
  				set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
- 			/* Deal with crossing mountpoints */
- 			if (!nfs_fsid_equal(&NFS_SB(sb)->fsid, &fattr->fsid)) {
- 				if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
--					inode->i_op = &nfs_referral_inode_operations;
-+					inode->i_op = (struct inode_operations *)
-+						&nfs_referral_inode_operations;
- 				else
--					inode->i_op = &nfs_mountpoint_inode_operations;
-+					inode->i_op = (struct inode_operations *)
-+						&nfs_mountpoint_inode_operations;
- 				inode->i_fop = NULL;
- 				set_bit(NFS_INO_MOUNTPOINT, &nfsi->flags);
- 			}
- 		} else if (S_ISLNK(inode->i_mode))
--			inode->i_op = &nfs_symlink_inode_operations;
-+			inode->i_op = (struct inode_operations *)
-+				&nfs_symlink_inode_operations;
- 		else
- 			init_special_inode(inode, inode->i_mode, fattr->rdev);
- 
-@@ -485,11 +493,15 @@ static int nfs_wait_schedule(void *word)
+@@ -612,7 +612,7 @@ static void put_nfs_open_context_sync(struct nfs_open_context *ctx)
   */
- static int nfs_wait_on_inode(struct inode *inode)
- {
-+	struct rpc_clnt	*clnt = NFS_CLIENT(inode);
- 	struct nfs_inode *nfsi = NFS_I(inode);
-+	sigset_t oldmask;
- 	int error;
- 
-+	rpc_clnt_sigmask(clnt, &oldmask);
- 	error = wait_on_bit_lock(&nfsi->flags, NFS_INO_REVALIDATING,
--					nfs_wait_schedule, TASK_KILLABLE);
-+					nfs_wait_schedule, TASK_INTERRUPTIBLE);
-+	rpc_clnt_sigunmask(clnt, &oldmask);
- 
- 	return error;
- }
-@@ -592,7 +604,8 @@ static void __put_nfs_open_context(struct nfs_open_context *ctx, int wait)
- 	}
- 	if (ctx->cred != NULL)
- 		put_rpccred(ctx->cred);
--	path_put(&ctx->path);
-+	dput(ctx->path.dentry);
-+	mntput(ctx->path.mnt);
- 	kfree(ctx);
- }
- 
-@@ -612,7 +625,7 @@ static void put_nfs_open_context_sync(struct nfs_open_context *ctx)
-  */
  static void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx)
  {
 -	struct inode *inode = filp->f_path.dentry->d_inode;
@@ -1039,7 +862,7 @@
  	struct nfs_inode *nfsi = NFS_I(inode);
  
  	filp->private_data = get_nfs_open_context(ctx);
-@@ -644,7 +657,7 @@ struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_c
+@@ -644,7 +644,7 @@ struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_c
  
  static void nfs_file_clear_open_context(struct file *filp)
  {
@@ -1048,7 +871,7 @@
  	struct nfs_open_context *ctx = nfs_file_open_context(filp);
  
  	if (ctx) {
-@@ -667,7 +680,7 @@ int nfs_open(struct inode *inode, struct file *filp)
+@@ -667,7 +667,7 @@ int nfs_open(struct inode *inode, struct file *filp)
  	cred = rpc_lookup_cred();
  	if (IS_ERR(cred))
  		return PTR_ERR(cred);
@@ -1057,417 +880,209 @@
  	put_rpccred(cred);
  	if (ctx == NULL)
  		return -ENOMEM;
-diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
-index 08f6b04..2684401 100644
---- a/fs/nfs/direct.c
-+++ b/fs/nfs/direct.c
-@@ -116,7 +116,7 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
- ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
- {
- 	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
--			iocb->ki_filp->f_path.dentry->d_name.name,
-+			iocb->ki_filp->f_dentry->d_name.name,
- 			(long long) pos, nr_segs);
- 
- 	return -EINVAL;
-@@ -193,7 +193,7 @@ static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
- 	if (dreq->iocb)
- 		goto out;
- 
--	result = wait_for_completion_killable(&dreq->completion);
-+	result = wait_for_completion_interruptible(&dreq->completion);
- 
- 	if (!result)
- 		result = dreq->error;
-@@ -270,14 +270,10 @@ static const struct rpc_call_ops nfs_read_direct_ops = {
-  * handled automatically by nfs_direct_read_result().  Otherwise, if
-  * no requests have been sent, just return an error.
-  */
--static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
--						const struct iovec *iov,
--						loff_t pos)
-+static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos)
- {
- 	struct nfs_open_context *ctx = dreq->ctx;
- 	struct inode *inode = ctx->path.dentry->d_inode;
--	unsigned long user_addr = (unsigned long)iov->iov_base;
--	size_t count = iov->iov_len;
- 	size_t rsize = NFS_SERVER(inode)->rsize;
- 	struct rpc_task *task;
- 	struct rpc_message msg = {
-@@ -294,6 +290,8 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
- 	int result;
- 	ssize_t started = 0;
- 
-+	get_dreq(dreq);
-+
- 	do {
- 		struct nfs_read_data *data;
- 		size_t bytes;
-@@ -370,49 +368,20 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
- 		count -= bytes;
- 	} while (count != 0);
- 
--	if (started)
--		return started;
--	return result < 0 ? (ssize_t) result : -EFAULT;
--}
--
--static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
--					      const struct iovec *iov,
--					      unsigned long nr_segs,
--					      loff_t pos)
--{
--	ssize_t result = -EINVAL;
--	size_t requested_bytes = 0;
--	unsigned long seg;
--
--	get_dreq(dreq);
--
--	for (seg = 0; seg < nr_segs; seg++) {
--		const struct iovec *vec = &iov[seg];
--		result = nfs_direct_read_schedule_segment(dreq, vec, pos);
--		if (result < 0)
--			break;
--		requested_bytes += result;
--		if ((size_t)result < vec->iov_len)
--			break;
--		pos += vec->iov_len;
--	}
--
- 	if (put_dreq(dreq))
- 		nfs_direct_complete(dreq);
- 
--	if (requested_bytes != 0)
-+	if (started)
- 		return 0;
--
--	if (result < 0)
--		return result;
--	return -EIO;
-+	return result < 0 ? (ssize_t) result : -EFAULT;
+@@ -1242,7 +1242,7 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi)
+ #endif
  }
  
--static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
--			       unsigned long nr_segs, loff_t pos)
-+static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
+-static void init_once(void *foo)
++static void init_once(void *foo, struct kmem_cache *cachep, unsigned long temp)
  {
- 	ssize_t result = 0;
-+	sigset_t oldset;
- 	struct inode *inode = iocb->ki_filp->f_mapping->host;
-+	struct rpc_clnt *clnt = NFS_CLIENT(inode);
- 	struct nfs_direct_req *dreq;
+ 	struct nfs_inode *nfsi = (struct nfs_inode *) foo;
  
- 	dreq = nfs_direct_req_alloc();
-@@ -424,9 +393,12 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
- 	if (!is_sync_kiocb(iocb))
- 		dreq->iocb = iocb;
+diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
+index 66df08d..8a39bdf 100644
+--- a/fs/nfs/namespace.c
++++ b/fs/nfs/namespace.c
+@@ -107,29 +107,29 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
  
--	result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
-+	nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
-+	rpc_clnt_sigmask(clnt, &oldset);
-+	result = nfs_direct_read_schedule(dreq, user_addr, count, pos);
- 	if (!result)
- 		result = nfs_direct_wait(dreq);
-+	rpc_clnt_sigunmask(clnt, &oldset);
- 	nfs_direct_req_release(dreq);
+ 	BUG_ON(IS_ROOT(dentry));
+ 	dprintk("%s: enter\n", __func__);
+-	dput(nd->path.dentry);
+-	nd->path.dentry = dget(dentry);
++	dput(nd->dentry);
++	nd->dentry = dget(dentry);
  
- 	return result;
-@@ -684,14 +656,11 @@ static const struct rpc_call_ops nfs_write_direct_ops = {
-  * handled automatically by nfs_direct_write_result().  Otherwise, if
-  * no requests have been sent, just return an error.
-  */
--static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
--						 const struct iovec *iov,
--						 loff_t pos, int sync)
-+static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, 
-+		unsigned long user_addr, size_t count, loff_t pos, int sync)
- {
- 	struct nfs_open_context *ctx = dreq->ctx;
- 	struct inode *inode = ctx->path.dentry->d_inode;
--	unsigned long user_addr = (unsigned long)iov->iov_base;
--	size_t count = iov->iov_len;
- 	struct rpc_task *task;
- 	struct rpc_message msg = {
- 		.rpc_cred = ctx->cred,
-@@ -708,6 +677,8 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
- 	int result;
- 	ssize_t started = 0;
+ 	/* Look it up again */
+-	parent = dget_parent(nd->path.dentry);
++	parent = dget_parent(nd->dentry);
+ 	err = server->nfs_client->rpc_ops->lookup(parent->d_inode,
+-						  &nd->path.dentry->d_name,
++						  &nd->dentry->d_name,
+ 						  &fh, &fattr);
+ 	dput(parent);
+ 	if (err != 0)
+ 		goto out_err;
  
-+	get_dreq(dreq);
-+
- 	do {
- 		struct nfs_write_data *data;
- 		size_t bytes;
-@@ -788,51 +759,20 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
- 		count -= bytes;
- 	} while (count != 0);
+ 	if (fattr.valid & NFS_ATTR_FATTR_V4_REFERRAL)
+-		mnt = nfs_do_refmount(nd->path.mnt, nd->path.dentry);
++		mnt = nfs_do_refmount(nd->mnt, nd->dentry);
+ 	else
+-		mnt = nfs_do_submount(nd->path.mnt, nd->path.dentry, &fh,
++		mnt = nfs_do_submount(nd->mnt, nd->dentry, &fh,
+ 				      &fattr);
+ 	err = PTR_ERR(mnt);
+ 	if (IS_ERR(mnt))
+ 		goto out_err;
  
--	if (started)
--		return started;
--	return result < 0 ? (ssize_t) result : -EFAULT;
--}
--
--static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
--					       const struct iovec *iov,
--					       unsigned long nr_segs,
--					       loff_t pos, int sync)
--{
--	ssize_t result = 0;
--	size_t requested_bytes = 0;
--	unsigned long seg;
--
--	get_dreq(dreq);
--
--	for (seg = 0; seg < nr_segs; seg++) {
--		const struct iovec *vec = &iov[seg];
--		result = nfs_direct_write_schedule_segment(dreq, vec,
--							   pos, sync);
--		if (result < 0)
--			break;
--		requested_bytes += result;
--		if ((size_t)result < vec->iov_len)
--			break;
--		pos += vec->iov_len;
--	}
--
- 	if (put_dreq(dreq))
--		nfs_direct_write_complete(dreq, dreq->inode);
-+		nfs_direct_write_complete(dreq, inode);
- 
--	if (requested_bytes != 0)
-+	if (started)
- 		return 0;
--
--	if (result < 0)
--		return result;
--	return -EIO;
-+	return result < 0 ? (ssize_t) result : -EFAULT;
+ 	mntget(mnt);
+-	err = do_add_mount(mnt, &nd->path, nd->path.mnt->mnt_flags|MNT_SHRINKABLE,
++	err = do_add_mount(mnt, nd, nd->mnt->mnt_flags,
+ 			   &nfs_automount_list);
+ 	if (err < 0) {
+ 		mntput(mnt);
+@@ -137,9 +137,9 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
+ 			goto out_follow;
+ 		goto out_err;
+ 	}
+-	path_put(&nd->path);
+-	nd->path.mnt = mnt;
+-	nd->path.dentry = dget(mnt->mnt_root);
++	backport_path_put(nd);
++	nd->mnt = mnt;
++	nd->dentry = dget(mnt->mnt_root);
+ 	schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout);
+ out:
+ 	dprintk("%s: done, returned %d\n", __func__, err);
+@@ -147,22 +147,22 @@ out:
+ 	dprintk("<-- nfs_follow_mountpoint() = %d\n", err);
+ 	return ERR_PTR(err);
+ out_err:
+-	path_put(&nd->path);
++	backport_path_put(nd);
+ 	goto out;
+ out_follow:
+-	while (d_mountpoint(nd->path.dentry) &&
+-	       follow_down(&nd->path.mnt, &nd->path.dentry))
++	while (d_mountpoint(nd->dentry) &&
++	       follow_down(&nd->mnt, &nd->dentry))
+ 		;
+ 	err = 0;
+ 	goto out;
  }
  
--static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
--				unsigned long nr_segs, loff_t pos,
--				size_t count)
-+static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
- {
- 	ssize_t result = 0;
-+	sigset_t oldset;
- 	struct inode *inode = iocb->ki_filp->f_mapping->host;
-+	struct rpc_clnt *clnt = NFS_CLIENT(inode);
- 	struct nfs_direct_req *dreq;
- 	size_t wsize = NFS_SERVER(inode)->wsize;
- 	int sync = NFS_UNSTABLE;
-@@ -850,9 +790,13 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
- 	if (!is_sync_kiocb(iocb))
- 		dreq->iocb = iocb;
+-const struct inode_operations nfs_mountpoint_inode_operations = {
++struct inode_operations nfs_mountpoint_inode_operations = {
+ 	.follow_link	= nfs_follow_mountpoint,
+ 	.getattr	= nfs_getattr,
+ };
  
--	result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, sync);
-+	nfs_add_stats(inode, NFSIOS_DIRECTWRITTENBYTES, count);
-+
-+	rpc_clnt_sigmask(clnt, &oldset);
-+	result = nfs_direct_write_schedule(dreq, user_addr, count, pos, sync);
- 	if (!result)
- 		result = nfs_direct_wait(dreq);
-+	rpc_clnt_sigunmask(clnt, &oldset);
- 	nfs_direct_req_release(dreq);
+-const struct inode_operations nfs_referral_inode_operations = {
++struct inode_operations nfs_referral_inode_operations = {
+ 	.follow_link	= nfs_follow_mountpoint,
+ };
  
- 	return result;
-@@ -861,8 +805,8 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
- /**
-  * nfs_file_direct_read - file direct read operation for NFS files
-  * @iocb: target I/O control block
-- * @iov: vector of user buffers into which to read data
-- * @nr_segs: size of iov vector
-+ * @buf: user's buffer into which to read data
-+ * @count: number of bytes to read
-  * @pos: byte offset in file where reading starts
-  *
-  * We use this function for direct reads instead of calling
-@@ -879,22 +823,22 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
-  * client must read the updated atime from the server back into its
-  * cache.
-  */
--ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
--				unsigned long nr_segs, loff_t pos)
-+ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
- {
- 	ssize_t retval = -EINVAL;
- 	struct file *file = iocb->ki_filp;
- 	struct address_space *mapping = file->f_mapping;
--	size_t count;
+@@ -193,14 +193,14 @@ static struct vfsmount *nfs_do_clone_mount(struct nfs_server *server,
+ 	switch (server->nfs_client->rpc_ops->version) {
+ 		case 2:
+ 		case 3:
+-			mnt = vfs_kern_mount(&nfs_xdev_fs_type, 0, devname, mountdata);
++			mnt = do_kern_mount("nfs", 0, devname, mountdata);
+ 			break;
+ 		case 4:
+-			mnt = vfs_kern_mount(&nfs4_xdev_fs_type, 0, devname, mountdata);
++			mnt = do_kern_mount("nfs4", 0, devname, mountdata);
+ 	}
+ 	return mnt;
+ #else
+-	return vfs_kern_mount(&nfs_xdev_fs_type, 0, devname, mountdata);
++	return do_kern_mount("nfs", 0, devname, mountdata);
+ #endif
+ }
  
--	count = iov_length(iov, nr_segs);
- 	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
- 
- 	dfprintk(FILE, "NFS: direct read(%s/%s, %zd@%Ld)\n",
--		file->f_path.dentry->d_parent->d_name.name,
--		file->f_path.dentry->d_name.name,
-+		file->f_dentry->d_parent->d_name.name,
-+		file->f_dentry->d_name.name,
- 		count, (long long) pos);
- 
-+	retval = -EFAULT;
-+	if (!access_ok(VERIFY_WRITE, buf, count))
-+		goto out;
- 	retval = 0;
- 	if (!count)
- 		goto out;
-@@ -903,7 +847,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
- 	if (retval)
- 		goto out;
- 
--	retval = nfs_direct_read(iocb, iov, nr_segs, pos);
-+	retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos);
- 	if (retval > 0)
- 		iocb->ki_pos = pos + retval;
- 
-@@ -914,8 +858,8 @@ out:
- /**
-  * nfs_file_direct_write - file direct write operation for NFS files
-  * @iocb: target I/O control block
-- * @iov: vector of user buffers from which to write data
-- * @nr_segs: size of iov vector
-+ * @buf: user's buffer from which to write data
-+ * @count: number of bytes to write
-  * @pos: byte offset in file where writing starts
-  *
-  * We use this function for direct writes instead of calling
-@@ -936,20 +880,17 @@ out:
-  * Note that O_APPEND is not supported for NFS direct writes, as there
-  * is no atomic O_APPEND write facility in the NFS protocol.
-  */
--ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
--				unsigned long nr_segs, loff_t pos)
-+ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
+diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
+index 1e750e4..bdeef69 100644
+--- a/fs/nfs/nfs3proc.c
++++ b/fs/nfs/nfs3proc.c
+@@ -779,7 +779,7 @@ static void nfs3_proc_commit_setup(struct nfs_write_data *data, struct rpc_messa
+ static int
+ nfs3_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
  {
- 	ssize_t retval = -EINVAL;
- 	struct file *file = iocb->ki_filp;
- 	struct address_space *mapping = file->f_mapping;
--	size_t count;
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
  
--	count = iov_length(iov, nr_segs);
- 	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);
+ 	return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl);
+ }
+diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
+index ea79064..7a8e6fa 100644
+--- a/fs/nfs/nfs4_fs.h
++++ b/fs/nfs/nfs4_fs.h
+@@ -165,7 +165,7 @@ struct nfs4_state_recovery_ops {
+ };
  
- 	dfprintk(FILE, "NFS: direct write(%s/%s, %zd@%Ld)\n",
--		file->f_path.dentry->d_parent->d_name.name,
--		file->f_path.dentry->d_name.name,
-+		file->f_dentry->d_parent->d_name.name,
-+		file->f_dentry->d_name.name,
- 		count, (long long) pos);
+ extern struct dentry_operations nfs4_dentry_operations;
+-extern const struct inode_operations nfs4_dir_inode_operations;
++extern struct inode_operations nfs4_dir_inode_operations;
  
- 	retval = generic_write_checks(file, &pos, &count, 0);
-@@ -963,11 +904,15 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
- 	if (!count)
- 		goto out;
- 
-+	retval = -EFAULT;
-+	if (!access_ok(VERIFY_READ, buf, count))
-+		goto out;
-+
- 	retval = nfs_sync_mapping(mapping);
- 	if (retval)
- 		goto out;
- 
--	retval = nfs_direct_write(iocb, iov, nr_segs, pos, count);
-+	retval = nfs_direct_write(iocb, (unsigned long) buf, count, pos);
- 
- 	if (retval > 0)
- 		iocb->ki_pos = pos + retval;
-diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
-index 7f07920..de1d8ea 100644
---- a/fs/nfs/pagelist.c
-+++ b/fs/nfs/pagelist.c
-@@ -58,6 +58,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
- 		   struct page *page,
- 		   unsigned int offset, unsigned int count)
+ /* inode.c */
+ extern ssize_t nfs4_getxattr(struct dentry *, const char *, void *, size_t);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index c910413..02f1156 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1384,7 +1384,7 @@ struct dentry *
+ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
  {
-+	struct nfs_server *server = NFS_SERVER(inode);
- 	struct nfs_page		*req;
- 
- 	for (;;) {
-@@ -66,7 +67,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
- 		if (req != NULL)
- 			break;
- 
--		if (fatal_signal_pending(current))
-+		if (signalled() && (server->flags & NFS_MOUNT_INTR))
- 			return ERR_PTR(-ERESTARTSYS);
- 		yield();
+ 	struct path path = {
+-		.mnt = nd->path.mnt,
++		.mnt = nd->mnt,
+ 		.dentry = dentry,
+ 	};
+ 	struct dentry *parent;
+@@ -1421,8 +1421,8 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
  	}
-@@ -176,11 +177,11 @@ void nfs_release_request(struct nfs_page *req)
- 	kref_put(&req->wb_kref, nfs_free_request);
- }
- 
--static int nfs_wait_bit_killable(void *word)
-+static int nfs_wait_bit_interruptible(void *word)
+ 	res = d_add_unique(dentry, igrab(state->inode));
+ 	if (res != NULL)
+-		path.dentry = res;
+-	nfs_set_verifier(path.dentry, nfs_save_change_attribute(dir));
++		dentry = res;
++	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ 	nfs_unblock_sillyrename(parent);
+ 	nfs4_intent_set_file(nd, &path, state);
+ 	return res;
+@@ -1432,7 +1432,7 @@ int
+ nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, struct nameidata *nd)
  {
- 	int ret = 0;
+ 	struct path path = {
+-		.mnt = nd->path.mnt,
++		.mnt = nd->mnt,
+ 		.dentry = dentry,
+ 	};
+ 	struct rpc_cred *cred;
+@@ -1880,7 +1880,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
+                  int flags, struct nameidata *nd)
+ {
+ 	struct path path = {
+-		.mnt = nd->path.mnt,
++		.mnt = nd->mnt,
+ 		.dentry = dentry,
+ 	};
+ 	struct nfs4_state *state;
+@@ -3671,7 +3671,7 @@ struct nfs4_state_recovery_ops nfs4_network_partition_recovery_ops = {
+ 	.recover_lock	= nfs4_lock_expired,
+ };
  
--	if (fatal_signal_pending(current))
-+	if (signal_pending(current))
- 		ret = -ERESTARTSYS;
- 	else
- 		schedule();
-@@ -191,18 +192,26 @@ static int nfs_wait_bit_killable(void *word)
-  * nfs_wait_on_request - Wait for a request to complete.
-  * @req: request to wait upon.
-  *
-- * Interruptible by fatal signals only.
-+ * Interruptible by signals only if mounted with intr flag.
-  * The user is responsible for holding a count on the request.
-  */
- int
- nfs_wait_on_request(struct nfs_page *req)
+-static const struct inode_operations nfs4_file_inode_operations = {
++static struct inode_operations nfs4_file_inode_operations = {
+ 	.permission	= nfs_permission,
+ 	.getattr	= nfs_getattr,
+ 	.setattr	= nfs_setattr,
+diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
+index 4dbb84d..c351a41 100644
+--- a/fs/nfs/proc.c
++++ b/fs/nfs/proc.c
+@@ -595,7 +595,7 @@ nfs_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
+ static int
+ nfs_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
  {
-+	struct rpc_clnt *clnt = NFS_CLIENT(req->wb_context->path.dentry->d_inode);
-+	sigset_t oldmask;
- 	int ret = 0;
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
  
- 	if (!test_bit(PG_BUSY, &req->wb_flags))
- 		goto out;
-+	/*
-+	 * Note: the call to rpc_clnt_sigmask() suffices to ensure that we
-+	 * 	 are not interrupted if intr flag is not set
-+	 */
-+	rpc_clnt_sigmask(clnt, &oldmask);
- 	ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
--			nfs_wait_bit_killable, TASK_KILLABLE);
-+			nfs_wait_bit_interruptible, TASK_INTERRUPTIBLE);
-+	rpc_clnt_sigunmask(clnt, &oldmask);
- out:
- 	return ret;
+ 	return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl);
  }
 diff --git a/fs/nfs/super.c b/fs/nfs/super.c
-index 9abcd2b..12dca22 100644
+index e9b2017..3ff4956 100644
 --- a/fs/nfs/super.c
 +++ b/fs/nfs/super.c
-@@ -49,7 +49,6 @@
- #include <net/ipv6.h>
- #include <linux/netdevice.h>
- #include <linux/nfs_xdr.h>
--#include <linux/magic.h>
- #include <linux/parser.h>
- 
- #include <asm/system.h>
-@@ -66,6 +65,7 @@
- enum {
- 	/* Mount options that take no arguments */
- 	Opt_soft, Opt_hard,
-+	Opt_intr, Opt_nointr,
- 	Opt_posix, Opt_noposix,
- 	Opt_cto, Opt_nocto,
- 	Opt_ac, Opt_noac,
-@@ -107,8 +107,8 @@ static match_table_t nfs_mount_option_tokens = {
- 
- 	{ Opt_soft, "soft" },
- 	{ Opt_hard, "hard" },
--	{ Opt_deprecated, "intr" },
--	{ Opt_deprecated, "nointr" },
-+	{ Opt_intr, "intr" },
-+	{ Opt_nointr, "nointr" },
- 	{ Opt_posix, "posix" },
- 	{ Opt_noposix, "noposix" },
- 	{ Opt_cto, "cto" },
 @@ -204,10 +204,9 @@ static match_table_t nfs_secflavor_tokens = {
  static void nfs_umount_begin(struct super_block *);
  static int  nfs_statfs(struct dentry *, struct kstatfs *);
@@ -1476,12 +1091,21 @@
 -static int nfs_get_sb(struct file_system_type *, int, const char *, void *, struct vfsmount *);
 -static int nfs_xdev_get_sb(struct file_system_type *fs_type,
 -		int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
-+static struct super_block * nfs_get_sb(struct file_system_type *, int, const char *, void *);
-+static struct super_block * nfs_xdev_get_sb(struct file_system_type *fs_type,
++static struct super_block *nfs_get_sb(struct file_system_type *, int, const char *, void *);
++static struct super_block *nfs_xdev_get_sb(struct file_system_type *fs_type,
 +		int flags, const char *dev_name, void *raw_data);
  static void nfs_kill_super(struct super_block *);
  static void nfs_put_super(struct super_block *);
  static int nfs_remount(struct super_block *sb, int *flags, char *raw_data);
+@@ -228,7 +227,7 @@ struct file_system_type nfs_xdev_fs_type = {
+ 	.fs_flags	= FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+ };
+ 
+-static const struct super_operations nfs_sops = {
++static struct super_operations nfs_sops = {
+ 	.alloc_inode	= nfs_alloc_inode,
+ 	.destroy_inode	= nfs_destroy_inode,
+ 	.write_inode	= nfs_write_inode,
 @@ -237,17 +236,16 @@ static const struct super_operations nfs_sops = {
  	.clear_inode	= nfs_clear_inode,
  	.umount_begin	= nfs_umount_begin,
@@ -1497,15 +1121,24 @@
 -	int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
 -static int nfs4_referral_get_sb(struct file_system_type *fs_type,
 -	int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
-+static struct super_block * nfs4_get_sb(struct file_system_type *fs_type,
++static struct super_block *nfs4_get_sb(struct file_system_type *fs_type,
 +	int flags, const char *dev_name, void *raw_data);
-+static struct super_block * nfs4_xdev_get_sb(struct file_system_type *fs_type,
++static struct super_block *nfs4_xdev_get_sb(struct file_system_type *fs_type,
 +	int flags, const char *dev_name, void *raw_data);
-+static struct super_block * nfs4_referral_get_sb(struct file_system_type *fs_type,
++static struct super_block *nfs4_referral_get_sb(struct file_system_type *fs_type,
 +	int flags, const char *dev_name, void *raw_data);
  static void nfs4_kill_super(struct super_block *sb);
  
  static struct file_system_type nfs4_fs_type = {
+@@ -274,7 +272,7 @@ struct file_system_type nfs4_referral_fs_type = {
+ 	.fs_flags	= FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+ };
+ 
+-static const struct super_operations nfs4_sops = {
++static struct super_operations nfs4_sops = {
+ 	.alloc_inode	= nfs_alloc_inode,
+ 	.destroy_inode	= nfs_destroy_inode,
+ 	.write_inode	= nfs_write_inode,
 @@ -282,15 +280,11 @@ static const struct super_operations nfs4_sops = {
  	.clear_inode	= nfs4_clear_inode,
  	.umount_begin	= nfs_umount_begin,
@@ -1523,16 +1156,29 @@
  
  /*
   * Register the NFS filesystems
-@@ -311,7 +305,7 @@ int __init register_nfs_fs(void)
+@@ -299,7 +293,7 @@ int __init register_nfs_fs(void)
+ {
+ 	int ret;
+ 
+-        ret = register_filesystem(&nfs_fs_type);
++	ret = register_filesystem(&nfs_fs_type);
  	if (ret < 0)
+ 		goto error_0;
+ 
+@@ -311,7 +305,11 @@ int __init register_nfs_fs(void)
+ 	if (ret < 0)
  		goto error_2;
  #endif
 -	register_shrinker(&acl_shrinker);
++	ret = init_mnt_writers();
++	if (ret)
++		printk(KERN_WARNING "Couldn't init mnt_writers\n");
++
 +	acl_shrinker = set_shrinker(DEFAULT_SEEKS, nfs_access_cache_shrinker);
  	return 0;
  
  #ifdef CONFIG_NFS_V4
-@@ -329,7 +323,8 @@ error_0:
+@@ -329,7 +327,8 @@ error_0:
   */
  void __exit unregister_nfs_fs(void)
  {
@@ -1542,7 +1188,7 @@
  #ifdef CONFIG_NFS_V4
  	unregister_filesystem(&nfs4_fs_type);
  #endif
-@@ -569,83 +564,6 @@ static int nfs_show_options(struct seq_file *m, struct vfsmount *mnt)
+@@ -569,83 +568,6 @@ static int nfs_show_options(struct seq_file *m, struct vfsmount *mnt)
  }
  
  /*
@@ -1626,268 +1272,27 @@
   * Begin unmount by attempting to remove all automounted mountpoints we added
   * in response to xdev traversals and referrals
   */
-@@ -708,7 +626,6 @@ static void nfs_parse_ipv4_address(char *string, size_t str_len,
- 				   struct sockaddr *sap, size_t *addr_len)
- {
- 	struct sockaddr_in *sin = (struct sockaddr_in *)sap;
--	u8 *addr = (u8 *)&sin->sin_addr.s_addr;
- 
- 	if (str_len <= INET_ADDRSTRLEN) {
- 		dfprintk(MOUNT, "NFS: parsing IPv4 address %*s\n",
-@@ -716,80 +633,21 @@ static void nfs_parse_ipv4_address(char *string, size_t str_len,
- 
- 		sin->sin_family = AF_INET;
- 		*addr_len = sizeof(*sin);
--		if (in4_pton(string, str_len, addr, '\0', NULL))
--			return;
-+		sin->sin_addr.s_addr = in_aton(string);
- 	}
- 
- 	sap->sa_family = AF_UNSPEC;
- 	*addr_len = 0;
+@@ -1942,8 +1864,8 @@ static int nfs_bdi_register(struct nfs_server *server)
+ 	return bdi_register_dev(&server->backing_dev_info, server->s_dev);
  }
  
--#define IPV6_SCOPE_DELIMITER	'%'
--
--#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
--static void nfs_parse_ipv6_scope_id(const char *string, const size_t str_len,
--				    const char *delim,
--				    struct sockaddr_in6 *sin6)
--{
--	char *p;
--	size_t len;
--
--	if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
--		return ;
--	if (*delim != IPV6_SCOPE_DELIMITER)
--		return;
--
--	len = (string + str_len) - delim - 1;
--	p = kstrndup(delim + 1, len, GFP_KERNEL);
--	if (p) {
--		unsigned long scope_id = 0;
--		struct net_device *dev;
--
--		dev = dev_get_by_name(&init_net, p);
--		if (dev != NULL) {
--			scope_id = dev->ifindex;
--			dev_put(dev);
--		} else {
--			/* scope_id is set to zero on error */
--			strict_strtoul(p, 10, &scope_id);
--		}
--
--		kfree(p);
--		sin6->sin6_scope_id = scope_id;
--		dfprintk(MOUNT, "NFS: IPv6 scope ID = %lu\n", scope_id);
--	}
--}
--
--static void nfs_parse_ipv6_address(char *string, size_t str_len,
--				   struct sockaddr *sap, size_t *addr_len)
--{
--	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
--	u8 *addr = (u8 *)&sin6->sin6_addr.in6_u;
--	const char *delim;
--
--	if (str_len <= INET6_ADDRSTRLEN) {
--		dfprintk(MOUNT, "NFS: parsing IPv6 address %*s\n",
--				(int)str_len, string);
--
--		sin6->sin6_family = AF_INET6;
--		*addr_len = sizeof(*sin6);
--		if (in6_pton(string, str_len, addr, IPV6_SCOPE_DELIMITER, &delim)) {
--			nfs_parse_ipv6_scope_id(string, str_len, delim, sin6);
--			return;
--		}
--	}
-+/* no IPV6 for now - jeff becker */
- 
--	sap->sa_family = AF_UNSPEC;
--	*addr_len = 0;
--}
--#else
- static void nfs_parse_ipv6_address(char *string, size_t str_len,
- 				   struct sockaddr *sap, size_t *addr_len)
- {
- 	sap->sa_family = AF_UNSPEC;
- 	*addr_len = 0;
- }
--#endif
- 
- /*
-  * Construct a sockaddr based on the contents of a string that contains
-@@ -929,7 +787,7 @@ static void nfs_parse_invalid_value(const char *option)
- static int nfs_parse_mount_options(char *raw,
- 				   struct nfs_parsed_mount_data *mnt)
- {
--	char *p, *string, *secdata;
-+	char *p, *string;
- 	int rc, sloppy = 0, errors = 0;
- 
- 	if (!raw) {
-@@ -938,20 +796,6 @@ static int nfs_parse_mount_options(char *raw,
- 	}
- 	dfprintk(MOUNT, "NFS: nfs mount opts='%s'\n", raw);
- 
--	secdata = alloc_secdata();
--	if (!secdata)
--		goto out_nomem;
--
--	rc = security_sb_copy_data(raw, secdata);
--	if (rc)
--		goto out_security_failure;
--
--	rc = security_sb_parse_opts_str(secdata, &mnt->lsm_opts);
--	if (rc)
--		goto out_security_failure;
--
--	free_secdata(secdata);
--
- 	while ((p = strsep(&raw, ",")) != NULL) {
- 		substring_t args[MAX_OPT_ARGS];
- 		int option, token;
-@@ -973,6 +817,12 @@ static int nfs_parse_mount_options(char *raw,
- 		case Opt_hard:
- 			mnt->flags &= ~NFS_MOUNT_SOFT;
- 			break;
-+		case Opt_intr:
-+			mnt->flags |= NFS_MOUNT_INTR;
-+			break;
-+		case Opt_nointr:
-+			mnt->flags &= ~NFS_MOUNT_INTR;
-+			break;
- 		case Opt_posix:
- 			mnt->flags |= NFS_MOUNT_POSIX;
- 			break;
-@@ -1284,10 +1134,6 @@ static int nfs_parse_mount_options(char *raw,
- out_nomem:
- 	printk(KERN_INFO "NFS: not enough memory to parse option\n");
- 	return 0;
--out_security_failure:
--	free_secdata(secdata);
--	printk(KERN_INFO "NFS: security options invalid: %d\n", rc);
--	return 0;
- }
- 
- /*
-@@ -1362,9 +1208,10 @@ static int nfs_parse_simple_hostname(const char *dev_name,
- 		goto out_hostname;
- 
- 	/* N.B. caller will free nfs_server.hostname in all cases */
--	*hostname = kstrndup(dev_name, len, GFP_KERNEL);
-+	*hostname = kzalloc(len, GFP_KERNEL);
- 	if (!*hostname)
- 		goto out_nomem;
-+	strncpy(*hostname, dev_name, len - 1);
- 
- 	/* kill possible hostname list: not supported */
- 	comma = strchr(*hostname, ',');
-@@ -1378,9 +1225,10 @@ static int nfs_parse_simple_hostname(const char *dev_name,
- 	len = strlen(colon);
- 	if (len > maxpathlen)
- 		goto out_path;
--	*export_path = kstrndup(colon, len, GFP_KERNEL);
-+	*export_path = kzalloc(len, GFP_KERNEL);
- 	if (!*export_path)
- 		goto out_nomem;
-+	strncpy(*export_path, colon, len - 1);
- 
- 	dfprintk(MOUNT, "NFS: MNTPATH: '%s'\n", *export_path);
- 	return 0;
-@@ -1427,17 +1275,19 @@ static int nfs_parse_protected_hostname(const char *dev_name,
- 		goto out_hostname;
- 
- 	/* N.B. caller will free nfs_server.hostname in all cases */
--	*hostname = kstrndup(start, len, GFP_KERNEL);
-+	*hostname = kzalloc(len, GFP_KERNEL);
- 	if (*hostname == NULL)
- 		goto out_nomem;
-+	strncpy(*hostname, start, len - 1);
- 
- 	end += 2;
- 	len = strlen(end);
- 	if (len > maxpathlen)
- 		goto out_path;
--	*export_path = kstrndup(end, len, GFP_KERNEL);
-+	*export_path = kzalloc(len, GFP_KERNEL);
- 	if (!*export_path)
- 		goto out_nomem;
-+	strncpy(*export_path, end, len - 1);
- 
- 	return 0;
- 
-@@ -1581,32 +1431,6 @@ static int nfs_validate_mount_data(void *options,
- 		if (!args->nfs_server.hostname)
- 			goto out_nomem;
- 
--		/*
--		 * The legacy version 6 binary mount data from userspace has a
--		 * field used only to transport selinux information into the
--		 * the kernel.  To continue to support that functionality we
--		 * have a touch of selinux knowledge here in the NFS code. The
--		 * userspace code converted context=blah to just blah so we are
--		 * converting back to the full string selinux understands.
--		 */
--		if (data->context[0]){
--#ifdef CONFIG_SECURITY_SELINUX
--			int rc;
--			char *opts_str = kmalloc(sizeof(data->context) + 8, GFP_KERNEL);
--			if (!opts_str)
--				return -ENOMEM;
--			strcpy(opts_str, "context=");
--			data->context[NFS_MAX_CONTEXT_LEN] = '\0';
--			strcat(opts_str, &data->context[0]);
--			rc = security_sb_parse_opts_str(opts_str, &args->lsm_opts);
--			kfree(opts_str);
--			if (rc)
--				return rc;
--#else
--			return -EINVAL;
--#endif
--		}
--
- 		break;
- 	default: {
- 		int status;
-@@ -1798,7 +1622,7 @@ static void nfs_fill_super(struct super_block *sb,
- 		sb->s_time_gran = 1;
- 	}
- 
--	sb->s_op = &nfs_sops;
-+	sb->s_op = (struct super_operations *) &nfs_sops;
-  	nfs_initialise_sb(sb);
- }
- 
-@@ -1931,13 +1755,8 @@ static int nfs_compare_super(struct super_block *sb, void *data)
- 	return nfs_compare_mount_options(sb, server, mntflags);
- }
- 
--static int nfs_bdi_register(struct nfs_server *server)
--{
--	return bdi_register_dev(&server->backing_dev_info, server->s_dev);
--}
--
 -static int nfs_get_sb(struct file_system_type *fs_type,
 -	int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt)
-+static struct super_block * nfs_get_sb(struct file_system_type *fs_type,
++static struct super_block *nfs_get_sb(struct file_system_type *fs_type,
 +	int flags, const char *dev_name, void *raw_data)
  {
  	struct nfs_server *server = NULL;
  	struct super_block *s;
-@@ -1950,22 +1769,25 @@ static int nfs_get_sb(struct file_system_type *fs_type,
+@@ -1955,6 +1877,7 @@ static int nfs_get_sb(struct file_system_type *fs_type,
+ 		.mntflags = flags,
  	};
  	int error = -ENOMEM;
- 
 +	s = ERR_PTR(error);
-+
+ 
  	data = kzalloc(sizeof(*data), GFP_KERNEL);
  	mntfh = kzalloc(sizeof(*mntfh), GFP_KERNEL);
- 	if (data == NULL || mntfh == NULL)
- 		goto out_free_fh;
+@@ -1965,13 +1888,16 @@ static int nfs_get_sb(struct file_system_type *fs_type,
  
--	security_init_mnt_opts(&data->lsm_opts);
--
  	/* Validate the mount data */
  	error = nfs_validate_mount_data(raw_data, data, mntfh, dev_name);
 -	if (error < 0)
@@ -1904,18 +1309,30 @@
  		goto out;
  	}
  	sb_mntdata.server = server;
-@@ -1983,10 +1805,6 @@ static int nfs_get_sb(struct file_system_type *fs_type,
+@@ -1981,18 +1907,18 @@ static int nfs_get_sb(struct file_system_type *fs_type,
+ 
+ 	/* Get a superblock - note that we may end up sharing one that already exists */
+ 	s = sget(fs_type, compare_super, nfs_set_super, &sb_mntdata);
+-	if (IS_ERR(s)) {
+-		error = PTR_ERR(s);
++	if (IS_ERR(s))
+ 		goto out_err_nosb;
+-	}
+ 
  	if (s->s_fs_info != server) {
  		nfs_free_server(server);
  		server = NULL;
--	} else {
--		error = nfs_bdi_register(server);
+ 	} else {
+ 		error = nfs_bdi_register(server);
 -		if (error)
--			goto error_splat_super;
++		if (error) {
++			s = ERR_PTR(error);
+ 			goto error_splat_super;
++		}
  	}
  
  	if (!s->s_root) {
-@@ -1997,33 +1815,27 @@ static int nfs_get_sb(struct file_system_type *fs_type,
+@@ -2003,17 +1929,18 @@ static int nfs_get_sb(struct file_system_type *fs_type,
  	mntroot = nfs_get_root(s, mntfh);
  	if (IS_ERR(mntroot)) {
  		error = PTR_ERR(mntroot);
@@ -1923,21 +1340,22 @@
  		goto error_splat_super;
  	}
  
--	error = security_sb_set_mnt_opts(s, &data->lsm_opts);
+ 	error = security_sb_set_mnt_opts(s, &data->lsm_opts);
 -	if (error)
--		goto error_splat_root;
--
++	if (error) {
++		s = ERR_PTR(error);
+ 		goto error_splat_root;
++	}
+ 
++	s->s_root = mntroot;
  	s->s_flags |= MS_ACTIVE;
 -	mnt->mnt_sb = s;
 -	mnt->mnt_root = mntroot;
-+/*	mnt->mnt_sb = s;
-+	mnt->mnt_root = mntroot; */
- 	error = 0;
+-	error = 0;
  
  out:
  	kfree(data->nfs_server.hostname);
- 	kfree(data->mount_server.hostname);
--	security_free_mnt_opts(&data->lsm_opts);
+@@ -2022,7 +1949,7 @@ out:
  out_free_fh:
  	kfree(mntfh);
  	kfree(data);
@@ -1946,34 +1364,28 @@
  
  out_err_nosb:
  	nfs_free_server(server);
- 	goto out;
- 
--error_splat_root:
--	dput(mntroot);
- error_splat_super:
- 	up_write(&s->s_umount);
- 	deactivate_super(s);
-@@ -2037,7 +1849,6 @@ static void nfs_kill_super(struct super_block *s)
- {
- 	struct nfs_server *server = NFS_SB(s);
- 
--	bdi_unregister(&server->backing_dev_info);
- 	kill_anon_super(s);
- 	nfs_free_server(server);
- }
-@@ -2045,9 +1856,8 @@ static void nfs_kill_super(struct super_block *s)
+@@ -2051,9 +1978,8 @@ static void nfs_kill_super(struct super_block *s)
  /*
   * Clone an NFS2/3 server record on xdev traversal (FSID-change)
   */
 -static int nfs_xdev_get_sb(struct file_system_type *fs_type, int flags,
 -			   const char *dev_name, void *raw_data,
 -			   struct vfsmount *mnt)
-+static struct super_block *  nfs_xdev_get_sb(struct file_system_type *fs_type, 
-+		int flags, const char *dev_name, void *raw_data)
++static struct super_block *nfs_xdev_get_sb(struct file_system_type *fs_type, int flags,
++			   const char *dev_name, void *raw_data)
  {
  	struct nfs_clone_mount *data = raw_data;
  	struct super_block *s;
-@@ -2065,6 +1875,7 @@ static int nfs_xdev_get_sb(struct file_system_type *fs_type, int flags,
+@@ -2063,7 +1989,7 @@ static int nfs_xdev_get_sb(struct file_system_type *fs_type, int flags,
+ 	struct nfs_sb_mountdata sb_mntdata = {
+ 		.mntflags = flags,
+ 	};
+-	int error;
++	int error = 0;
+ 
+ 	dprintk("--> nfs_xdev_get_sb()\n");
+ 
+@@ -2071,6 +1997,7 @@ static int nfs_xdev_get_sb(struct file_system_type *fs_type, int flags,
  	server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr);
  	if (IS_ERR(server)) {
  		error = PTR_ERR(server);
@@ -1981,18 +1393,30 @@
  		goto out_err_noserver;
  	}
  	sb_mntdata.server = server;
-@@ -2082,10 +1893,6 @@ static int nfs_xdev_get_sb(struct file_system_type *fs_type, int flags,
+@@ -2080,18 +2007,18 @@ static int nfs_xdev_get_sb(struct file_system_type *fs_type, int flags,
+ 
+ 	/* Get a superblock - note that we may end up sharing one that already exists */
+ 	s = sget(&nfs_fs_type, compare_super, nfs_set_super, &sb_mntdata);
+-	if (IS_ERR(s)) {
+-		error = PTR_ERR(s);
++	if (IS_ERR(s))
+ 		goto out_err_nosb;
+-	}
+ 
  	if (s->s_fs_info != server) {
  		nfs_free_server(server);
  		server = NULL;
--	} else {
--		error = nfs_bdi_register(server);
+ 	} else {
+ 		error = nfs_bdi_register(server);
 -		if (error)
--			goto error_splat_super;
++		if (error) {
++			s = ERR_PTR(error);
+ 			goto error_splat_super;
++		}
  	}
  
  	if (!s->s_root) {
-@@ -2096,35 +1903,34 @@ static int nfs_xdev_get_sb(struct file_system_type *fs_type, int flags,
+@@ -2102,35 +2029,36 @@ static int nfs_xdev_get_sb(struct file_system_type *fs_type, int flags,
  	mntroot = nfs_get_root(s, data->fh);
  	if (IS_ERR(mntroot)) {
  		error = PTR_ERR(mntroot);
@@ -2006,15 +1430,14 @@
  		goto error_splat_super;
  	}
  
++	s->s_root = mntroot;
  	s->s_flags |= MS_ACTIVE;
 -	mnt->mnt_sb = s;
 -	mnt->mnt_root = mntroot;
--
--	/* clone any lsm security options from the parent to the new sb */
--	security_sb_clone_mnt_opts(data->sb, s);
-+/*	mnt->mnt_sb = s;
-+	mnt->mnt_root = mntroot; */
  
+ 	/* clone any lsm security options from the parent to the new sb */
+ 	security_sb_clone_mnt_opts(data->sb, s);
+ 
  	dprintk("<-- nfs_xdev_get_sb() = 0\n");
 -	return 0;
 +	return s;
@@ -2035,84 +1458,26 @@
  }
  
  #ifdef CONFIG_NFS_V4
-@@ -2149,10 +1955,32 @@ static void nfs4_clone_super(struct super_block *sb,
- static void nfs4_fill_super(struct super_block *sb)
- {
- 	sb->s_time_gran = 1;
--	sb->s_op = &nfs4_sops;
-+	sb->s_op = (struct super_operations *) &nfs4_sops;
- 	nfs_initialise_sb(sb);
- }
- 
-+static void *nfs_copy_user_string(char *dst, struct nfs_string *src, int maxlen
-+		)
-+{
-+        void *p = NULL;
-+
-+        if (!src->len)
-+                return ERR_PTR(-EINVAL);
-+        if (src->len < maxlen)
-+                maxlen = src->len;
-+        if (dst == NULL) {
-+                p = dst = kmalloc(maxlen + 1, GFP_KERNEL);
-+                if (p == NULL)
-+                        return ERR_PTR(-ENOMEM);
-+        }
-+        if (copy_from_user(dst, src->data, maxlen)) {
-+                kfree(p);
-+                return ERR_PTR(-EFAULT);
-+        }
-+        dst[maxlen] = '\0';
-+        return dst;
-+}
-+
+@@ -2296,8 +2224,8 @@ out_no_client_address:
  /*
-  * Validate NFSv4 mount options
-  */
-@@ -2200,18 +2028,18 @@ static int nfs4_validate_mount_data(void *options,
- 				return -EFAULT;
- 		}
- 
--		c = strndup_user(data->hostname.data, NFS4_MAXNAMLEN);
-+		c = nfs_copy_user_string(NULL, &data->hostname, NFS4_MAXNAMLEN);
- 		if (IS_ERR(c))
- 			return PTR_ERR(c);
- 		args->nfs_server.hostname = c;
- 
--		c = strndup_user(data->mnt_path.data, NFS4_MAXPATHLEN);
-+		c = nfs_copy_user_string(NULL, &data->mnt_path, NFS4_MAXPATHLEN);
- 		if (IS_ERR(c))
- 			return PTR_ERR(c);
- 		args->nfs_server.export_path = c;
- 		dfprintk(MOUNT, "NFS: MNTPATH: '%s'\n", c);
- 
--		c = strndup_user(data->client_addr.data, 16);
-+		c = nfs_copy_user_string(NULL, &data->client_addr, 16);
- 		if (IS_ERR(c))
- 			return PTR_ERR(c);
- 		args->client_address = c;
-@@ -2290,8 +2118,8 @@ out_no_client_address:
- /*
   * Get the superblock for an NFS4 mountpoint
   */
 -static int nfs4_get_sb(struct file_system_type *fs_type,
 -	int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt)
-+static struct super_block * nfs4_get_sb(struct file_system_type *fs_type,
++static struct super_block *nfs4_get_sb(struct file_system_type *fs_type,
 +	int flags, const char *dev_name, void *raw_data)
  {
  	struct nfs_parsed_mount_data *data;
  	struct super_block *s;
-@@ -2306,20 +2134,23 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
+@@ -2309,6 +2237,7 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
+ 		.mntflags = flags,
+ 	};
+ 	int error = -ENOMEM;
++	s = ERR_PTR(error);
  
  	data = kzalloc(sizeof(*data), GFP_KERNEL);
  	mntfh = kzalloc(sizeof(*mntfh), GFP_KERNEL);
--	if (data == NULL || mntfh == NULL)
-+	if (data == NULL || mntfh == NULL) {
-+		s = ERR_PTR(error);
- 		goto out_free_fh;
--
--	security_init_mnt_opts(&data->lsm_opts);
-+	}
+@@ -2319,13 +2248,16 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
  
  	/* Validate the mount data */
  	error = nfs4_validate_mount_data(raw_data, data, dev_name);
@@ -2130,18 +1495,30 @@
  		goto out;
  	}
  	sb_mntdata.server = server;
-@@ -2337,10 +2168,6 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
+@@ -2335,18 +2267,18 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
+ 
+ 	/* Get a superblock - note that we may end up sharing one that already exists */
+ 	s = sget(fs_type, compare_super, nfs_set_super, &sb_mntdata);
+-	if (IS_ERR(s)) {
+-		error = PTR_ERR(s);
++	if (IS_ERR(s))
+ 		goto out_free;
+-	}
+ 
  	if (s->s_fs_info != server) {
  		nfs_free_server(server);
  		server = NULL;
--	} else {
--		error = nfs_bdi_register(server);
+ 	} else {
+ 		error = nfs_bdi_register(server);
 -		if (error)
--			goto error_splat_super;
++		if (error) {
++			s = ERR_PTR(error);
+ 			goto error_splat_super;
++		}
  	}
  
  	if (!s->s_root) {
-@@ -2351,34 +2178,28 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
+@@ -2357,17 +2289,18 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
  	mntroot = nfs4_get_root(s, mntfh);
  	if (IS_ERR(mntroot)) {
  		error = PTR_ERR(mntroot);
@@ -2149,22 +1526,22 @@
  		goto error_splat_super;
  	}
  
--	error = security_sb_set_mnt_opts(s, &data->lsm_opts);
+ 	error = security_sb_set_mnt_opts(s, &data->lsm_opts);
 -	if (error)
--		goto error_splat_root;
--
++	if (error) {
++		s = ERR_PTR(error);
+ 		goto error_splat_root;
++	}
+ 
++	s->s_root = mntroot;
  	s->s_flags |= MS_ACTIVE;
 -	mnt->mnt_sb = s;
 -	mnt->mnt_root = mntroot;
-+/*	mnt->mnt_sb = s;
-+	mnt->mnt_root = mntroot; */
- 	error = 0;
+-	error = 0;
  
  out:
  	kfree(data->client_address);
- 	kfree(data->nfs_server.export_path);
- 	kfree(data->nfs_server.hostname);
--	security_free_mnt_opts(&data->lsm_opts);
+@@ -2377,7 +2310,7 @@ out:
  out_free_fh:
  	kfree(mntfh);
  	kfree(data);
@@ -2173,26 +1550,28 @@
  
  out_free:
  	nfs_free_server(server);
- 	goto out;
- 
--error_splat_root:
--	dput(mntroot);
- error_splat_super:
- 	up_write(&s->s_umount);
- 	deactivate_super(s);
-@@ -2399,9 +2220,8 @@ static void nfs4_kill_super(struct super_block *sb)
+@@ -2405,9 +2338,8 @@ static void nfs4_kill_super(struct super_block *sb)
  /*
   * Clone an NFS4 server record on xdev traversal (FSID-change)
   */
 -static int nfs4_xdev_get_sb(struct file_system_type *fs_type, int flags,
 -			    const char *dev_name, void *raw_data,
 -			    struct vfsmount *mnt)
-+static struct super_block * nfs4_xdev_get_sb(struct file_system_type *fs_type, 
-+		int flags, const char *dev_name, void *raw_data)
++static struct super_block *nfs4_xdev_get_sb(struct file_system_type *fs_type, int flags,
++			    const char *dev_name, void *raw_data)
  {
  	struct nfs_clone_mount *data = raw_data;
  	struct super_block *s;
-@@ -2419,6 +2239,7 @@ static int nfs4_xdev_get_sb(struct file_system_type *fs_type, int flags,
+@@ -2417,7 +2349,7 @@ static int nfs4_xdev_get_sb(struct file_system_type *fs_type, int flags,
+ 	struct nfs_sb_mountdata sb_mntdata = {
+ 		.mntflags = flags,
+ 	};
+-	int error;
++	int error = 0;
+ 
+ 	dprintk("--> nfs4_xdev_get_sb()\n");
+ 
+@@ -2425,6 +2357,7 @@ static int nfs4_xdev_get_sb(struct file_system_type *fs_type, int flags,
  	server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr);
  	if (IS_ERR(server)) {
  		error = PTR_ERR(server);
@@ -2200,18 +1579,30 @@
  		goto out_err_noserver;
  	}
  	sb_mntdata.server = server;
-@@ -2436,10 +2257,6 @@ static int nfs4_xdev_get_sb(struct file_system_type *fs_type, int flags,
+@@ -2434,18 +2367,18 @@ static int nfs4_xdev_get_sb(struct file_system_type *fs_type, int flags,
+ 
+ 	/* Get a superblock - note that we may end up sharing one that already exists */
+ 	s = sget(&nfs_fs_type, compare_super, nfs_set_super, &sb_mntdata);
+-	if (IS_ERR(s)) {
+-		error = PTR_ERR(s);
++	if (IS_ERR(s))
+ 		goto out_err_nosb;
+-	}
+ 
  	if (s->s_fs_info != server) {
  		nfs_free_server(server);
  		server = NULL;
--	} else {
--		error = nfs_bdi_register(server);
+ 	} else {
+ 		error = nfs_bdi_register(server);
 -		if (error)
--			goto error_splat_super;
++		if (error) {
++			s = ERR_PTR(error);
+ 			goto error_splat_super;
++		}
  	}
  
  	if (!s->s_root) {
-@@ -2450,42 +2267,41 @@ static int nfs4_xdev_get_sb(struct file_system_type *fs_type, int flags,
+@@ -2456,42 +2389,42 @@ static int nfs4_xdev_get_sb(struct file_system_type *fs_type, int flags,
  	mntroot = nfs4_get_root(s, data->fh);
  	if (IS_ERR(mntroot)) {
  		error = PTR_ERR(mntroot);
@@ -2225,14 +1616,13 @@
  		goto error_splat_super;
  	}
  
++	s->s_root = mntroot;
  	s->s_flags |= MS_ACTIVE;
 -	mnt->mnt_sb = s;
 -	mnt->mnt_root = mntroot;
--
--	security_sb_clone_mnt_opts(data->sb, s);
-+/*	mnt->mnt_sb = s;
-+	mnt->mnt_root = mntroot; */
  
+ 	security_sb_clone_mnt_opts(data->sb, s);
+ 
  	dprintk("<-- nfs4_xdev_get_sb() = 0\n");
 -	return 0;
 +	return s;
@@ -2258,12 +1648,21 @@
 -static int nfs4_referral_get_sb(struct file_system_type *fs_type, int flags,
 -				const char *dev_name, void *raw_data,
 -				struct vfsmount *mnt)
-+static struct super_block * nfs4_referral_get_sb(struct file_system_type *fs_type, 
-+		int flags, const char *dev_name, void *raw_data)
++static struct super_block *nfs4_referral_get_sb(struct file_system_type *fs_type, int flags,
++				const char *dev_name, void *raw_data)
  {
  	struct nfs_clone_mount *data = raw_data;
  	struct super_block *s;
-@@ -2504,6 +2320,7 @@ static int nfs4_referral_get_sb(struct file_system_type *fs_type, int flags,
+@@ -2502,7 +2435,7 @@ static int nfs4_referral_get_sb(struct file_system_type *fs_type, int flags,
+ 	struct nfs_sb_mountdata sb_mntdata = {
+ 		.mntflags = flags,
+ 	};
+-	int error;
++	int error = 0;
+ 
+ 	dprintk("--> nfs4_referral_get_sb()\n");
+ 
+@@ -2510,6 +2443,7 @@ static int nfs4_referral_get_sb(struct file_system_type *fs_type, int flags,
  	server = nfs4_create_referral_server(data, &mntfh);
  	if (IS_ERR(server)) {
  		error = PTR_ERR(server);
@@ -2271,18 +1670,30 @@
  		goto out_err_noserver;
  	}
  	sb_mntdata.server = server;
-@@ -2521,10 +2338,6 @@ static int nfs4_referral_get_sb(struct file_system_type *fs_type, int flags,
+@@ -2519,18 +2453,18 @@ static int nfs4_referral_get_sb(struct file_system_type *fs_type, int flags,
+ 
+ 	/* Get a superblock - note that we may end up sharing one that already exists */
+ 	s = sget(&nfs_fs_type, compare_super, nfs_set_super, &sb_mntdata);
+-	if (IS_ERR(s)) {
+-		error = PTR_ERR(s);
++	if (IS_ERR(s))
+ 		goto out_err_nosb;
+-	}
+ 
  	if (s->s_fs_info != server) {
  		nfs_free_server(server);
  		server = NULL;
--	} else {
--		error = nfs_bdi_register(server);
+ 	} else {
+ 		error = nfs_bdi_register(server);
 -		if (error)
--			goto error_splat_super;
++		if (error) {
++			s = ERR_PTR(error);
+ 			goto error_splat_super;
++		}
  	}
  
  	if (!s->s_root) {
-@@ -2535,34 +2348,34 @@ static int nfs4_referral_get_sb(struct file_system_type *fs_type, int flags,
+@@ -2541,34 +2475,35 @@ static int nfs4_referral_get_sb(struct file_system_type *fs_type, int flags,
  	mntroot = nfs4_get_root(s, &mntfh);
  	if (IS_ERR(mntroot)) {
  		error = PTR_ERR(mntroot);
@@ -2296,14 +1707,13 @@
  		goto error_splat_super;
  	}
  
++	s->s_root = mntroot;
  	s->s_flags |= MS_ACTIVE;
 -	mnt->mnt_sb = s;
 -	mnt->mnt_root = mntroot;
--
--	security_sb_clone_mnt_opts(data->sb, s);
-+/*	mnt->mnt_sb = s;
-+	mnt->mnt_root = mntroot; */
  
+ 	security_sb_clone_mnt_opts(data->sb, s);
+ 
  	dprintk("<-- nfs4_referral_get_sb() = 0\n");
 -	return 0;
 +	return s;
@@ -2324,345 +1734,610 @@
  }
  
  #endif /* CONFIG_NFS_V4 */
-diff --git a/fs/nfs/read.c b/fs/nfs/read.c
-index 40d1798..32c5380 100644
---- a/fs/nfs/read.c
-+++ b/fs/nfs/read.c
-@@ -76,7 +76,7 @@ void nfs_readdata_release(void *data)
- static
- int nfs_return_empty_page(struct page *page)
- {
--	zero_user(page, 0, PAGE_CACHE_SIZE);
-+	memclear_highpage_flush(page, 0, PAGE_CACHE_SIZE);
- 	SetPageUptodate(page);
- 	unlock_page(page);
+diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c
+index 412738d..b17f14a 100644
+--- a/fs/nfs/symlink.c
++++ b/fs/nfs/symlink.c
+@@ -70,7 +70,7 @@ read_failed:
+ /*
+  * symlinks can't do much...
+  */
+-const struct inode_operations nfs_symlink_inode_operations = {
++struct inode_operations nfs_symlink_inode_operations = {
+ 	.readlink	= generic_readlink,
+ 	.follow_link	= nfs_follow_link,
+ 	.put_link	= page_put_link,
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 3229e21..ba7c0c2 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -13,7 +13,7 @@
+ #include <linux/file.h>
+ #include <linux/writeback.h>
+ #include <linux/swap.h>
+-
++#include <linux/mpage.h>
+ #include <linux/sunrpc/clnt.h>
+ #include <linux/nfs_fs.h>
+ #include <linux/nfs_mount.h>
+@@ -410,7 +410,7 @@ nfs_mark_request_commit(struct nfs_page *req)
+ 			req->wb_index,
+ 			NFS_PAGE_TAG_COMMIT);
+ 	spin_unlock(&inode->i_lock);
+-	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
++	inc_page_state(nr_unstable);
+ 	inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
+ 	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+ }
+@@ -421,7 +421,7 @@ nfs_clear_request_commit(struct nfs_page *req)
+ 	struct page *page = req->wb_page;
+ 
+ 	if (test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) {
+-		dec_zone_page_state(page, NR_UNSTABLE_NFS);
++		dec_page_state(nr_unstable);
+ 		dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE);
+ 		return 1;
+ 	}
+@@ -726,8 +726,8 @@ int nfs_updatepage(struct file *file, struct page *page,
+ 	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
+ 
+ 	dprintk("NFS:       nfs_updatepage(%s/%s %d@%lld)\n",
+-		file->f_path.dentry->d_parent->d_name.name,
+-		file->f_path.dentry->d_name.name, count,
++		file->f_dentry->d_parent->d_name.name,
++		file->f_dentry->d_name.name, count,
+ 		(long long)(page_offset(page) + offset));
+ 
+ 	/* If we're not using byte range locks, and we know the page
+@@ -1263,7 +1263,7 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
+ 		req = nfs_list_entry(head->next);
+ 		nfs_list_remove_request(req);
+ 		nfs_mark_request_commit(req);
+-		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
++		dec_page_state(nr_unstable);
+ 		dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
+ 				BDI_RECLAIMABLE);
+ 		nfs_clear_page_tag_locked(req);
+@@ -1362,19 +1362,14 @@ long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_contr
+ 	int nocommit = how & FLUSH_NOCOMMIT;
+ 	long pages, ret;
+ 
+-	/* FIXME */
+-	if (wbc->range_cyclic)
+-		idx_start = 0;
+-	else {
+-		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
+-		idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
+-		if (idx_end > idx_start) {
+-			pgoff_t l_npages = 1 + idx_end - idx_start;
+-			npages = l_npages;
+-			if (sizeof(npages) != sizeof(l_npages) &&
+-					(pgoff_t)npages != l_npages)
+-				npages = 0;
+-		}
++	idx_start = wbc->start >> PAGE_CACHE_SHIFT;
++	idx_end = wbc->end >> PAGE_CACHE_SHIFT;
++	if (idx_end > idx_start) {
++		pgoff_t l_npages = 1 + idx_end - idx_start;
++		npages = l_npages;
++		if (sizeof(npages) != sizeof(l_npages) &&
++				(pgoff_t)npages != l_npages)
++			npages = 0;
+ 	}
+ 	how &= ~FLUSH_NOCOMMIT;
+ 	spin_lock(&inode->i_lock);
+@@ -1427,8 +1422,8 @@ static int nfs_write_mapping(struct address_space *mapping, int how)
+ 		.bdi = mapping->backing_dev_info,
+ 		.sync_mode = WB_SYNC_NONE,
+ 		.nr_to_write = LONG_MAX,
+-		.range_start = 0,
+-		.range_end = LLONG_MAX,
++		.start = 0,
++		.end = LLONG_MAX,
+ 		.for_writepages = 1,
+ 	};
+ 	int ret;
+@@ -1461,8 +1457,8 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
+ 		.bdi = page->mapping->backing_dev_info,
+ 		.sync_mode = WB_SYNC_ALL,
+ 		.nr_to_write = LONG_MAX,
+-		.range_start = range_start,
+-		.range_end = range_end,
++		.start = range_start,
++		.end = range_end,
+ 	};
+ 	int ret = 0;
+ 
+@@ -1505,8 +1501,8 @@ static int nfs_wb_page_priority(struct inode *inode, struct page *page,
+ 		.bdi = page->mapping->backing_dev_info,
+ 		.sync_mode = WB_SYNC_ALL,
+ 		.nr_to_write = LONG_MAX,
+-		.range_start = range_start,
+-		.range_end = range_end,
++		.start = range_start,
++		.end = range_end,
+ 	};
+ 	int ret;
+ 
+diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
+index b112857..ff89acc 100644
+--- a/fs/nfs/nfs4namespace.c
++++ b/fs/nfs/nfs4namespace.c
+@@ -192,7 +192,7 @@ static struct vfsmount *nfs_follow_referral(const struct vfsmount *mnt_parent,
+ 					mountdata.hostname,
+ 					mountdata.mnt_path);
+ 
+-			mnt = vfs_kern_mount(&nfs4_referral_fs_type, 0, page, &mountdata);
++			mnt = do_kern_mount("nfs4", 0, page, &mountdata);
+ 			if (!IS_ERR(mnt)) {
+ 				break;
+ 			}
+diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
+index 9dc036f..860d944 100644
+--- a/fs/nfsd/export.c
++++ b/fs/nfsd/export.c
+@@ -168,14 +168,15 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
+ 			goto out;
+ 
+ 		dprintk("Found the path %s\n", buf);
+-		key.ek_path = nd.path;
++		key.ek_path.dentry = nd.dentry;
++		key.ek_path.mnt = nd.mnt;
+ 
+ 		ek = svc_expkey_update(&key, ek);
+ 		if (ek)
+ 			cache_put(&ek->h, &svc_expkey_cache);
+ 		else
+ 			err = -ENOMEM;
+-		path_put(&nd.path);
++		backport_path_put(&nd);
+ 	}
+ 	cache_flush();
+  out:
+@@ -204,7 +205,7 @@ static int expkey_show(struct seq_file *m,
+ 	if (test_bit(CACHE_VALID, &h->flags) && 
+ 	    !test_bit(CACHE_NEGATIVE, &h->flags)) {
+ 		seq_printf(m, " ");
+-		seq_path(m, &ek->ek_path, "\\ \t\n");
++		seq_path(m, ek->ek_path.mnt, ek->ek_path.dentry, "\\ \t\n");
+ 	}
+ 	seq_printf(m, "\n");
  	return 0;
-@@ -100,10 +100,10 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
- 	pglen = PAGE_CACHE_SIZE - base;
- 	for (;;) {
- 		if (remainder <= pglen) {
--			zero_user(*pages, base, remainder);
-+			memclear_highpage_flush(*pages, base, remainder);
- 			break;
+@@ -346,7 +347,7 @@ static void svc_export_request(struct cache_detail *cd,
+ 	char *pth;
+ 
+ 	qword_add(bpp, blen, exp->ex_client->name);
+-	pth = d_path(&exp->ex_path, *bpp, *blen);
++	pth = d_path(exp->ex_path.dentry, exp->ex_path.mnt, *bpp, *blen);
+ 	if (IS_ERR(pth)) {
+ 		/* is this correct? */
+ 		(*bpp)[0] = '\n';
+@@ -385,7 +386,7 @@ static int check_export(struct inode *inode, int flags, unsigned char *uuid)
+ 	}
+ 
+ 	if (!inode->i_sb->s_export_op ||
+-	    !inode->i_sb->s_export_op->fh_to_dentry) {
++	    !inode->i_sb->s_export_op->get_dentry) {
+ 		dprintk("exp_export: export of invalid fs type.\n");
+ 		return -EINVAL;
+ 	}
+@@ -504,7 +505,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
+ 	struct svc_export exp, *expp;
+ 	int an_int;
+ 
+-	nd.path.dentry = NULL;
++	nd.dentry = NULL;
+ 	exp.ex_pathname = NULL;
+ 
+ 	/* fs locations */
+@@ -544,8 +545,8 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
+ 
+ 	exp.h.flags = 0;
+ 	exp.ex_client = dom;
+-	exp.ex_path.mnt = nd.path.mnt;
+-	exp.ex_path.dentry = nd.path.dentry;
++	exp.ex_path.mnt = nd.mnt;
++	exp.ex_path.dentry = nd.dentry;
+ 	exp.ex_pathname = kstrdup(buf, GFP_KERNEL);
+ 	err = -ENOMEM;
+ 	if (!exp.ex_pathname)
+@@ -607,7 +608,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
+ 				goto out;
  		}
--		zero_user(*pages, base, pglen);
-+		memclear_highpage_flush(*pages, base, pglen);
- 		pages++;
- 		remainder -= pglen;
- 		pglen = PAGE_CACHE_SIZE;
-@@ -127,7 +127,7 @@ static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
- 		return PTR_ERR(new);
+ 
+-		err = check_export(nd.path.dentry->d_inode, exp.ex_flags,
++		err = check_export(nd.dentry->d_inode, exp.ex_flags,
+ 				   exp.ex_uuid);
+ 		if (err) goto out;
  	}
- 	if (len < PAGE_CACHE_SIZE)
--		zero_user_segment(page, len, PAGE_CACHE_SIZE);
-+		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
+@@ -626,8 +627,8 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
+ 	nfsd4_fslocs_free(&exp.ex_fslocs);
+ 	kfree(exp.ex_uuid);
+ 	kfree(exp.ex_pathname);
+-	if (nd.path.dentry)
+-		path_put(&nd.path);
++	if (nd.dentry)
++		backport_path_put(&nd);
+  out_no_path:
+ 	if (dom)
+ 		auth_domain_put(dom);
+@@ -650,7 +651,7 @@ static int svc_export_show(struct seq_file *m,
+ 		return 0;
+ 	}
+ 	exp = container_of(h, struct svc_export, h);
+-	seq_path(m, &exp->ex_path, " \t\n\\");
++	seq_path(m, exp->ex_path.mnt, exp->ex_path.dentry, " \t\n\\");
+ 	seq_putc(m, '\t');
+ 	seq_escape(m, exp->ex_client->name, " \t\n\\");
+ 	seq_putc(m, '(');
+@@ -672,6 +673,7 @@ static int svc_export_show(struct seq_file *m,
+ 	seq_puts(m, ")\n");
+ 	return 0;
+ }
++
+ static int svc_export_match(struct cache_head *a, struct cache_head *b)
+ {
+ 	struct svc_export *orig = container_of(a, struct svc_export, h);
+@@ -1026,7 +1028,7 @@ exp_export(struct nfsctl_export *nxp)
+ 		goto out_put_clp;
+ 	err = -EINVAL;
  
- 	nfs_list_add_request(new, &one_request);
- 	if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
-@@ -548,7 +548,7 @@ readpage_async_filler(void *data, struct page *page)
- 		goto out_error;
+-	exp = exp_get_by_name(clp, nd.path.mnt, nd.path.dentry, NULL);
++	exp = exp_get_by_name(clp, nd.mnt, nd.dentry, NULL);
  
- 	if (len < PAGE_CACHE_SIZE)
--		zero_user_segment(page, len, PAGE_CACHE_SIZE);
-+		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
- 	if (!nfs_pageio_add_request(desc->pgio, new)) {
- 		error = desc->pgio->pg_error;
- 		goto out_unlock;
-diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
-index 66df08d..7574738 100644
---- a/fs/nfs/namespace.c
-+++ b/fs/nfs/namespace.c
-@@ -107,29 +107,29 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
+ 	memset(&new, 0, sizeof(new));
  
- 	BUG_ON(IS_ROOT(dentry));
- 	dprintk("%s: enter\n", __func__);
--	dput(nd->path.dentry);
--	nd->path.dentry = dget(dentry);
-+	dput(nd->dentry);
-+	nd->dentry = dget(dentry);
+@@ -1034,8 +1036,8 @@ exp_export(struct nfsctl_export *nxp)
+ 	if ((nxp->ex_flags & NFSEXP_FSID) &&
+ 	    (!IS_ERR(fsid_key = exp_get_fsid_key(clp, nxp->ex_dev))) &&
+ 	    fsid_key->ek_path.mnt &&
+-	    (fsid_key->ek_path.mnt != nd.path.mnt ||
+-	     fsid_key->ek_path.dentry != nd.path.dentry))
++	    (fsid_key->ek_path.mnt != nd.mnt ||
++	     fsid_key->ek_path.dentry != nd.dentry))
+ 		goto finish;
  
- 	/* Look it up again */
--	parent = dget_parent(nd->path.dentry);
-+	parent = dget_parent(nd->dentry);
- 	err = server->nfs_client->rpc_ops->lookup(parent->d_inode,
--						  &nd->path.dentry->d_name,
-+						  &nd->dentry->d_name,
- 						  &fh, &fattr);
- 	dput(parent);
- 	if (err != 0)
- 		goto out_err;
+ 	if (!IS_ERR(exp)) {
+@@ -1051,7 +1053,7 @@ exp_export(struct nfsctl_export *nxp)
+ 		goto finish;
+ 	}
  
- 	if (fattr.valid & NFS_ATTR_FATTR_V4_REFERRAL)
--		mnt = nfs_do_refmount(nd->path.mnt, nd->path.dentry);
-+		mnt = nfs_do_refmount(nd->mnt, nd->dentry);
- 	else
--		mnt = nfs_do_submount(nd->path.mnt, nd->path.dentry, &fh,
-+		mnt = nfs_do_submount(nd->mnt, nd->dentry, &fh,
- 				      &fattr);
- 	err = PTR_ERR(mnt);
- 	if (IS_ERR(mnt))
- 		goto out_err;
+-	err = check_export(nd.path.dentry->d_inode, nxp->ex_flags, NULL);
++	err = check_export(nd.dentry->d_inode, nxp->ex_flags, NULL);
+ 	if (err) goto finish;
  
- 	mntget(mnt);
--	err = do_add_mount(mnt, &nd->path, nd->path.mnt->mnt_flags|MNT_SHRINKABLE,
-+	err = do_add_mount(mnt, nd, nd->mnt->mnt_flags,
- 			   &nfs_automount_list);
- 	if (err < 0) {
- 		mntput(mnt);
-@@ -137,9 +137,10 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
- 			goto out_follow;
- 		goto out_err;
+ 	err = -ENOMEM;
+@@ -1064,7 +1066,8 @@ exp_export(struct nfsctl_export *nxp)
+ 	if (!new.ex_pathname)
+ 		goto finish;
+ 	new.ex_client = clp;
+-	new.ex_path = nd.path;
++	new.ex_path.mnt = nd.mnt;
++	new.ex_path.dentry = nd.dentry;
+ 	new.ex_flags = nxp->ex_flags;
+ 	new.ex_anon_uid = nxp->ex_anon_uid;
+ 	new.ex_anon_gid = nxp->ex_anon_gid;
+@@ -1090,7 +1093,7 @@ finish:
+ 		exp_put(exp);
+ 	if (fsid_key && !IS_ERR(fsid_key))
+ 		cache_put(&fsid_key->h, &svc_expkey_cache);
+-	path_put(&nd.path);
++	backport_path_put(&nd);
+ out_put_clp:
+ 	auth_domain_put(clp);
+ out_unlock:
+@@ -1143,8 +1146,8 @@ exp_unexport(struct nfsctl_export *nxp)
+ 		goto out_domain;
+ 
+ 	err = -EINVAL;
+-	exp = exp_get_by_name(dom, nd.path.mnt, nd.path.dentry, NULL);
+-	path_put(&nd.path);
++	exp = exp_get_by_name(dom, nd.mnt, nd.dentry, NULL);
++	backport_path_put(&nd);
+ 	if (IS_ERR(exp))
+ 		goto out_domain;
+ 
+@@ -1180,12 +1183,12 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
+ 		printk("nfsd: exp_rootfh path not found %s", path);
+ 		return err;
  	}
--	path_put(&nd->path);
--	nd->path.mnt = mnt;
--	nd->path.dentry = dget(mnt->mnt_root);
-+	dput(nd->dentry);
-+	mntput(nd->mnt);
-+	nd->mnt = mnt;
-+	nd->dentry = dget(mnt->mnt_root);
- 	schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout);
+-	inode = nd.path.dentry->d_inode;
++	inode = nd.dentry->d_inode;
+ 
+ 	dprintk("nfsd: exp_rootfh(%s [%p] %s:%s/%ld)\n",
+-		 path, nd.path.dentry, clp->name,
++		 path, nd.dentry, clp->name,
+ 		 inode->i_sb->s_id, inode->i_ino);
+-	exp = exp_parent(clp, nd.path.mnt, nd.path.dentry, NULL);
++	exp = exp_parent(clp, nd.mnt, nd.dentry, NULL);
+ 	if (IS_ERR(exp)) {
+ 		err = PTR_ERR(exp);
+ 		goto out;
+@@ -1195,7 +1198,7 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
+ 	 * fh must be initialized before calling fh_compose
+ 	 */
+ 	fh_init(&fh, maxsize);
+-	if (fh_compose(&fh, exp, nd.path.dentry, NULL))
++	if (fh_compose(&fh, exp, nd.dentry, NULL))
+ 		err = -EINVAL;
+ 	else
+ 		err = 0;
+@@ -1203,7 +1206,7 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
+ 	fh_put(&fh);
+ 	exp_put(exp);
  out:
- 	dprintk("%s: done, returned %d\n", __func__, err);
-@@ -147,11 +148,12 @@ out:
- 	dprintk("<-- nfs_follow_mountpoint() = %d\n", err);
- 	return ERR_PTR(err);
- out_err:
--	path_put(&nd->path);
-+	dput(nd->dentry);
-+	mntput(nd->mnt);
- 	goto out;
- out_follow:
--	while (d_mountpoint(nd->path.dentry) &&
--	       follow_down(&nd->path.mnt, &nd->path.dentry))
-+	while (d_mountpoint(nd->dentry) &&
-+	       follow_down(&nd->mnt, &nd->dentry))
- 		;
- 	err = 0;
- 	goto out;
-diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
-index 1e750e4..a078225 100644
---- a/fs/nfs/nfs3proc.c
-+++ b/fs/nfs/nfs3proc.c
-@@ -27,14 +27,17 @@
- static int
- nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
- {
-+	sigset_t oldset;
- 	int res;
-+	rpc_clnt_sigmask(clnt, &oldset);
- 	do {
- 		res = rpc_call_sync(clnt, msg, flags);
- 		if (res != -EJUKEBOX)
- 			break;
--		schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
-+		schedule_timeout_interruptible(NFS_JUKEBOX_RETRY_TIME);
- 		res = -ERESTARTSYS;
--	} while (!fatal_signal_pending(current));
-+	} while (!signalled());
-+	rpc_clnt_sigunmask(clnt, &oldset);
- 	return res;
+-	path_put(&nd.path);
++	backport_path_put(&nd);
+ 	return err;
  }
  
-@@ -779,7 +782,7 @@ static void nfs3_proc_commit_setup(struct nfs_write_data *data, struct rpc_messa
- static int
- nfs3_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
- {
--	struct inode *inode = filp->f_path.dentry->d_inode;
-+	struct inode *inode = filp->f_dentry->d_inode;
+diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
+index 0766f95..db7ad3b 100644
+--- a/fs/nfsd/nfsproc.c
++++ b/fs/nfsd/nfsproc.c
+@@ -500,7 +500,7 @@ nfsd_proc_readdir(struct svc_rqst *rqstp, struct nfsd_readdirargs *argp,
+ 	/* Read directory and encode entries on the fly */
+ 	offset = argp->cookie;
+ 	nfserr = nfsd_readdir(rqstp, &argp->fh, &offset, 
+-			      &resp->common, nfssvc_encode_entry);
++			      &resp->common, (filldir_t)nfssvc_encode_entry);
  
- 	return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl);
+ 	resp->count = resp->buffer - argp->buffer;
+ 	if (resp->offset)
+diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
+index 4d617ea..bce539f 100644
+--- a/fs/nfsd/nfs3proc.c
++++ b/fs/nfsd/nfs3proc.c
+@@ -449,7 +449,7 @@ nfsd3_proc_readdir(struct svc_rqst *rqstp, struct nfsd3_readdirargs *argp,
+ 	resp->buffer = argp->buffer;
+ 	resp->rqstp = rqstp;
+ 	nfserr = nfsd_readdir(rqstp, &resp->fh, (loff_t*) &argp->cookie, 
+-					&resp->common, nfs3svc_encode_entry);
++					&resp->common, (filldir_t)nfs3svc_encode_entry);
+ 	memcpy(resp->verf, argp->verf, 8);
+ 	resp->count = resp->buffer - argp->buffer;
+ 	if (resp->offset)
+@@ -491,7 +491,7 @@ nfsd3_proc_readdirplus(struct svc_rqst *rqstp, struct nfsd3_readdirargs *argp,
+ 	nfserr = nfsd_readdir(rqstp, &resp->fh,
+ 				     &offset,
+ 				     &resp->common,
+-				     nfs3svc_encode_entry_plus);
++				     (filldir_t)nfs3svc_encode_entry_plus);
+ 	memcpy(resp->verf, argp->verf, 8);
+ 	for (i=1; i<rqstp->rq_resused ; i++) {
+ 		page_addr = page_address(rqstp->rq_respages[i]);
+diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
+index 145b3c8..425066f 100644
+--- a/fs/nfsd/nfs4recover.c
++++ b/fs/nfsd/nfs4recover.c
+@@ -121,9 +121,9 @@ out_no_tfm:
+ static void
+ nfsd4_sync_rec_dir(void)
+ {
+-	mutex_lock(&rec_dir.path.dentry->d_inode->i_mutex);
+-	nfsd_sync_dir(rec_dir.path.dentry);
+-	mutex_unlock(&rec_dir.path.dentry->d_inode->i_mutex);
++	mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
++	nfsd_sync_dir(rec_dir.dentry);
++	mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
  }
-diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
-index c910413..d89ea38 100644
---- a/fs/nfs/nfs4proc.c
-+++ b/fs/nfs/nfs4proc.c
-@@ -306,7 +306,8 @@ static void nfs4_opendata_free(struct kref *kref)
- 		nfs4_put_open_state(p->state);
- 	nfs4_put_state_owner(p->owner);
- 	dput(p->dir);
--	path_put(&p->path);
-+	dput(p->path.dentry);
-+	mntput(p->path.mnt);
- 	kfree(p);
- }
  
-@@ -318,9 +319,12 @@ static void nfs4_opendata_put(struct nfs4_opendata *p)
+ int
+@@ -143,9 +143,9 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
+ 	nfs4_save_user(&uid, &gid);
  
- static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
- {
-+	sigset_t oldset;
- 	int ret;
+ 	/* lock the parent */
+-	mutex_lock(&rec_dir.path.dentry->d_inode->i_mutex);
++	mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
  
-+	rpc_clnt_sigmask(task->tk_client, &oldset);
- 	ret = rpc_wait_for_completion_task(task);
-+	rpc_clnt_sigunmask(task->tk_client, &oldset);
- 	return ret;
- }
+-	dentry = lookup_one_len(dname, rec_dir.path.dentry, HEXDIR_LEN-1);
++	dentry = lookup_one_len(dname, rec_dir.dentry, HEXDIR_LEN-1);
+ 	if (IS_ERR(dentry)) {
+ 		status = PTR_ERR(dentry);
+ 		goto out_unlock;
+@@ -155,15 +155,15 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
+ 		dprintk("NFSD: nfsd4_create_clid_dir: DIRECTORY EXISTS\n");
+ 		goto out_put;
+ 	}
+-	status = mnt_want_write(rec_dir.path.mnt);
++	status = mnt_want_write(rec_dir.mnt);
+ 	if (status)
+ 		goto out_put;
+-	status = vfs_mkdir(rec_dir.path.dentry->d_inode, dentry, S_IRWXU);
+-	mnt_drop_write(rec_dir.path.mnt);
++	status = vfs_mkdir(rec_dir.dentry->d_inode, dentry, rec_dir.mnt, S_IRWXU);
++	mnt_drop_write(rec_dir.mnt);
+ out_put:
+ 	dput(dentry);
+ out_unlock:
+-	mutex_unlock(&rec_dir.path.dentry->d_inode->i_mutex);
++	mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
+ 	if (status == 0) {
+ 		clp->cl_firststate = 1;
+ 		nfsd4_sync_rec_dir();
+@@ -226,12 +226,12 @@ nfsd4_list_rec_dir(struct dentry *dir, recdir_func *f)
  
-@@ -1209,7 +1213,8 @@ static void nfs4_free_closedata(void *data)
- 	nfs4_put_open_state(calldata->state);
- 	nfs_free_seqid(calldata->arg.seqid);
- 	nfs4_put_state_owner(sp);
--	path_put(&calldata->path);
-+	dput(calldata->path.dentry);
-+	mntput(calldata->path.mnt);
- 	kfree(calldata);
- }
+ 	nfs4_save_user(&uid, &gid);
  
-@@ -1384,7 +1389,7 @@ struct dentry *
- nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
- {
- 	struct path path = {
--		.mnt = nd->path.mnt,
-+		.mnt = nd->mnt,
- 		.dentry = dentry,
- 	};
- 	struct dentry *parent;
-@@ -1432,7 +1437,7 @@ int
- nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, struct nameidata *nd)
- {
- 	struct path path = {
--		.mnt = nd->path.mnt,
-+		.mnt = nd->mnt,
- 		.dentry = dentry,
- 	};
- 	struct rpc_cred *cred;
-@@ -1880,7 +1885,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
-                  int flags, struct nameidata *nd)
- {
- 	struct path path = {
--		.mnt = nd->path.mnt,
-+		.mnt = nd->mnt,
- 		.dentry = dentry,
- 	};
- 	struct nfs4_state *state;
-@@ -2770,9 +2775,9 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server)
- 	return 0;
+-	filp = dentry_open(dget(dir), mntget(rec_dir.path.mnt), O_RDONLY);
++	filp = dentry_open(dget(dir), mntget(rec_dir.mnt), O_RDONLY);
+ 	status = PTR_ERR(filp);
+ 	if (IS_ERR(filp))
+ 		goto out;
+ 	INIT_LIST_HEAD(dentries);
+-	status = vfs_readdir(filp, nfsd4_build_dentrylist, &dla);
++	status = vfs_readdir(filp, (filldir_t)nfsd4_build_dentrylist, &dla);
+ 	fput(filp);
+ 	while (!list_empty(dentries)) {
+ 		child = list_entry(dentries->next, struct dentry_list, list);
+@@ -263,7 +263,7 @@ nfsd4_remove_clid_file(struct dentry *dir, struct dentry *dentry)
+ 		return -EINVAL;
+ 	}
+ 	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
+-	status = vfs_unlink(dir->d_inode, dentry);
++	status = vfs_unlink(dir->d_inode, dentry, rec_dir.mnt);
+ 	mutex_unlock(&dir->d_inode->i_mutex);
+ 	return status;
  }
+@@ -278,7 +278,7 @@ nfsd4_clear_clid_dir(struct dentry *dir, struct dentry *dentry)
+ 	 * a kernel from the future.... */
+ 	nfsd4_list_rec_dir(dentry, nfsd4_remove_clid_file);
+ 	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
+-	status = vfs_rmdir(dir->d_inode, dentry);
++	status = vfs_rmdir(dir->d_inode, dentry, rec_dir.mnt);
+ 	mutex_unlock(&dir->d_inode->i_mutex);
+ 	return status;
+ }
+@@ -291,9 +291,9 @@ nfsd4_unlink_clid_dir(char *name, int namlen)
  
--static int nfs4_wait_bit_killable(void *word)
-+static int nfs4_wait_bit_interruptible(void *word)
- {
--	if (fatal_signal_pending(current))
-+	if (signal_pending(current))
- 		return -ERESTARTSYS;
- 	schedule();
- 	return 0;
-@@ -2780,14 +2785,17 @@ static int nfs4_wait_bit_killable(void *word)
+ 	dprintk("NFSD: nfsd4_unlink_clid_dir. name %.*s\n", namlen, name);
  
- static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs_client *clp)
- {
-+	sigset_t oldset;
- 	int res;
+-	mutex_lock(&rec_dir.path.dentry->d_inode->i_mutex);
+-	dentry = lookup_one_len(name, rec_dir.path.dentry, namlen);
+-	mutex_unlock(&rec_dir.path.dentry->d_inode->i_mutex);
++	mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
++	dentry = lookup_one_len(name, rec_dir.dentry, namlen);
++	mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
+ 	if (IS_ERR(dentry)) {
+ 		status = PTR_ERR(dentry);
+ 		return status;
+@@ -302,7 +302,7 @@ nfsd4_unlink_clid_dir(char *name, int namlen)
+ 	if (!dentry->d_inode)
+ 		goto out;
  
- 	might_sleep();
+-	status = nfsd4_clear_clid_dir(rec_dir.path.dentry, dentry);
++	status = nfsd4_clear_clid_dir(rec_dir.dentry, dentry);
+ out:
+ 	dput(dentry);
+ 	return status;
+@@ -318,7 +318,7 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp)
+ 	if (!rec_dir_init || !clp->cl_firststate)
+ 		return;
  
- 	rwsem_acquire(&clp->cl_sem.dep_map, 0, 0, _RET_IP_);
+-	status = mnt_want_write(rec_dir.path.mnt);
++	status = mnt_want_write(rec_dir.mnt);
+ 	if (status)
+ 		goto out;
+ 	clp->cl_firststate = 0;
+@@ -327,7 +327,7 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp)
+ 	nfs4_reset_user(uid, gid);
+ 	if (status == 0)
+ 		nfsd4_sync_rec_dir();
+-	mnt_drop_write(rec_dir.path.mnt);
++	mnt_drop_write(rec_dir.mnt);
+ out:
+ 	if (status)
+ 		printk("NFSD: Failed to remove expired client state directory"
+@@ -357,17 +357,17 @@ nfsd4_recdir_purge_old(void) {
  
-+	rpc_clnt_sigmask(clnt, &oldset);
- 	res = wait_on_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER,
--			nfs4_wait_bit_killable, TASK_KILLABLE);
-+			nfs4_wait_bit_interruptible, TASK_INTERRUPTIBLE);
-+	rpc_clnt_sigunmask(clnt, &oldset);
+ 	if (!rec_dir_init)
+ 		return;
+-	status = mnt_want_write(rec_dir.path.mnt);
++	status = mnt_want_write(rec_dir.mnt);
+ 	if (status)
+ 		goto out;
+-	status = nfsd4_list_rec_dir(rec_dir.path.dentry, purge_old);
++	status = nfsd4_list_rec_dir(rec_dir.dentry, purge_old);
+ 	if (status == 0)
+ 		nfsd4_sync_rec_dir();
+-	mnt_drop_write(rec_dir.path.mnt);
++	mnt_drop_write(rec_dir.mnt);
+ out:
+ 	if (status)
+ 		printk("nfsd4: failed to purge old clients from recovery"
+-			" directory %s\n", rec_dir.path.dentry->d_name.name);
++			" directory %s\n", rec_dir.dentry->d_name.name);
+ }
  
- 	rwsem_release(&clp->cl_sem.dep_map, 1, _RET_IP_);
- 	return res;
-@@ -2795,6 +2803,7 @@ static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs_client *clp)
+ static int
+@@ -387,10 +387,10 @@ int
+ nfsd4_recdir_load(void) {
+ 	int status;
  
- static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
- {
-+	sigset_t oldset;
- 	int res = 0;
+-	status = nfsd4_list_rec_dir(rec_dir.path.dentry, load_recdir);
++	status = nfsd4_list_rec_dir(rec_dir.dentry, load_recdir);
+ 	if (status)
+ 		printk("nfsd4: failed loading clients from recovery"
+-			" directory %s\n", rec_dir.path.dentry->d_name.name);
++			" directory %s\n", rec_dir.dentry->d_name.name);
+ 	return status;
+ }
  
- 	might_sleep();
-@@ -2803,9 +2812,14 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
- 		*timeout = NFS4_POLL_RETRY_MIN;
- 	if (*timeout > NFS4_POLL_RETRY_MAX)
- 		*timeout = NFS4_POLL_RETRY_MAX;
--	schedule_timeout_killable(*timeout);
--	if (fatal_signal_pending(current))
--		res = -ERESTARTSYS;
-+	rpc_clnt_sigmask(clnt, &oldset);
-+	if (clnt->cl_intr) {
-+		schedule_timeout_interruptible(*timeout);
-+		if (signalled())
-+			res = -ERESTARTSYS;
-+	} else
-+		schedule_timeout_uninterruptible(*timeout);
-+	rpc_clnt_sigunmask(clnt, &oldset);
- 	*timeout <<= 1;
- 	return res;
+@@ -429,5 +429,5 @@ nfsd4_shutdown_recdir(void)
+ 	if (!rec_dir_init)
+ 		return;
+ 	rec_dir_init = 0;
+-	path_put(&rec_dir.path);
++	backport_path_put(&rec_dir);
  }
-@@ -3044,7 +3058,7 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4
- static unsigned long
- nfs4_set_lock_task_retry(unsigned long timeout)
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 1578d7a..1c6df07 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1576,7 +1576,7 @@ static __be32
+ nfs4_upgrade_open(struct svc_rqst *rqstp, struct svc_fh *cur_fh, struct nfs4_stateid *stp, struct nfsd4_open *open)
  {
--	schedule_timeout_killable(timeout);
-+	schedule_timeout_interruptible(timeout);
- 	timeout <<= 1;
- 	if (timeout > NFS4_LOCK_MAXTIMEOUT)
- 		return NFS4_LOCK_MAXTIMEOUT;
-diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
-index 3305acb..36a0aa8 100644
---- a/fs/nfs/nfs4renewd.c
-+++ b/fs/nfs/nfs4renewd.c
-@@ -127,13 +127,14 @@ nfs4_schedule_state_renewal(struct nfs_client *clp)
- void
- nfs4_renewd_prepare_shutdown(struct nfs_server *server)
- {
--	cancel_delayed_work(&server->nfs_client->cl_renewd);
-+	flush_scheduled_work();
- }
+ 	struct file *filp = stp->st_vfs_file;
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 	unsigned int share_access, new_writer;
+ 	__be32 status;
  
- void
- nfs4_kill_renewd(struct nfs_client *clp)
+@@ -1923,7 +1923,7 @@ search_close_lru(u32 st_id, int flags)
+ static inline int
+ nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stateid *stp)
  {
--	cancel_delayed_work_sync(&clp->cl_renewd);
-+	cancel_delayed_work(&clp->cl_renewd);
-+	flush_scheduled_work();
+-	return fhp->fh_dentry->d_inode != stp->st_vfs_file->f_path.dentry->d_inode;
++	return fhp->fh_dentry->d_inode != stp->st_vfs_file->f_dentry->d_inode;
  }
  
- /*
-diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
-index 86147b0..148aebe 100644
---- a/fs/nfs/idmap.c
-+++ b/fs/nfs/idmap.c
-@@ -376,7 +376,7 @@ idmap_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
- static ssize_t
- idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
- {
--	struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
-+	struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode);
- 	struct idmap *idmap = (struct idmap *)rpci->private;
- 	struct idmap_msg im_in, *im = &idmap->idmap_im;
- 	struct idmap_hashtable *h;
-diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
-index 4dbb84d..c351a41 100644
---- a/fs/nfs/proc.c
-+++ b/fs/nfs/proc.c
-@@ -595,7 +595,7 @@ nfs_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
  static int
- nfs_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
+@@ -2838,7 +2838,7 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 	 * only the dentry:inode set.
+ 	 */
+ 	memset(&file, 0, sizeof (struct file));
+-	file.f_path.dentry = cstate->current_fh.fh_dentry;
++	file.f_dentry = cstate->current_fh.fh_dentry;
+ 
+ 	status = nfs_ok;
+ 	error = vfs_test_lock(&file, &file_lock);
+@@ -2934,7 +2934,7 @@ static int
+ check_for_locks(struct file *filp, struct nfs4_stateowner *lowner)
  {
+ 	struct file_lock **flpp;
 -	struct inode *inode = filp->f_path.dentry->d_inode;
 +	struct inode *inode = filp->f_dentry->d_inode;
+ 	int status = 0;
  
- 	return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl);
+ 	lock_kernel();
+@@ -3294,11 +3294,11 @@ nfs4_reset_recoverydir(char *recdir)
+ 	if (status)
+ 		return status;
+ 	status = -ENOTDIR;
+-	if (S_ISDIR(nd.path.dentry->d_inode->i_mode)) {
++	if (S_ISDIR(nd.dentry->d_inode->i_mode)) {
+ 		nfs4_set_recdir(recdir);
+ 		status = 0;
+ 	}
+-	path_put(&nd.path);
++	backport_path_put(&nd);
+ 	return status;
  }
+ 
 diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
-index c53e65f..cb7ba7e 100644
+index c53e65f..c359cbf 100644
 --- a/fs/nfsd/nfsctl.c
 +++ b/fs/nfsd/nfsctl.c
-@@ -121,7 +120,7 @@ static ssize_t (*write_op[])(struct file *, char *, size_t) = {
+@@ -121,7 +121,7 @@ static ssize_t (*write_op[])(struct file *, char *, size_t) = {
  
  static ssize_t nfsctl_transaction_write(struct file *file, const char __user *buf, size_t size, loff_t *pos)
  {
 -	ino_t ino =  file->f_path.dentry->d_inode->i_ino;
-+	ino_t ino =  file->f_dentry->d_inode->i_ino;
++	ino_t ino = file->f_dentry->d_inode->i_ino;
  	char *data;
  	ssize_t rv;
  
-@@ -360,9 +351,10 @@ static ssize_t failover_unlock_fs(struct file *file, char *buf, size_t size)
+@@ -360,9 +360,9 @@ static ssize_t failover_unlock_fs(struct file *file, char *buf, size_t size)
  	if (error)
  		return error;
  
@@ -2670,12 +2345,11 @@
 +	error = nlmsvc_unlock_all_by_sb(nd.mnt->mnt_sb);
  
 -	path_put(&nd.path);
-+	dput(nd.dentry);
-+	mntput(nd.mnt);
++	backport_path_put(&nd);
  	return error;
  }
  
-@@ -809,37 +801,37 @@ static ssize_t write_recoverydir(struct file *file, char *buf, size_t size)
+@@ -809,37 +813,37 @@ static ssize_t write_recoverydir(struct file *file, char *buf, size_t size)
  static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
  {
  	static struct tree_descr nfsd_files[] = {
@@ -2734,71 +2408,38 @@
  }
  
  static struct file_system_type nfsd_fs_type = {
-@@ -857,9 +849,10 @@ static int create_proc_exports_entry(void)
- 	entry = proc_mkdir("fs/nfs", NULL);
- 	if (!entry)
- 		return -ENOMEM;
--	entry = proc_create("exports", 0, entry, &exports_operations);
-+	entry = create_proc_entry("exports", 0, entry);
- 	if (!entry)
- 		return -ENOMEM;
-+	entry->proc_fops = (struct file_operations *)&exports_operations;
- 	return 0;
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index 80292ff..47eb160 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -574,3 +574,5 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
+ 	nfsd_cache_update(rqstp, proc->pc_cachetype, statp + 1);
+ 	return 1;
  }
- #else /* CONFIG_PROC_FS */
-diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
-index 0766f95..d8d1049 100644
---- a/fs/nfsd/nfsproc.c
-+++ b/fs/nfsd/nfsproc.c
-@@ -227,7 +227,7 @@ nfsd_proc_create(struct svc_rqst *rqstp, struct nfsd_createargs *argp,
- 	nfserr = nfserr_exist;
- 	if (isdotent(argp->name, argp->len))
- 		goto done;
--	fh_lock_nested(dirfhp, I_MUTEX_PARENT);
-+	fh_lock(dirfhp);
- 	dchild = lookup_one_len(argp->name, dirfhp->fh_dentry, argp->len);
- 	if (IS_ERR(dchild)) {
- 		nfserr = nfserrno(PTR_ERR(dchild));
-diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
-index ea37c96..c7b0fda 100644
---- a/fs/nfsd/nfsfh.c
-+++ b/fs/nfsd/nfsfh.c
-@@ -51,7 +51,7 @@ static int nfsd_acceptable(void *expv, struct dentry *dentry)
- 		/* make sure parents give x permission to user */
- 		int err;
- 		parent = dget_parent(tdentry);
--		err = inode_permission(parent->d_inode, MAY_EXEC);
-+		err = permission(parent->d_inode, MAY_EXEC, NULL);
- 		if (err < 0) {
- 			dput(parent);
- 			break;
-@@ -186,8 +186,10 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
-                 * access control settings being in effect, we cannot
-                 * fix that case easily.
-                 */
--               current->cap_effective =
--                       cap_raise_nfsd_set(current->cap_effective,
 +
-+#define CAP_NFSD_MASK (CAP_FS_MASK|CAP_TO_MASK(CAP_SYS_RESOURCE))
-+               
-+               cap_t(current->cap_effective) |= (CAP_NFSD_MASK &
-                                           current->cap_permitted);
-        } else {
-                error = nfsd_setuser_and_check_port(rqstp, exp);
++MODULE_LICENSE("Dual BSD/GPL");
 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
-index 18060be..79399df 100644
+index 18060be..8ffadd5 100644
 --- a/fs/nfsd/vfs.c
 +++ b/fs/nfsd/vfs.c
-@@ -369,7 +369,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
- 	/* Revoke setuid/setgid on chown */
- 	if (((iap->ia_valid & ATTR_UID) && iap->ia_uid != inode->i_uid) ||
- 	    ((iap->ia_valid & ATTR_GID) && iap->ia_gid != inode->i_gid)) {
--		iap->ia_valid |= ATTR_KILL_PRIV;
-+		iap->ia_valid |= ATTR_KILL_SUID;
- 		if (iap->ia_valid & ATTR_MODE) {
- 			/* we're setting mode too, just clear the s*id bits */
- 			iap->ia_mode &= ~S_ISUID;
-@@ -388,7 +388,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+@@ -23,7 +23,6 @@
+ #include <linux/file.h>
+ #include <linux/mount.h>
+ #include <linux/major.h>
+-#include <linux/splice.h>
+ #include <linux/proc_fs.h>
+ #include <linux/stat.h>
+ #include <linux/fcntl.h>
+@@ -97,7 +96,7 @@ static struct raparm_hbucket	raparm_hash[RAPARM_HASH_SIZE];
+  */
+ int
+ nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp, 
+-		        struct svc_export **expp)
++			struct svc_export **expp)
+ {
+ 	struct svc_export *exp = *expp, *exp2 = NULL;
+ 	struct dentry *dentry = *dpp;
+@@ -388,7 +387,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
  	err = nfserr_notsync;
  	if (!check_guard || guardtime == inode->i_ctime.tv_sec) {
  		fh_lock(fhp);
@@ -2807,13 +2448,12 @@
  		err = nfserrno(host_err);
  		fh_unlock(fhp);
  	}
-@@ -408,11 +408,12 @@ out_nfserr:
+@@ -408,11 +407,11 @@ out_nfserr:
  #if defined(CONFIG_NFSD_V2_ACL) || \
      defined(CONFIG_NFSD_V3_ACL) || \
      defined(CONFIG_NFSD_V4)
 -static ssize_t nfsd_getxattr(struct dentry *dentry, char *key, void **buf)
-+static ssize_t nfsd_getxattr(struct dentry *dentry, struct vfsmount *mnt, 
-+		char *key, void **buf)
++static ssize_t nfsd_getxattr(struct dentry *dentry, struct vfsmount *mnt, char *key, void **buf)
  {
  	ssize_t buflen;
  
@@ -2822,7 +2462,7 @@
  	if (buflen <= 0)
  		return buflen;
  
-@@ -420,13 +421,13 @@ static ssize_t nfsd_getxattr(struct dentry *dentry, char *key, void **buf)
+@@ -420,13 +419,13 @@ static ssize_t nfsd_getxattr(struct dentry *dentry, char *key, void **buf)
  	if (!*buf)
  		return -ENOMEM;
  
@@ -2838,7 +2478,7 @@
  {
  	int len;
  	size_t buflen;
-@@ -445,7 +446,7 @@ set_nfsv4_acl_one(struct dentry *dentry, struct posix_acl *pacl, char *key)
+@@ -445,7 +444,7 @@ set_nfsv4_acl_one(struct dentry *dentry, struct posix_acl *pacl, char *key)
  		goto out;
  	}
  
@@ -2847,38 +2487,22 @@
  out:
  	kfree(buf);
  	return error;
-@@ -458,6 +459,7 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 	__be32 error;
- 	int host_error;
- 	struct dentry *dentry;
-+	struct vfsmount *mnt;
- 	struct inode *inode;
- 	struct posix_acl *pacl = NULL, *dpacl = NULL;
- 	unsigned int flags = 0;
-@@ -468,6 +470,7 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 		return error;
- 
- 	dentry = fhp->fh_dentry;
-+	mnt = fhp->fh_export->ex_path.mnt;
- 	inode = dentry->d_inode;
- 	if (S_ISDIR(inode->i_mode))
- 		flags = NFS4_ACL_DIR;
-@@ -478,12 +481,12 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
+@@ -478,12 +477,12 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
  	} else if (host_error < 0)
  		goto out_nfserr;
  
 -	host_error = set_nfsv4_acl_one(dentry, pacl, POSIX_ACL_XATTR_ACCESS);
-+	host_error = set_nfsv4_acl_one(dentry, mnt, pacl, POSIX_ACL_XATTR_ACCESS);
++	host_error = set_nfsv4_acl_one(dentry, fhp->fh_export->ex_path.mnt, pacl, POSIX_ACL_XATTR_ACCESS);
  	if (host_error < 0)
  		goto out_release;
  
  	if (S_ISDIR(inode->i_mode))
 -		host_error = set_nfsv4_acl_one(dentry, dpacl, POSIX_ACL_XATTR_DEFAULT);
-+		host_error = set_nfsv4_acl_one(dentry, mnt, dpacl, POSIX_ACL_XATTR_DEFAULT);
++		host_error = set_nfsv4_acl_one(dentry, fhp->fh_export->ex_path.mnt, dpacl, POSIX_ACL_XATTR_DEFAULT);
  
  out_release:
  	posix_acl_release(pacl);
-@@ -496,13 +499,13 @@ out_nfserr:
+@@ -496,13 +495,13 @@ out_nfserr:
  }
  
  static struct posix_acl *
@@ -2894,7 +2518,7 @@
  	if (!buflen)
  		buflen = -ENODATA;
  	if (buflen <= 0)
-@@ -514,14 +517,15 @@ _get_posix_acl(struct dentry *dentry, char *key)
+@@ -514,14 +513,15 @@ _get_posix_acl(struct dentry *dentry, char *key)
  }
  
  int
@@ -2912,7 +2536,7 @@
  	if (IS_ERR(pacl) && PTR_ERR(pacl) == -ENODATA)
  		pacl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
  	if (IS_ERR(pacl)) {
-@@ -531,7 +535,7 @@ nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry, struct nfs4_ac
+@@ -531,7 +531,7 @@ nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry, struct nfs4_ac
  	}
  
  	if (S_ISDIR(inode->i_mode)) {
@@ -2921,21 +2545,14 @@
  		if (IS_ERR(dpacl) && PTR_ERR(dpacl) == -ENODATA)
  			dpacl = NULL;
  		else if (IS_ERR(dpacl)) {
-@@ -697,7 +701,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
- 	 * locks on them because there is no way to know if the accesser has
- 	 * the lock.
- 	 */
--	if (S_ISREG((inode)->i_mode) && mandatory_lock(inode))
-+	if (S_ISREG((inode)->i_mode) && MANDATORY_LOCK(inode))
- 		goto out;
- 
- 	if (!inode->i_fop)
-@@ -766,10 +770,10 @@ static int
+@@ -765,11 +765,11 @@ static inline int nfsd_dosync(struct file *filp, struct dentry *dp,
+ static int
  nfsd_sync(struct file *filp)
  {
-         int err;
+-        int err;
 -	struct inode *inode = filp->f_path.dentry->d_inode;
 -	dprintk("nfsd: sync file %s\n", filp->f_path.dentry->d_name.name);
++	int err;
 +	struct inode *inode = filp->f_dentry->d_inode;
 +	dprintk("nfsd: sync file %s\n", filp->f_dentry->d_name.name);
  	mutex_lock(&inode->i_mutex);
@@ -2944,9 +2561,15 @@
  	mutex_unlock(&inode->i_mutex);
  
  	return err;
-@@ -834,27 +838,21 @@ found:
-  * directly. They will be released after the sending has completed.
-  */
+@@ -828,53 +828,39 @@ found:
+ 	return ra;
+ }
+ 
+-/*
+- * Grab and keep cached pages associated with a file in the svc_rqst
+- * so that they can be passed to the network sendmsg/sendpage routines
+- * directly. They will be released after the sending has completed.
+- */
  static int
 -nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 -		  struct splice_desc *sd)
@@ -2959,28 +2582,39 @@
 -	struct page *page = buf->page;
 -	size_t size;
 -	int ret;
--
+ 
 -	ret = buf->ops->confirm(pipe, buf);
 -	if (unlikely(ret))
 -		return ret;
- 
+-
 -	size = sd->len;
 +	if (size > count)
 +		size = count;
  
  	if (rqstp->rq_res.page_len == 0) {
  		get_page(page);
- 		put_page(*pp);
- 		*pp = page;
- 		rqstp->rq_resused++;
+-		put_page(*pp);
+-		*pp = page;
+-		rqstp->rq_resused++;
 -		rqstp->rq_res.page_base = buf->offset;
++		if (*pp)
++			put_page(*pp);
++		rqstp->rq_respages[rqstp->rq_resused++] = page;
 +		rqstp->rq_res.page_base = offset;
  		rqstp->rq_res.page_len = size;
- 	} else if (page != pp[-1]) {
+-	} else if (page != pp[-1]) {
++	} else if (page != rqstp->rq_respages[rqstp->rq_resused-1]) {
  		get_page(page);
-@@ -866,15 +864,11 @@ nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
- 	} else
+ 		if (*pp)
+ 			put_page(*pp);
+ 		*pp = page;
+-		rqstp->rq_resused++;
++		rqstp->rq_respages[rqstp->rq_resused++] = page;
  		rqstp->rq_res.page_len += size;
+-	} else
++	} else {
+ 		rqstp->rq_res.page_len += size;
++	}
  
 +	desc->count = count - size;
 +	desc->written += size;
@@ -2996,7 +2630,16 @@
  static inline int svc_msnfs(struct svc_fh *ffhp)
  {
  #ifdef MSNFS
-@@ -895,7 +889,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+@@ -886,7 +872,7 @@ static inline int svc_msnfs(struct svc_fh *ffhp)
+ 
+ static __be32
+ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+-              loff_t offset, struct kvec *vec, int vlen, unsigned long *count)
++	      loff_t offset, struct kvec *vec, int vlen, unsigned long *count)
+ {
+ 	struct inode *inode;
+ 	struct raparms	*ra;
+@@ -895,7 +881,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
  	int		host_err;
  
  	err = nfserr_perm;
@@ -3005,7 +2648,7 @@
  
  	if (svc_msnfs(fhp) && !lock_may_read(inode, offset, *count))
  		goto out;
-@@ -906,16 +900,10 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+@@ -906,16 +892,9 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
  	if (ra && ra->p_set)
  		file->f_ra = ra->p_ra;
  
@@ -3017,15 +2660,14 @@
 -			.u.data		= rqstp,
 -		};
 -
-+	if (file->f_op->sendfile && rqstp->rq_splice_ok) {
++	if (file->f_op->sendfile && rqstp->rq_sendfile_ok) {
  		rqstp->rq_resused = 1;
 -		host_err = splice_direct_to_actor(file, &sd, nfsd_direct_splice_actor);
-+		host_err = file->f_op->sendfile(file, &offset, *count,
-+				nfsd_read_actor, rqstp);
++		host_err = file->f_op->sendfile(file, &offset, *count, nfsd_read_actor, rqstp);
  	} else {
  		oldfs = get_fs();
  		set_fs(KERNEL_DS);
-@@ -937,20 +925,20 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+@@ -937,20 +916,20 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
  		nfsdstats.io_read += host_err;
  		*count = host_err;
  		err = 0;
@@ -3041,8 +2683,7 @@
 +static void kill_suid(struct dentry *dentry, struct vfsmount *mnt)
  {
  	struct iattr	ia;
--	ia.ia_valid = ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
-+	ia.ia_valid = ATTR_KILL_SUID | ATTR_KILL_SGID;
+ 	ia.ia_valid = ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
  
  	mutex_lock(&dentry->d_inode->i_mutex);
 -	notify_change(dentry, &ia);
@@ -3050,7 +2691,7 @@
  	mutex_unlock(&dentry->d_inode->i_mutex);
  }
  
-@@ -971,11 +959,11 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+@@ -971,11 +950,11 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
  	err = nfserr_perm;
  
  	if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
@@ -3064,7 +2705,7 @@
  	inode = dentry->d_inode;
  	exp   = fhp->fh_export;
  
-@@ -1004,12 +992,12 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+@@ -1004,12 +983,12 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
  	set_fs(oldfs);
  	if (host_err >= 0) {
  		nfsdstats.io_write += cnt;
@@ -3075,48 +2716,20 @@
  	/* clear setuid/setgid flag after write */
  	if (host_err >= 0 && (inode->i_mode & (S_ISUID | S_ISGID)))
 -		kill_suid(dentry);
-+		kill_suid(dentry, file->f_vfsmnt);
++		kill_suid(dentry, exp->ex_path.mnt);
  
  	if (host_err >= 0 && stable) {
  		static ino_t	last_ino;
-@@ -1030,13 +1018,13 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
- 		if (EX_WGATHER(exp)) {
- 			if (atomic_read(&inode->i_writecount) > 1
- 			    || (last_ino == inode->i_ino && last_dev == inode->i_sb->s_dev)) {
--				dprintk("nfsd: write defer %d\n", task_pid_nr(current));
-+				dprintk("nfsd: write defer %d\n", current->pid);
- 				msleep(10);
--				dprintk("nfsd: write resume %d\n", task_pid_nr(current));
-+				dprintk("nfsd: write resume %d\n", current->pid);
- 			}
- 
- 			if (inode->i_state & I_DIRTY) {
--				dprintk("nfsd: write sync %d\n", task_pid_nr(current));
-+				dprintk("nfsd: write sync %d\n", current->pid);
- 				host_err=nfsd_sync(file);
- 			}
- #if 0
-@@ -1215,7 +1203,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 	 */
- 	if (!resfhp->fh_dentry) {
- 		/* called from nfsd_proc_mkdir, or possibly nfsd3_proc_create */
--		fh_lock_nested(fhp, I_MUTEX_PARENT);
-+		fh_lock(fhp);
- 		dchild = lookup_one_len(fname, dentry, flen);
- 		host_err = PTR_ERR(dchild);
- 		if (IS_ERR(dchild))
-@@ -1257,10 +1245,6 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 		goto out;
- 	}
- 
--	host_err = mnt_want_write(fhp->fh_export->ex_path.mnt);
--	if (host_err)
--		goto out_nfserr;
--
- 	/*
- 	 * Get the dir op function pointer.
- 	 */
-@@ -1270,19 +1254,17 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
+@@ -1129,7 +1108,7 @@ out:
+  */
+ __be32
+ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
+-               loff_t offset, unsigned long count)
++	       loff_t offset, unsigned long count)
+ {
+ 	struct file	*file;
+ 	__be32		err;
+@@ -1270,13 +1249,13 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
  		host_err = vfs_create(dirp, dchild, iap->ia_mode, NULL);
  		break;
  	case S_IFDIR:
@@ -3131,258 +2744,83 @@
 +		host_err = vfs_mknod(dirp, dchild, fhp->fh_export->ex_path.mnt, iap->ia_mode, rdev);
  		break;
  	}
--	if (host_err < 0) {
--		mnt_drop_write(fhp->fh_export->ex_path.mnt);
-+	if (host_err < 0)
- 		goto out_nfserr;
--	}
- 
- 	if (EX_ISSYNC(fhp->fh_export)) {
- 		err = nfserrno(nfsd_sync_dir(dentry));
-@@ -1292,7 +1274,6 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 	err2 = nfsd_create_setattr(rqstp, resfhp, iap);
- 	if (err2)
- 		err = err2;
--	mnt_drop_write(fhp->fh_export->ex_path.mnt);
- 	/*
- 	 * Update the file handle to get the new inode info.
- 	 */
-@@ -1345,7 +1326,7 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 	err = nfserr_notdir;
- 	if(!dirp->i_op || !dirp->i_op->lookup)
- 		goto out;
--	fh_lock_nested(fhp, I_MUTEX_PARENT);
-+	fh_lock(fhp);
- 
- 	/*
- 	 * Compose the response file handle.
-@@ -1370,9 +1351,6 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 		v_atime = verifier[1]&0x7fffffff;
- 	}
- 	
--	host_err = mnt_want_write(fhp->fh_export->ex_path.mnt);
--	if (host_err)
--		goto out_nfserr;
- 	if (dchild->d_inode) {
- 		err = 0;
- 
-@@ -1404,15 +1382,12 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 		case NFS3_CREATE_GUARDED:
- 			err = nfserr_exist;
- 		}
--		mnt_drop_write(fhp->fh_export->ex_path.mnt);
- 		goto out;
- 	}
- 
- 	host_err = vfs_create(dirp, dchild, iap->ia_mode, NULL);
--	if (host_err < 0) {
--		mnt_drop_write(fhp->fh_export->ex_path.mnt);
-+	if (host_err < 0)
- 		goto out_nfserr;
--	}
- 	if (created)
- 		*created = 1;
- 
-@@ -1437,7 +1412,6 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 	if (err2)
- 		err = err2;
- 
--	mnt_drop_write(fhp->fh_export->ex_path.mnt);
- 	/*
- 	 * Update the filehandle to get the new inode info.
- 	 */
-@@ -1516,6 +1490,7 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 	if (host_err < 0) {
+@@ -1316,7 +1295,7 @@ __be32
+ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 		char *fname, int flen, struct iattr *iap,
+ 		struct svc_fh *resfhp, int createmode, u32 *verifier,
+-	        int *truncp, int *created)
++		int *truncp, int *created)
+ {
+ 	struct dentry	*dentry, *dchild = NULL;
+ 	struct inode	*dirp;
+@@ -1516,6 +1495,7 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
  	struct dentry	*dentry, *dnew;
  	__be32		err, cerr;
  	int		host_err;
-+	umode_t         mode;
++	umode_t		mode;
  
  	err = nfserr_noent;
  	if (!flen || !plen)
-@@ -1534,9 +1509,10 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 	if (IS_ERR(dnew))
+@@ -1538,6 +1518,11 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 	if (host_err)
  		goto out_nfserr;
  
--	host_err = mnt_want_write(fhp->fh_export->ex_path.mnt);
--	if (host_err)
--		goto out_nfserr;
-+	mode = S_IALLUGO;
-+	/* Only the MODE ATTRibute is even vaguely meaningful */
-+	if (iap && (iap->ia_valid & ATTR_MODE))
++	mode = S_IALLUGO;
++	/* Only the MODE ATTRibute is even vaguely meaningful */
++	if (iap && (iap->ia_valid & ATTR_MODE))
 +		mode = iap->ia_mode & S_IALLUGO;
- 
++
  	if (unlikely(path[plen] != 0)) {
  		char *path_alloced = kmalloc(plen+1, GFP_KERNEL);
-@@ -1545,11 +1521,14 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 		if (path_alloced == NULL)
+@@ -1545,11 +1530,11 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
  		else {
  			strncpy(path_alloced, path, plen);
  			path_alloced[plen] = 0;
 -			host_err = vfs_symlink(dentry->d_inode, dnew, path_alloced);
-+			host_err = vfs_symlink(dentry->d_inode, dnew, 
-+					fhp->fh_export->ex_path.mnt, 
-+					path_alloced, mode);
++			host_err = vfs_symlink(dentry->d_inode, dnew, fhp->fh_export->ex_path.mnt, path_alloced, mode);
  			kfree(path_alloced);
  		}
  	} else
 -		host_err = vfs_symlink(dentry->d_inode, dnew, path);
-+		host_err = vfs_symlink(dentry->d_inode, dnew, 
-+				fhp->fh_export->ex_path.mnt, path, mode);
++		host_err = vfs_symlink(dentry->d_inode, dnew, fhp->fh_export->ex_path.mnt, path, mode);
  
  	if (!host_err) {
  		if (EX_ISSYNC(fhp->fh_export))
-@@ -1558,8 +1537,6 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 	err = nfserrno(host_err);
- 	fh_unlock(fhp);
- 
--	mnt_drop_write(fhp->fh_export->ex_path.mnt);
--
- 	cerr = fh_compose(resfhp, fhp->fh_export, dnew, fhp);
- 	dput(dnew);
- 	if (err==0) err = cerr;
-@@ -1598,7 +1575,7 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
- 	if (isdotent(name, len))
- 		goto out;
- 
--	fh_lock_nested(ffhp, I_MUTEX_PARENT);
-+	fh_lock(ffhp);
- 	ddir = ffhp->fh_dentry;
- 	dirp = ddir->d_inode;
- 
-@@ -1610,12 +1587,8 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
- 	dold = tfhp->fh_dentry;
- 	dest = dold->d_inode;
- 
--	host_err = mnt_want_write(tfhp->fh_export->ex_path.mnt);
--	if (host_err) {
--		err = nfserrno(host_err);
--		goto out_dput;
--	}
+@@ -1615,7 +1600,8 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
+ 		err = nfserrno(host_err);
+ 		goto out_dput;
+ 	}
 -	host_err = vfs_link(dold, dirp, dnew);
 +	host_err = vfs_link(dold, tfhp->fh_export->ex_path.mnt, dirp, 
 +			dnew, ffhp->fh_export->ex_path.mnt);
  	if (!host_err) {
  		if (EX_ISSYNC(ffhp->fh_export)) {
  			err = nfserrno(nfsd_sync_dir(ddir));
-@@ -1628,8 +1601,6 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
- 		else
- 			err = nfserrno(host_err);
- 	}
--	mnt_drop_write(tfhp->fh_export->ex_path.mnt);
--out_dput:
- 	dput(dnew);
- out_unlock:
- 	fh_unlock(ffhp);
-@@ -1712,19 +1683,15 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
- 	host_err = -EXDEV;
- 	if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
+@@ -1716,7 +1702,8 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
+ 	if (host_err)
  		goto out_dput_new;
--	host_err = mnt_want_write(ffhp->fh_export->ex_path.mnt);
--	if (host_err)
--		goto out_dput_new;
  
 -	host_err = vfs_rename(fdir, odentry, tdir, ndentry);
-+	host_err = vfs_rename(fdir, odentry, ffhp->fh_export->ex_path.mnt, 
++	host_err = vfs_rename(fdir, odentry, ffhp->fh_export->ex_path.mnt,
 +			tdir, ndentry, tfhp->fh_export->ex_path.mnt);
  	if (!host_err && EX_ISSYNC(tfhp->fh_export)) {
  		host_err = nfsd_sync_dir(tdentry);
  		if (!host_err)
- 			host_err = nfsd_sync_dir(fdentry);
- 	}
- 
--	mnt_drop_write(ffhp->fh_export->ex_path.mnt);
--
-  out_dput_new:
- 	dput(ndentry);
-  out_dput_old:
-@@ -1754,6 +1721,7 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
- 				char *fname, int flen)
- {
- 	struct dentry	*dentry, *rdentry;
-+	struct svc_export *exp;
- 	struct inode	*dirp;
- 	__be32		err;
- 	int		host_err;
-@@ -1765,9 +1733,10 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
- 	if (err)
- 		goto out;
- 
--	fh_lock_nested(fhp, I_MUTEX_PARENT);
-+	fh_lock(fhp);
- 	dentry = fhp->fh_dentry;
- 	dirp = dentry->d_inode;
-+	exp = fhp->fh_export;
- 
- 	rdentry = lookup_one_len(fname, dentry, flen);
- 	host_err = PTR_ERR(rdentry);
-@@ -1783,10 +1752,6 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
- 	if (!type)
- 		type = rdentry->d_inode->i_mode & S_IFMT;
- 
--	host_err = mnt_want_write(fhp->fh_export->ex_path.mnt);
--	if (host_err)
--		goto out_nfserr;
--
- 	if (type != S_IFDIR) { /* It's UNLINK */
- #ifdef MSNFS
- 		if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
-@@ -1794,20 +1759,18 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+@@ -1794,9 +1781,9 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
  			host_err = -EPERM;
  		} else
  #endif
 -		host_err = vfs_unlink(dirp, rdentry);
-+		host_err = vfs_unlink(dirp, rdentry, exp->ex_path.mnt);
++		host_err = vfs_unlink(dirp, rdentry, fhp->fh_export->ex_path.mnt);
  	} else { /* It's RMDIR */
 -		host_err = vfs_rmdir(dirp, rdentry);
-+		host_err = vfs_rmdir(dirp, rdentry, exp->ex_path.mnt);
++		host_err = vfs_rmdir(dirp, rdentry, fhp->fh_export->ex_path.mnt);
  	}
  
  	dput(rdentry);
- 
- 	if (host_err)
--		goto out_drop;
-+		goto out_nfserr;
- 	if (EX_ISSYNC(fhp->fh_export))
- 		host_err = nfsd_sync_dir(dentry);
- 
--out_drop:
--	mnt_drop_write(fhp->fh_export->ex_path.mnt);
- out_nfserr:
- 	err = nfserrno(host_err);
- out:
-@@ -1904,7 +1867,7 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
- 		inode->i_mode,
- 		IS_IMMUTABLE(inode)?	" immut" : "",
- 		IS_APPEND(inode)?	" append" : "",
--		__mnt_is_readonly(exp->ex_path.mnt)?	" ro" : "");
-+		IS_RDONLY(inode)?	" ro" : "");
- 	dprintk("      owner %d/%d user %d/%d\n",
- 		inode->i_uid, inode->i_gid, current->fsuid, current->fsgid);
- #endif
-@@ -1916,7 +1879,7 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
- 	if (!(acc & NFSD_MAY_LOCAL_ACCESS))
- 		if (acc & (NFSD_MAY_WRITE | NFSD_MAY_SATTR | NFSD_MAY_TRUNC)) {
- 			if (exp_rdonly(rqstp, exp) ||
--			    __mnt_is_readonly(exp->ex_path.mnt))
-+			    IS_RDONLY(inode))
- 				return nfserr_rofs;
- 			if (/* (acc & NFSD_MAY_WRITE) && */ IS_IMMUTABLE(inode))
- 				return nfserr_perm;
-@@ -1953,12 +1916,12 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
- 		return 0;
- 
- 	/* This assumes  NFSD_MAY_{READ,WRITE,EXEC} == MAY_{READ,WRITE,EXEC} */
--	err = inode_permission(inode, acc & (MAY_READ|MAY_WRITE|MAY_EXEC));
-+	err = permission(inode, acc & (MAY_READ|MAY_WRITE|MAY_EXEC), NULL);
- 
- 	/* Allow read access to binaries even when mode 111 */
- 	if (err == -EACCES && S_ISREG(inode->i_mode) &&
- 	    acc == (NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE))
--		err = inode_permission(inode, MAY_EXEC);
-+		err = permission(inode, MAY_EXEC, NULL);
- 
- 	return err? nfserrno(err) : 0;
- }
-@@ -2036,7 +1999,7 @@ nfsd_get_posix_acl(struct svc_fh *fhp, int type)
+@@ -2036,7 +2023,7 @@ nfsd_get_posix_acl(struct svc_fh *fhp, int type)
  		return ERR_PTR(-EOPNOTSUPP);
  	}
  
@@ -3391,960 +2829,1044 @@
  	if (size < 0)
  		return ERR_PTR(size);
  
-@@ -2048,6 +2011,7 @@ nfsd_get_posix_acl(struct svc_fh *fhp, int type)
- int
- nfsd_set_posix_acl(struct svc_fh *fhp, int type, struct posix_acl *acl)
- {
-+	struct vfsmount *mnt;
- 	struct inode *inode = fhp->fh_dentry->d_inode;
- 	char *name;
- 	void *value = NULL;
-@@ -2080,21 +2044,18 @@ nfsd_set_posix_acl(struct svc_fh *fhp, int type, struct posix_acl *acl)
- 	} else
- 		size = 0;
- 
--	error = mnt_want_write(fhp->fh_export->ex_path.mnt);
--	if (error)
--		goto getout;
-+	mnt = fhp->fh_export->ex_path.mnt;
+@@ -2084,12 +2071,12 @@ nfsd_set_posix_acl(struct svc_fh *fhp, int type, struct posix_acl *acl)
+ 	if (error)
+ 		goto getout;
  	if (size)
 -		error = vfs_setxattr(fhp->fh_dentry, name, value, size, 0);
-+		error = vfs_setxattr(fhp->fh_dentry, mnt, name, value, size, 0, NULL);
++		error = vfs_setxattr(fhp->fh_dentry, fhp->fh_export->ex_path.mnt, name, value, size, 0, NULL);
  	else {
  		if (!S_ISDIR(inode->i_mode) && type == ACL_TYPE_DEFAULT)
  			error = 0;
  		else {
 -			error = vfs_removexattr(fhp->fh_dentry, name);
-+			error = vfs_removexattr(fhp->fh_dentry, mnt, name, NULL);
++			error = vfs_removexattr(fhp->fh_dentry, fhp->fh_export->ex_path.mnt, name, NULL);
  			if (error == -ENODATA)
  				error = 0;
  		}
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 14ba4d9..0dad782 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -1446,7 +1446,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
  	}
--	mnt_drop_write(fhp->fh_export->ex_path.mnt);
+ 	if (bmval0 & (FATTR4_WORD0_ACL | FATTR4_WORD0_ACLSUPPORT
+ 			| FATTR4_WORD0_SUPPORTED_ATTRS)) {
+-		err = nfsd4_get_nfs4_acl(rqstp, dentry, &acl);
++		err = nfsd4_get_nfs4_acl(rqstp, dentry, exp->ex_path.mnt, &acl);
+ 		aclsupport = (err == 0);
+ 		if (bmval0 & FATTR4_WORD0_ACL) {
+ 			if (err == -EOPNOTSUPP)
+@@ -2383,7 +2383,7 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
+ 	offset = readdir->rd_cookie;
+ 	nfserr = nfsd_readdir(readdir->rd_rqstp, readdir->rd_fhp,
+ 			      &offset,
+-			      &readdir->common, nfsd4_encode_dirent);
++			      &readdir->common, (filldir_t)nfsd4_encode_dirent);
+ 	if (nfserr == nfs_ok &&
+ 	    readdir->common.err == nfserr_toosmall &&
+ 	    readdir->buffer == page) 
+diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
+index 27e772c..d932fb1 100644
+--- a/include/linux/exportfs.h
++++ b/include/linux/exportfs.h
+@@ -89,85 +89,9 @@ struct fid {
+ 	};
+ };
  
- getout:
- 	kfree(value);
-diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
-index 9dc036f..469cb0e 100644
---- a/fs/nfsd/export.c
-+++ b/fs/nfsd/export.c
-@@ -64,8 +64,10 @@ static void expkey_put(struct kref *ref)
- 	struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref);
+-/**
+- * struct export_operations - for nfsd to communicate with file systems
+- * @encode_fh:      encode a file handle fragment from a dentry
+- * @fh_to_dentry:   find the implied object and get a dentry for it
+- * @fh_to_parent:   find the implied object's parent and get a dentry for it
+- * @get_name:       find the name for a given inode in a given directory
+- * @get_parent:     find the parent of a given directory
+- *
+- * See Documentation/filesystems/Exporting for details on how to use
+- * this interface correctly.
+- *
+- * encode_fh:
+- *    @encode_fh should store in the file handle fragment @fh (using at most
+- *    @max_len bytes) information that can be used by @decode_fh to recover the
+- *    file refered to by the &struct dentry @de.  If the @connectable flag is
+- *    set, the encode_fh() should store sufficient information so that a good
+- *    attempt can be made to find not only the file but also it's place in the
+- *    filesystem.   This typically means storing a reference to de->d_parent in
+- *    the filehandle fragment.  encode_fh() should return the number of bytes
+- *    stored or a negative error code such as %-ENOSPC
+- *
+- * fh_to_dentry:
+- *    @fh_to_dentry is given a &struct super_block (@sb) and a file handle
+- *    fragment (@fh, @fh_len). It should return a &struct dentry which refers
+- *    to the same file that the file handle fragment refers to.  If it cannot,
+- *    it should return a %NULL pointer if the file was found but no acceptable
+- *    &dentries were available, or an %ERR_PTR error code indicating why it
+- *    couldn't be found (e.g. %ENOENT or %ENOMEM).  Any suitable dentry can be
+- *    returned including, if necessary, a new dentry created with d_alloc_root.
+- *    The caller can then find any other extant dentries by following the
+- *    d_alias links.
+- *
+- * fh_to_parent:
+- *    Same as @fh_to_dentry, except that it returns a pointer to the parent
+- *    dentry if it was encoded into the filehandle fragment by @encode_fh.
+- *
+- * get_name:
+- *    @get_name should find a name for the given @child in the given @parent
+- *    directory.  The name should be stored in the @name (with the
+- *    understanding that it is already pointing to a a %NAME_MAX+1 sized
+- *    buffer.   get_name() should return %0 on success, a negative error code
+- *    or error.  @get_name will be called without @parent->i_mutex held.
+- *
+- * get_parent:
+- *    @get_parent should find the parent directory for the given @child which
+- *    is also a directory.  In the event that it cannot be found, or storage
+- *    space cannot be allocated, a %ERR_PTR should be returned.
+- *
+- * Locking rules:
+- *    get_parent is called with child->d_inode->i_mutex down
+- *    get_name is not (which is possibly inconsistent)
+- */
+-
+-struct export_operations {
+-	int (*encode_fh)(struct dentry *de, __u32 *fh, int *max_len,
+-			int connectable);
+-	struct dentry * (*fh_to_dentry)(struct super_block *sb, struct fid *fid,
+-			int fh_len, int fh_type);
+-	struct dentry * (*fh_to_parent)(struct super_block *sb, struct fid *fid,
+-			int fh_len, int fh_type);
+-	int (*get_name)(struct dentry *parent, char *name,
+-			struct dentry *child);
+-	struct dentry * (*get_parent)(struct dentry *child);
+-};
+-
+ extern int exportfs_encode_fh(struct dentry *dentry, struct fid *fid,
+ 	int *max_len, int connectable);
+ extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
+ 	int fh_len, int fileid_type, int (*acceptable)(void *, struct dentry *),
+ 	void *context);
+-
+-/*
+- * Generic helpers for filesystems.
+- */
+-extern struct dentry *generic_fh_to_dentry(struct super_block *sb,
+-	struct fid *fid, int fh_len, int fh_type,
+-	struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
+-extern struct dentry *generic_fh_to_parent(struct super_block *sb,
+-	struct fid *fid, int fh_len, int fh_type,
+-	struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
+-
+ #endif /* LINUX_EXPORTFS_H */
+diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
+index dbb87ab..9236e80 100644
+--- a/include/linux/lockd/lockd.h
++++ b/include/linux/lockd/lockd.h
+@@ -230,7 +230,7 @@ int           nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr);
  
- 	if (test_bit(CACHE_VALID, &key->h.flags) &&
--	    !test_bit(CACHE_NEGATIVE, &key->h.flags))
--		path_put(&key->ek_path);
-+	    !test_bit(CACHE_NEGATIVE, &key->h.flags)) {
-+		dput(key->ek_path.dentry);
-+		mntput(key->ek_path.mnt);
-+	}
- 	auth_domain_put(key->ek_client);
- 	kfree(key);
+ static inline struct inode *nlmsvc_file_inode(struct nlm_file *file)
+ {
+-	return file->f_file->f_path.dentry->d_inode;
++	return file->f_file->f_dentry->d_inode;
  }
-@@ -168,14 +170,16 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
- 			goto out;
  
- 		dprintk("Found the path %s\n", buf);
--		key.ek_path = nd.path;
-+		key.ek_path.mnt = nd.mnt;
-+		key.ek_path.dentry = nd.dentry;
+ /*
+diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
+index 78a5922..e59d828 100644
+--- a/include/linux/nfs_fs.h
++++ b/include/linux/nfs_fs.h
+@@ -9,6 +9,7 @@
+ #ifndef _LINUX_NFS_FS_H
+ #define _LINUX_NFS_FS_H
  
- 		ek = svc_expkey_update(&key, ek);
- 		if (ek)
- 			cache_put(&ek->h, &svc_expkey_cache);
- 		else
- 			err = -ENOMEM;
--		path_put(&nd.path);
-+		dput(nd.dentry);
-+		mntput(nd.mnt);
- 	}
- 	cache_flush();
-  out:
-@@ -204,7 +208,7 @@ static int expkey_show(struct seq_file *m,
- 	if (test_bit(CACHE_VALID, &h->flags) && 
- 	    !test_bit(CACHE_NEGATIVE, &h->flags)) {
- 		seq_printf(m, " ");
--		seq_path(m, &ek->ek_path, "\\ \t\n");
-+		seq_path(m, ek->ek_path.mnt, ek->ek_path.dentry, "\\ \t\n");
- 	}
- 	seq_printf(m, "\n");
- 	return 0;
-@@ -241,8 +245,8 @@ static inline void expkey_update(struct cache_head *cnew,
- 	struct svc_expkey *new = container_of(cnew, struct svc_expkey, h);
- 	struct svc_expkey *item = container_of(citem, struct svc_expkey, h);
++#include <linux/path.h>
+ #include <linux/magic.h>
  
--	new->ek_path = item->ek_path;
--	path_get(&item->ek_path);
-+	new->ek_path.mnt = mntget(item->ek_path.mnt);
-+	new->ek_path.dentry = dget(item->ek_path.dentry);
- }
+ /* Default timeout values */
+@@ -331,7 +332,7 @@ extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *);
+ extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr);
+ extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr);
+ extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
+-extern int nfs_permission(struct inode *, int);
++extern int nfs_permission(struct inode *, int, struct nameidata *);
+ extern int nfs_open(struct inode *, struct file *);
+ extern int nfs_release(struct inode *, struct file *);
+ extern int nfs_attribute_timeout(struct inode *inode);
+@@ -358,9 +359,9 @@ static inline void nfs_fattr_init(struct nfs_fattr *fattr)
+ /*
+  * linux/fs/nfs/file.c
+  */
+-extern const struct inode_operations nfs_file_inode_operations;
++extern struct inode_operations nfs_file_inode_operations;
+ #ifdef CONFIG_NFS_V3
+-extern const struct inode_operations nfs3_file_inode_operations;
++extern struct inode_operations nfs3_file_inode_operations;
+ #endif /* CONFIG_NFS_V3 */
+ extern const struct file_operations nfs_file_operations;
+ extern const struct address_space_operations nfs_file_aops;
+@@ -408,9 +409,9 @@ extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
+ /*
+  * linux/fs/nfs/dir.c
+  */
+-extern const struct inode_operations nfs_dir_inode_operations;
++extern struct inode_operations nfs_dir_inode_operations;
+ #ifdef CONFIG_NFS_V3
+-extern const struct inode_operations nfs3_dir_inode_operations;
++extern struct inode_operations nfs3_dir_inode_operations;
+ #endif /* CONFIG_NFS_V3 */
+ extern const struct file_operations nfs_dir_operations;
+ extern struct dentry_operations nfs_dentry_operations;
+@@ -423,7 +424,7 @@ extern void nfs_access_zap_cache(struct inode *inode);
+ /*
+  * linux/fs/nfs/symlink.c
+  */
+-extern const struct inode_operations nfs_symlink_inode_operations;
++extern struct inode_operations nfs_symlink_inode_operations;
  
- static struct cache_head *expkey_alloc(void)
-@@ -330,7 +334,8 @@ static void nfsd4_fslocs_free(struct nfsd4_fs_locations *fsloc)
- static void svc_export_put(struct kref *ref)
- {
- 	struct svc_export *exp = container_of(ref, struct svc_export, h.ref);
--	path_put(&exp->ex_path);
-+	dput(exp->ex_path.dentry);
-+	mntput(exp->ex_path.mnt);
- 	auth_domain_put(exp->ex_client);
- 	kfree(exp->ex_pathname);
- 	nfsd4_fslocs_free(&exp->ex_fslocs);
-@@ -346,7 +351,7 @@ static void svc_export_request(struct cache_detail *cd,
- 	char *pth;
+ /*
+  * linux/fs/nfs/sysctl.c
+@@ -439,8 +440,8 @@ extern void nfs_unregister_sysctl(void);
+ /*
+  * linux/fs/nfs/namespace.c
+  */
+-extern const struct inode_operations nfs_mountpoint_inode_operations;
+-extern const struct inode_operations nfs_referral_inode_operations;
++extern struct inode_operations nfs_mountpoint_inode_operations;
++extern struct inode_operations nfs_referral_inode_operations;
+ extern int nfs_mountpoint_expiry_timeout;
+ extern void nfs_release_automount_timer(void);
  
- 	qword_add(bpp, blen, exp->ex_client->name);
--	pth = d_path(&exp->ex_path, *bpp, *blen);
-+	pth = d_path(exp->ex_path.dentry, exp->ex_path.mnt, *bpp, *blen);
- 	if (IS_ERR(pth)) {
- 		/* is this correct? */
- 		(*bpp)[0] = '\n';
-@@ -504,7 +509,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
- 	struct svc_export exp, *expp;
- 	int an_int;
+diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
+index 8c77c11..d9007dc 100644
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -782,8 +782,8 @@ struct nfs_access_entry;
+ struct nfs_rpc_ops {
+ 	u32	version;		/* Protocol version */
+ 	struct dentry_operations *dentry_ops;
+-	const struct inode_operations *dir_inode_ops;
+-	const struct inode_operations *file_inode_ops;
++	struct inode_operations *dir_inode_ops;
++	struct inode_operations *file_inode_ops;
  
--	nd.path.dentry = NULL;
-+	nd.dentry = NULL;
- 	exp.ex_pathname = NULL;
+ 	int	(*getroot) (struct nfs_server *, struct nfs_fh *,
+ 			    struct nfs_fsinfo *);
+diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
+index 5431512..3753e4b 100644
+--- a/include/linux/nfsd/export.h
++++ b/include/linux/nfsd/export.h
+@@ -15,6 +15,7 @@
+ # include <linux/types.h>
+ # include <linux/in.h>
+ #endif
++#include <linux/path.h>
  
- 	/* fs locations */
-@@ -544,8 +549,8 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
+ /*
+  * Important limits for the exports stuff.
+diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h
+index 108f47e..2389a2e 100644
+--- a/include/linux/nfsd/nfsd.h
++++ b/include/linux/nfsd/nfsd.h
+@@ -85,7 +85,8 @@ __be32		nfsd_setattr(struct svc_rqst *, struct svc_fh *,
+ #ifdef CONFIG_NFSD_V4
+ __be32          nfsd4_set_nfs4_acl(struct svc_rqst *, struct svc_fh *,
+                     struct nfs4_acl *);
+-int             nfsd4_get_nfs4_acl(struct svc_rqst *, struct dentry *, struct nfs4_acl **);
++int             nfsd4_get_nfs4_acl(struct svc_rqst *, struct dentry *, 
++		struct vfsmount *mnt, struct nfs4_acl **);
+ #endif /* CONFIG_NFSD_V4 */
+ __be32		nfsd_create(struct svc_rqst *, struct svc_fh *,
+ 				char *name, int len, struct iattr *attrs,
+diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
+deleted file mode 100644
+index 8e41202..0000000
+--- a/include/linux/pipe_fs_i.h
++++ /dev/null
+@@ -1,151 +0,0 @@
+-#ifndef _LINUX_PIPE_FS_I_H
+-#define _LINUX_PIPE_FS_I_H
+-
+-#define PIPEFS_MAGIC 0x50495045
+-
+-#define PIPE_BUFFERS (16)
+-
+-#define PIPE_BUF_FLAG_LRU	0x01	/* page is on the LRU */
+-#define PIPE_BUF_FLAG_ATOMIC	0x02	/* was atomically mapped */
+-#define PIPE_BUF_FLAG_GIFT	0x04	/* page is a gift */
+-
+-/**
+- *	struct pipe_buffer - a linux kernel pipe buffer
+- *	@page: the page containing the data for the pipe buffer
+- *	@offset: offset of data inside the @page
+- *	@len: length of data inside the @page
+- *	@ops: operations associated with this buffer. See @pipe_buf_operations.
+- *	@flags: pipe buffer flags. See above.
+- *	@private: private data owned by the ops.
+- **/
+-struct pipe_buffer {
+-	struct page *page;
+-	unsigned int offset, len;
+-	const struct pipe_buf_operations *ops;
+-	unsigned int flags;
+-	unsigned long private;
+-};
+-
+-/**
+- *	struct pipe_inode_info - a linux kernel pipe
+- *	@wait: reader/writer wait point in case of empty/full pipe
+- *	@nrbufs: the number of non-empty pipe buffers in this pipe
+- *	@curbuf: the current pipe buffer entry
+- *	@tmp_page: cached released page
+- *	@readers: number of current readers of this pipe
+- *	@writers: number of current writers of this pipe
+- *	@waiting_writers: number of writers blocked waiting for room
+- *	@r_counter: reader counter
+- *	@w_counter: writer counter
+- *	@fasync_readers: reader side fasync
+- *	@fasync_writers: writer side fasync
+- *	@inode: inode this pipe is attached to
+- *	@bufs: the circular array of pipe buffers
+- **/
+-struct pipe_inode_info {
+-	wait_queue_head_t wait;
+-	unsigned int nrbufs, curbuf;
+-	struct page *tmp_page;
+-	unsigned int readers;
+-	unsigned int writers;
+-	unsigned int waiting_writers;
+-	unsigned int r_counter;
+-	unsigned int w_counter;
+-	struct fasync_struct *fasync_readers;
+-	struct fasync_struct *fasync_writers;
+-	struct inode *inode;
+-	struct pipe_buffer bufs[PIPE_BUFFERS];
+-};
+-
+-/*
+- * Note on the nesting of these functions:
+- *
+- * ->confirm()
+- *	->steal()
+- *	...
+- *	->map()
+- *	...
+- *	->unmap()
+- *
+- * That is, ->map() must be called on a confirmed buffer,
+- * same goes for ->steal(). See below for the meaning of each
+- * operation. Also see kerneldoc in fs/pipe.c for the pipe
+- * and generic variants of these hooks.
+- */
+-struct pipe_buf_operations {
+-	/*
+-	 * This is set to 1, if the generic pipe read/write may coalesce
+-	 * data into an existing buffer. If this is set to 0, a new pipe
+-	 * page segment is always used for new data.
+-	 */
+-	int can_merge;
+-
+-	/*
+-	 * ->map() returns a virtual address mapping of the pipe buffer.
+-	 * The last integer flag reflects whether this should be an atomic
+-	 * mapping or not. The atomic map is faster, however you can't take
+-	 * page faults before calling ->unmap() again. So if you need to eg
+-	 * access user data through copy_to/from_user(), then you must get
+-	 * a non-atomic map. ->map() uses the KM_USER0 atomic slot for
+-	 * atomic maps, so you can't map more than one pipe_buffer at once
+-	 * and you have to be careful if mapping another page as source
+-	 * or destination for a copy (IOW, it has to use something else
+-	 * than KM_USER0).
+-	 */
+-	void * (*map)(struct pipe_inode_info *, struct pipe_buffer *, int);
+-
+-	/*
+-	 * Undoes ->map(), finishes the virtual mapping of the pipe buffer.
+-	 */
+-	void (*unmap)(struct pipe_inode_info *, struct pipe_buffer *, void *);
+-
+-	/*
+-	 * ->confirm() verifies that the data in the pipe buffer is there
+-	 * and that the contents are good. If the pages in the pipe belong
+-	 * to a file system, we may need to wait for IO completion in this
+-	 * hook. Returns 0 for good, or a negative error value in case of
+-	 * error.
+-	 */
+-	int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *);
+-
+-	/*
+-	 * When the contents of this pipe buffer has been completely
+-	 * consumed by a reader, ->release() is called.
+-	 */
+-	void (*release)(struct pipe_inode_info *, struct pipe_buffer *);
+-
+-	/*
+-	 * Attempt to take ownership of the pipe buffer and its contents.
+-	 * ->steal() returns 0 for success, in which case the contents
+-	 * of the pipe (the buf->page) is locked and now completely owned
+-	 * by the caller. The page may then be transferred to a different
+-	 * mapping, the most often used case is insertion into different
+-	 * file address space cache.
+-	 */
+-	int (*steal)(struct pipe_inode_info *, struct pipe_buffer *);
+-
+-	/*
+-	 * Get a reference to the pipe buffer.
+-	 */
+-	void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
+-};
+-
+-/* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual
+-   memory allocation, whereas PIPE_BUF makes atomicity guarantees.  */
+-#define PIPE_SIZE		PAGE_SIZE
+-
+-/* Drop the inode semaphore and wait for a pipe event, atomically */
+-void pipe_wait(struct pipe_inode_info *pipe);
+-
+-struct pipe_inode_info * alloc_pipe_info(struct inode * inode);
+-void free_pipe_info(struct inode * inode);
+-void __free_pipe_info(struct pipe_inode_info *);
+-
+-/* Generic pipe buffer ops functions */
+-void *generic_pipe_buf_map(struct pipe_inode_info *, struct pipe_buffer *, int);
+-void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void *);
+-void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
+-int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
+-int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+-
+-#endif
+diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h
+index 10709cb..9bbadbd 100644
+--- a/include/linux/sunrpc/debug.h
++++ b/include/linux/sunrpc/debug.h
+@@ -88,6 +88,7 @@ enum {
+ 	CTL_SLOTTABLE_TCP,
+ 	CTL_MIN_RESVPORT,
+ 	CTL_MAX_RESVPORT,
++	CTL_TRANSPORT,
+ };
  
- 	exp.h.flags = 0;
- 	exp.ex_client = dom;
--	exp.ex_path.mnt = nd.path.mnt;
--	exp.ex_path.dentry = nd.path.dentry;
-+	exp.ex_path.mnt = nd.mnt;
-+	exp.ex_path.dentry = nd.dentry;
- 	exp.ex_pathname = kstrdup(buf, GFP_KERNEL);
- 	err = -ENOMEM;
- 	if (!exp.ex_pathname)
-@@ -607,7 +612,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
- 				goto out;
- 		}
- 
--		err = check_export(nd.path.dentry->d_inode, exp.ex_flags,
-+		err = check_export(nd.dentry->d_inode, exp.ex_flags,
- 				   exp.ex_uuid);
- 		if (err) goto out;
- 	}
-@@ -626,8 +631,10 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
- 	nfsd4_fslocs_free(&exp.ex_fslocs);
- 	kfree(exp.ex_uuid);
- 	kfree(exp.ex_pathname);
--	if (nd.path.dentry)
--		path_put(&nd.path);
-+	if (nd.dentry) {
-+		dput(nd.dentry);
-+		mntput(nd.mnt);
-+	}
-  out_no_path:
- 	if (dom)
- 		auth_domain_put(dom);
-@@ -650,7 +657,7 @@ static int svc_export_show(struct seq_file *m,
- 		return 0;
- 	}
- 	exp = container_of(h, struct svc_export, h);
--	seq_path(m, &exp->ex_path, " \t\n\\");
-+	seq_path(m, exp->ex_path.mnt, exp->ex_path.dentry, " \t\n\\");
- 	seq_putc(m, '\t');
- 	seq_escape(m, exp->ex_client->name, " \t\n\\");
- 	seq_putc(m, '(');
-@@ -1026,7 +1033,7 @@ exp_export(struct nfsctl_export *nxp)
- 		goto out_put_clp;
- 	err = -EINVAL;
- 
--	exp = exp_get_by_name(clp, nd.path.mnt, nd.path.dentry, NULL);
-+	exp = exp_get_by_name(clp, nd.mnt, nd.dentry, NULL);
- 
- 	memset(&new, 0, sizeof(new));
- 
-@@ -1034,8 +1041,8 @@ exp_export(struct nfsctl_export *nxp)
- 	if ((nxp->ex_flags & NFSEXP_FSID) &&
- 	    (!IS_ERR(fsid_key = exp_get_fsid_key(clp, nxp->ex_dev))) &&
- 	    fsid_key->ek_path.mnt &&
--	    (fsid_key->ek_path.mnt != nd.path.mnt ||
--	     fsid_key->ek_path.dentry != nd.path.dentry))
-+	    (fsid_key->ek_path.mnt != nd.mnt ||
-+	     fsid_key->ek_path.dentry != nd.dentry))
- 		goto finish;
- 
- 	if (!IS_ERR(exp)) {
-@@ -1051,7 +1058,7 @@ exp_export(struct nfsctl_export *nxp)
- 		goto finish;
- 	}
- 
--	err = check_export(nd.path.dentry->d_inode, nxp->ex_flags, NULL);
-+	err = check_export(nd.dentry->d_inode, nxp->ex_flags, NULL);
- 	if (err) goto finish;
- 
- 	err = -ENOMEM;
-@@ -1064,7 +1071,8 @@ exp_export(struct nfsctl_export *nxp)
- 	if (!new.ex_pathname)
- 		goto finish;
- 	new.ex_client = clp;
--	new.ex_path = nd.path;
-+	new.ex_path.mnt = nd.mnt;
-+	new.ex_path.dentry = nd.dentry;
- 	new.ex_flags = nxp->ex_flags;
- 	new.ex_anon_uid = nxp->ex_anon_uid;
- 	new.ex_anon_gid = nxp->ex_anon_gid;
-@@ -1090,7 +1098,8 @@ finish:
- 		exp_put(exp);
- 	if (fsid_key && !IS_ERR(fsid_key))
- 		cache_put(&fsid_key->h, &svc_expkey_cache);
--	path_put(&nd.path);
-+	dput(nd.dentry);
-+	mntput(nd.mnt);
- out_put_clp:
- 	auth_domain_put(clp);
- out_unlock:
-@@ -1143,8 +1152,9 @@ exp_unexport(struct nfsctl_export *nxp)
- 		goto out_domain;
- 
- 	err = -EINVAL;
--	exp = exp_get_by_name(dom, nd.path.mnt, nd.path.dentry, NULL);
--	path_put(&nd.path);
-+	exp = exp_get_by_name(dom, nd.mnt, nd.dentry, NULL);
-+	dput(nd.dentry);
-+	mntput(nd.mnt);
- 	if (IS_ERR(exp))
- 		goto out_domain;
- 
-@@ -1180,12 +1190,12 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
- 		printk("nfsd: exp_rootfh path not found %s", path);
- 		return err;
- 	}
--	inode = nd.path.dentry->d_inode;
-+	inode = nd.dentry->d_inode;
- 
- 	dprintk("nfsd: exp_rootfh(%s [%p] %s:%s/%ld)\n",
--		 path, nd.path.dentry, clp->name,
-+		 path, nd.dentry, clp->name,
- 		 inode->i_sb->s_id, inode->i_ino);
--	exp = exp_parent(clp, nd.path.mnt, nd.path.dentry, NULL);
-+	exp = exp_parent(clp, nd.mnt, nd.dentry, NULL);
- 	if (IS_ERR(exp)) {
- 		err = PTR_ERR(exp);
- 		goto out;
-@@ -1195,7 +1205,7 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
- 	 * fh must be initialized before calling fh_compose
- 	 */
- 	fh_init(&fh, maxsize);
--	if (fh_compose(&fh, exp, nd.path.dentry, NULL))
-+	if (fh_compose(&fh, exp, nd.dentry, NULL))
- 		err = -EINVAL;
- 	else
- 		err = 0;
-@@ -1203,7 +1213,8 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
- 	fh_put(&fh);
- 	exp_put(exp);
- out:
--	path_put(&nd.path);
-+	dput(nd.dentry);
-+	mntput(nd.mnt);
- 	return err;
+ #endif /* _LINUX_SUNRPC_DEBUG_H_ */
+diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
+index dc69068..3a0f48f 100644
+--- a/include/linux/sunrpc/svc.h
++++ b/include/linux/sunrpc/svc.h
+@@ -255,7 +255,7 @@ struct svc_rqst {
+ 						 * determine what device number
+ 						 * to report (real or virtual)
+ 						 */
+-	int			rq_splice_ok;   /* turned off in gss privacy
++	int			rq_sendfile_ok;   /* turned off in gss privacy
+ 						 * to prevent encrypting page
+ 						 * cache pages */
+ 	wait_queue_head_t	rq_wait;	/* synchronization */
+diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile
+index 5369aa3..5e43ad3 100644
+--- a/net/sunrpc/Makefile
++++ b/net/sunrpc/Makefile
+@@ -12,6 +12,6 @@ sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \
+ 	    svc.o svcsock.o svcauth.o svcauth_unix.o \
+ 	    rpcb_clnt.o timer.o xdr.o \
+ 	    sunrpc_syms.o cache.o rpc_pipe.o \
+-	    svc_xprt.o
++	    svc_xprt.o backport-socket.o
+ sunrpc-$(CONFIG_PROC_FS) += stats.o
+ sunrpc-$(CONFIG_SYSCTL) += sysctl.o
+diff --git a/net/sunrpc/backport-socket.c b/net/sunrpc/backport-socket.c
+new file mode 100644
+index 0000000..c9006db
+--- /dev/null
++++ b/net/sunrpc/backport-socket.c
+@@ -0,0 +1 @@
++#include "src/socket.c"
+diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
+index 6bfea9e..f0a110d 100644
+--- a/net/sunrpc/auth.c
++++ b/net/sunrpc/auth.c
+@@ -566,19 +566,16 @@ rpcauth_uptodatecred(struct rpc_task *task)
+ 		test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0;
  }
  
-@@ -1213,13 +1224,13 @@ static struct svc_export *exp_find(struct auth_domain *clp, int fsid_type,
- 	struct svc_export *exp;
- 	struct svc_expkey *ek = exp_find_key(clp, fsid_type, fsidv, reqp);
- 	if (IS_ERR(ek))
--		return ERR_CAST(ek);
-+		return ERR_PTR(PTR_ERR(ek));
+-static struct shrinker rpc_cred_shrinker = {
+-	.shrink = rpcauth_cache_shrinker,
+-	.seeks = DEFAULT_SEEKS,
+-};
++static struct shrinker *rpc_cred_shrinker;
  
- 	exp = exp_get_by_name(clp, ek->ek_path.mnt, ek->ek_path.dentry, reqp);
- 	cache_put(&ek->h, &svc_expkey_cache);
- 
- 	if (IS_ERR(exp))
--		return ERR_CAST(exp);
-+		return ERR_PTR(PTR_ERR(exp));
- 	return exp;
+ void __init rpcauth_init_module(void)
+ {
+ 	rpc_init_authunix();
+ 	rpc_init_generic_auth();
+-	register_shrinker(&rpc_cred_shrinker);
++	rpc_cred_shrinker = set_shrinker(DEFAULT_SEEKS, rpcauth_cache_shrinker);
  }
  
-diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
-index 294992e..bc0b463 100644
---- a/fs/nfsd/auth.c
-+++ b/fs/nfsd/auth.c
-@@ -12,6 +12,8 @@
- #include <linux/nfsd/export.h>
- #include "auth.h"
- 
-+#define        CAP_NFSD_MASK (CAP_FS_MASK|CAP_TO_MASK(CAP_SYS_RESOURCE))
-+
- int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp)
+ void __exit rpcauth_remove_module(void)
  {
- 	struct exp_flavor_info *f;
-@@ -68,12 +70,10 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
- 	ret = set_current_groups(cred.cr_group_info);
- 	put_group_info(cred.cr_group_info);
- 	if ((cred.cr_uid)) {
--		current->cap_effective =
--			cap_drop_nfsd_set(current->cap_effective);
-+		cap_t(current->cap_effective) &= ~CAP_NFSD_MASK;
- 	} else {
--		current->cap_effective =
--			cap_raise_nfsd_set(current->cap_effective,
--					   current->cap_permitted);
-+		cap_t(current->cap_effective) |= (CAP_NFSD_MASK &
-+				current->cap_permitted);
- 	}
- 	return ret;
+-	unregister_shrinker(&rpc_cred_shrinker);
++	remove_shrinker(rpc_cred_shrinker);
  }
-diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
-index 2e51ada..275014e 100644
---- a/fs/nfsd/nfs4proc.c
-+++ b/fs/nfsd/nfs4proc.c
-@@ -661,9 +661,6 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- 			return status;
- 		}
+diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
+index 853a414..71ba862 100644
+--- a/net/sunrpc/auth_gss/auth_gss.c
++++ b/net/sunrpc/auth_gss/auth_gss.c
+@@ -481,7 +481,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
+ 	const void *p, *end;
+ 	void *buf;
+ 	struct gss_upcall_msg *gss_msg;
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 	struct gss_cl_ctx *ctx;
+ 	uid_t uid;
+ 	ssize_t err = -EFBIG;
+diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
+index ef45eba..423251a 100644
+--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
++++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
+@@ -99,6 +99,7 @@ get_key(const void *p, const void *end, struct crypto_blkcipher **res)
+ 			printk("gss_kerberos_mech: unsupported algorithm %d\n", alg);
+ 			goto out_err_free_key;
  	}
--	status = mnt_want_write(cstate->current_fh.fh_export->ex_path.mnt);
--	if (status)
--		return status;
- 	status = nfs_ok;
- 	if (setattr->sa_acl != NULL)
- 		status = nfsd4_set_nfs4_acl(rqstp, &cstate->current_fh,
-@@ -673,7 +670,6 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- 	status = nfsd_setattr(rqstp, &cstate->current_fh, &setattr->sa_iattr,
- 				0, (time_t)0);
- out:
--	mnt_drop_write(cstate->current_fh.fh_export->ex_path.mnt);
- 	return status;
- }
++
+ 	*res = crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC);
+ 	if (IS_ERR(*res)) {
+ 		printk("gss_kerberos_mech: unable to initialize crypto algorithm %s\n", alg_name);
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index 81ae3d6..acfb1d1 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -859,7 +859,7 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
+ 	u32 priv_len, maj_stat;
+ 	int pad, saved_len, remaining_len, offset;
  
-diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
-index 14ba4d9..43f3532 100644
---- a/fs/nfsd/nfs4xdr.c
-+++ b/fs/nfsd/nfs4xdr.c
-@@ -1446,7 +1446,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
- 	}
- 	if (bmval0 & (FATTR4_WORD0_ACL | FATTR4_WORD0_ACLSUPPORT
- 			| FATTR4_WORD0_SUPPORTED_ATTRS)) {
--		err = nfsd4_get_nfs4_acl(rqstp, dentry, &acl);
-+		err = nfsd4_get_nfs4_acl(rqstp, dentry, exp->ex_path.mnt ,&acl);
- 		aclsupport = (err == 0);
- 		if (bmval0 & FATTR4_WORD0_ACL) {
- 			if (err == -EOPNOTSUPP)
-diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
-index 145b3c8..7683e96 100644
---- a/fs/nfsd/nfs4recover.c
-+++ b/fs/nfsd/nfs4recover.c
-@@ -121,9 +121,9 @@ out_no_tfm:
- static void
- nfsd4_sync_rec_dir(void)
+-	rqstp->rq_splice_ok = 0;
++	rqstp->rq_sendfile_ok = 0;
+ 
+ 	priv_len = svc_getnl(&buf->head[0]);
+ 	if (rqstp->rq_deferred) {
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index c996671..58e606e 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -696,7 +696,7 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
  {
--	mutex_lock(&rec_dir.path.dentry->d_inode->i_mutex);
--	nfsd_sync_dir(rec_dir.path.dentry);
--	mutex_unlock(&rec_dir.path.dentry->d_inode->i_mutex);
-+	mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
-+	nfsd_sync_dir(rec_dir.dentry);
-+	mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
- }
+ 	struct cache_reader *rp = filp->private_data;
+ 	struct cache_request *rq;
+-	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
++	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
+ 	int err;
  
- int
-@@ -143,9 +143,9 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
- 	nfs4_save_user(&uid, &gid);
+ 	if (count == 0)
+@@ -773,7 +773,7 @@ cache_write(struct file *filp, const char __user *buf, size_t count,
+ 	    loff_t *ppos)
+ {
+ 	int err;
+-	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
++	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
  
- 	/* lock the parent */
--	mutex_lock(&rec_dir.path.dentry->d_inode->i_mutex);
-+	mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
+ 	if (count == 0)
+ 		return 0;
+@@ -804,7 +804,7 @@ cache_poll(struct file *filp, poll_table *wait)
+ 	unsigned int mask;
+ 	struct cache_reader *rp = filp->private_data;
+ 	struct cache_queue *cq;
+-	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
++	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
  
--	dentry = lookup_one_len(dname, rec_dir.path.dentry, HEXDIR_LEN-1);
-+	dentry = lookup_one_len(dname, rec_dir.dentry, HEXDIR_LEN-1);
- 	if (IS_ERR(dentry)) {
- 		status = PTR_ERR(dentry);
- 		goto out_unlock;
-@@ -155,15 +155,11 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
- 		dprintk("NFSD: nfsd4_create_clid_dir: DIRECTORY EXISTS\n");
- 		goto out_put;
- 	}
--	status = mnt_want_write(rec_dir.path.mnt);
--	if (status)
--		goto out_put;
--	status = vfs_mkdir(rec_dir.path.dentry->d_inode, dentry, S_IRWXU);
--	mnt_drop_write(rec_dir.path.mnt);
-+	status = vfs_mkdir(rec_dir.dentry->d_inode, dentry, rec_dir.mnt, S_IRWXU);
- out_put:
- 	dput(dentry);
- out_unlock:
--	mutex_unlock(&rec_dir.path.dentry->d_inode->i_mutex);
-+	mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
- 	if (status == 0) {
- 		clp->cl_firststate = 1;
- 		nfsd4_sync_rec_dir();
-@@ -226,7 +222,7 @@ nfsd4_list_rec_dir(struct dentry *dir, recdir_func *f)
+ 	poll_wait(filp, &queue_wait, wait);
  
- 	nfs4_save_user(&uid, &gid);
+@@ -1239,7 +1239,7 @@ static int c_show(struct seq_file *m, void *p)
+ 	return cd->cache_show(m, cd, cp);
+ }
  
--	filp = dentry_open(dget(dir), mntget(rec_dir.path.mnt), O_RDONLY);
-+	filp = dentry_open(dget(dir), mntget(rec_dir.mnt), O_RDONLY);
- 	status = PTR_ERR(filp);
- 	if (IS_ERR(filp))
- 		goto out;
-@@ -262,8 +258,8 @@ nfsd4_remove_clid_file(struct dentry *dir, struct dentry *dentry)
- 		printk("nfsd4: non-file found in client recovery directory\n");
- 		return -EINVAL;
+-static const struct seq_operations cache_content_op = {
++static struct seq_operations cache_content_op = {
+ 	.start	= c_start,
+ 	.next	= c_next,
+ 	.stop	= c_stop,
+@@ -1269,7 +1269,7 @@ static const struct file_operations content_file_operations = {
+ static ssize_t read_flush(struct file *file, char __user *buf,
+ 			    size_t count, loff_t *ppos)
+ {
+-	struct cache_detail *cd = PDE(file->f_path.dentry->d_inode)->data;
++	struct cache_detail *cd = PDE(file->f_dentry->d_inode)->data;
+ 	char tbuf[20];
+ 	unsigned long p = *ppos;
+ 	size_t len;
+@@ -1290,7 +1290,7 @@ static ssize_t read_flush(struct file *file, char __user *buf,
+ static ssize_t write_flush(struct file * file, const char __user * buf,
+ 			     size_t count, loff_t *ppos)
+ {
+-	struct cache_detail *cd = PDE(file->f_path.dentry->d_inode)->data;
++	struct cache_detail *cd = PDE(file->f_dentry->d_inode)->data;
+ 	char tbuf[20];
+ 	char *ep;
+ 	long flushtime;
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 76739e9..f836bf7 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -213,10 +213,10 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
  	}
--	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
--	status = vfs_unlink(dir->d_inode, dentry);
-+	mutex_lock(&dir->d_inode->i_mutex);
-+	status = vfs_unlink(dir->d_inode, dentry, rec_dir.mnt);
- 	mutex_unlock(&dir->d_inode->i_mutex);
- 	return status;
- }
-@@ -277,8 +273,8 @@ nfsd4_clear_clid_dir(struct dentry *dir, struct dentry *dentry)
- 	 * any regular files anyway, just in case the directory was created by
- 	 * a kernel from the future.... */
- 	nfsd4_list_rec_dir(dentry, nfsd4_remove_clid_file);
--	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
--	status = vfs_rmdir(dir->d_inode, dentry);
-+	mutex_lock(&dir->d_inode->i_mutex);
-+	status = vfs_rmdir(dir->d_inode, dentry, rec_dir.mnt);
- 	mutex_unlock(&dir->d_inode->i_mutex);
- 	return status;
- }
-@@ -291,9 +287,9 @@ nfsd4_unlink_clid_dir(char *name, int namlen)
  
- 	dprintk("NFSD: nfsd4_unlink_clid_dir. name %.*s\n", namlen, name);
+ 	/* save the nodename */
+-	clnt->cl_nodelen = strlen(utsname()->nodename);
++	clnt->cl_nodelen = strlen(system_utsname.nodename);
+ 	if (clnt->cl_nodelen > UNX_MAXNODENAME)
+ 		clnt->cl_nodelen = UNX_MAXNODENAME;
+-	memcpy(clnt->cl_nodename, utsname()->nodename, clnt->cl_nodelen);
++	memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen);
+ 	rpc_register_client(clnt);
+ 	return clnt;
  
--	mutex_lock(&rec_dir.path.dentry->d_inode->i_mutex);
--	dentry = lookup_one_len(name, rec_dir.path.dentry, namlen);
--	mutex_unlock(&rec_dir.path.dentry->d_inode->i_mutex);
-+	mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
-+	dentry = lookup_one_len(name, rec_dir.dentry, namlen);
-+	mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
- 	if (IS_ERR(dentry)) {
- 		status = PTR_ERR(dentry);
- 		return status;
-@@ -302,7 +298,7 @@ nfsd4_unlink_clid_dir(char *name, int namlen)
- 	if (!dentry->d_inode)
- 		goto out;
+@@ -1541,23 +1541,21 @@ static void rpc_show_task(const struct rpc_clnt *clnt,
+ 			  const struct rpc_task *task)
+ {
+ 	const char *rpc_waitq = "none";
+-	char *p, action[KSYM_SYMBOL_LEN];
++	int proc = -1;
++
++	if (task->tk_msg.rpc_proc)
++		proc = task->tk_msg.rpc_proc->p_proc;
  
--	status = nfsd4_clear_clid_dir(rec_dir.path.dentry, dentry);
-+	status = nfsd4_clear_clid_dir(rec_dir.dentry, dentry);
- out:
- 	dput(dentry);
- 	return status;
-@@ -318,17 +314,12 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp)
- 	if (!rec_dir_init || !clp->cl_firststate)
- 		return;
+ 	if (RPC_IS_QUEUED(task))
+ 		rpc_waitq = rpc_qname(task->tk_waitqueue);
  
--	status = mnt_want_write(rec_dir.path.mnt);
--	if (status)
--		goto out;
- 	clp->cl_firststate = 0;
- 	nfs4_save_user(&uid, &gid);
- 	status = nfsd4_unlink_clid_dir(clp->cl_recdir, HEXDIR_LEN-1);
- 	nfs4_reset_user(uid, gid);
- 	if (status == 0)
- 		nfsd4_sync_rec_dir();
--	mnt_drop_write(rec_dir.path.mnt);
--out:
- 	if (status)
- 		printk("NFSD: Failed to remove expired client state directory"
- 				" %.*s\n", HEXDIR_LEN, clp->cl_recdir);
-@@ -357,17 +348,12 @@ nfsd4_recdir_purge_old(void) {
- 
- 	if (!rec_dir_init)
- 		return;
--	status = mnt_want_write(rec_dir.path.mnt);
--	if (status)
--		goto out;
--	status = nfsd4_list_rec_dir(rec_dir.path.dentry, purge_old);
-+	status = nfsd4_list_rec_dir(rec_dir.dentry, purge_old);
- 	if (status == 0)
- 		nfsd4_sync_rec_dir();
--	mnt_drop_write(rec_dir.path.mnt);
--out:
- 	if (status)
- 		printk("nfsd4: failed to purge old clients from recovery"
--			" directory %s\n", rec_dir.path.dentry->d_name.name);
-+			" directory %s\n", rec_dir.dentry->d_name.name);
+-	/* map tk_action pointer to a function name; then trim off
+-	 * the "+0x0 [sunrpc]" */
+-	sprint_symbol(action, (unsigned long)task->tk_action);
+-	p = strchr(action, '+');
+-	if (p)
+-		*p = '\0';
+-
+-	printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%s q:%s\n",
+-		task->tk_pid, task->tk_flags, task->tk_status,
+-		clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
+-		clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task),
+-		action, rpc_waitq);
++	printk(KERN_INFO "%5u %04d %04x %6d %8p %6d %8p %8ld %8s %8p %8p\n",
++			task->tk_pid, proc,
++			task->tk_flags, task->tk_status,
++			clnt, clnt->cl_prog,
++			task->tk_rqstp, task->tk_timeout,
++			rpc_waitq,
++			task->tk_action, task->tk_ops);
  }
  
- static int
-@@ -387,10 +373,10 @@ int
- nfsd4_recdir_load(void) {
- 	int status;
- 
--	status = nfsd4_list_rec_dir(rec_dir.path.dentry, load_recdir);
-+	status = nfsd4_list_rec_dir(rec_dir.dentry, load_recdir);
- 	if (status)
- 		printk("nfsd4: failed loading clients from recovery"
--			" directory %s\n", rec_dir.path.dentry->d_name.name);
-+			" directory %s\n", rec_dir.dentry->d_name.name);
- 	return status;
- }
- 
-@@ -429,5 +415,6 @@ nfsd4_shutdown_recdir(void)
- 	if (!rec_dir_init)
- 		return;
- 	rec_dir_init = 0;
--	path_put(&rec_dir.path);
-+	dput(rec_dir.dentry);
-+	mntput(rec_dir.mnt);
- }
-diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
-index 1578d7a..d649b7f 100644
---- a/fs/nfsd/nfs4state.c
-+++ b/fs/nfsd/nfs4state.c
-@@ -50,7 +50,6 @@
- #include <linux/nfsd/state.h>
- #include <linux/nfsd/xdr4.h>
+ void rpc_show_tasks(void)
+diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
+index 23a2b8f..46a3d9c 100644
+--- a/net/sunrpc/rpc_pipe.c
++++ b/net/sunrpc/rpc_pipe.c
+@@ -14,7 +14,7 @@
+ #include <linux/pagemap.h>
+ #include <linux/mount.h>
  #include <linux/namei.h>
--#include <linux/swap.h>
- #include <linux/mutex.h>
- #include <linux/lockd/bind.h>
- #include <linux/module.h>
-@@ -151,7 +150,6 @@ get_nfs4_file(struct nfs4_file *fi)
- }
+-#include <linux/fsnotify.h>
++#include <linux/dnotify.h>
+ #include <linux/kernel.h>
  
- static int num_delegations;
--unsigned int max_delegations;
+ #include <asm/ioctls.h>
+@@ -26,6 +26,7 @@
+ #include <linux/sunrpc/clnt.h>
+ #include <linux/workqueue.h>
+ #include <linux/sunrpc/rpc_pipe_fs.h>
++#include <linux/path.h>
  
- /*
-  * Open owner state (share locks)
-@@ -197,7 +195,7 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f
- 	dprintk("NFSD alloc_init_deleg\n");
- 	if (fp->fi_had_conflict)
- 		return NULL;
--	if (num_delegations > max_delegations)
-+	if (num_delegations > STATEID_HASH_SIZE * 4)
- 		return NULL;
- 	dp = kmem_cache_alloc(deleg_slab, GFP_KERNEL);
- 	if (dp == NULL)
-@@ -1258,7 +1256,7 @@ static inline void
- nfs4_file_downgrade(struct file *filp, unsigned int share_access)
+ static struct vfsmount *rpc_mount __read_mostly;
+ static int rpc_mount_count;
+@@ -224,7 +225,7 @@ out:
+ static ssize_t
+ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
  {
- 	if (share_access & NFS4_SHARE_ACCESS_WRITE) {
--		drop_file_write_access(filp);
-+		put_write_access(filp->f_dentry->d_inode);
- 		filp->f_mode = (filp->f_mode | FMODE_READ) & ~FMODE_WRITE;
- 	}
- }
-@@ -1576,7 +1574,7 @@ static __be32
- nfs4_upgrade_open(struct svc_rqst *rqstp, struct svc_fh *cur_fh, struct nfs4_stateid *stp, struct nfsd4_open *open)
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 	struct rpc_inode *rpci = RPC_I(inode);
+ 	struct rpc_pipe_msg *msg;
+ 	int res = 0;
+@@ -267,7 +268,7 @@ out_unlock:
+ static ssize_t
+ rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset)
  {
- 	struct file *filp = stp->st_vfs_file;
 -	struct inode *inode = filp->f_path.dentry->d_inode;
 +	struct inode *inode = filp->f_dentry->d_inode;
- 	unsigned int share_access, new_writer;
- 	__be32 status;
+ 	struct rpc_inode *rpci = RPC_I(inode);
+ 	int res;
  
-@@ -1588,10 +1586,6 @@ nfs4_upgrade_open(struct svc_rqst *rqstp, struct svc_fh *cur_fh, struct nfs4_sta
- 		int err = get_write_access(inode);
- 		if (err)
- 			return nfserrno(err);
--		err = mnt_want_write(cur_fh->fh_export->ex_path.mnt);
--		if (err)
--			return nfserrno(err);
--		file_take_write(filp);
- 	}
- 	status = nfsd4_truncate(rqstp, cur_fh, open);
- 	if (status) {
-@@ -1923,7 +1917,7 @@ search_close_lru(u32 st_id, int flags)
- static inline int
- nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stateid *stp)
- {
--	return fhp->fh_dentry->d_inode != stp->st_vfs_file->f_path.dentry->d_inode;
-+	return fhp->fh_dentry->d_inode != stp->st_vfs_file->f_dentry->d_inode;
- }
+@@ -285,7 +286,7 @@ rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
+ 	struct rpc_inode *rpci;
+ 	unsigned int mask = 0;
  
- static int
-@@ -1994,7 +1988,7 @@ static inline int
- io_during_grace_disallowed(struct inode *inode, int flags)
+-	rpci = RPC_I(filp->f_path.dentry->d_inode);
++	rpci = RPC_I(filp->f_dentry->d_inode);
+ 	poll_wait(filp, &rpci->waitq, wait);
+ 
+ 	mask = POLLOUT | POLLWRNORM;
+@@ -300,7 +301,7 @@ static int
+ rpc_pipe_ioctl(struct inode *ino, struct file *filp,
+ 		unsigned int cmd, unsigned long arg)
  {
- 	return nfs4_in_grace() && (flags & (RD_STATE | WR_STATE))
--		&& mandatory_lock(inode);
-+		&& MANDATORY_LOCK(inode);
- }
+-	struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
++	struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode);
+ 	int len;
  
- static int check_stateid_generation(stateid_t *in, stateid_t *ref)
-@@ -2838,7 +2832,7 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- 	 * only the dentry:inode set.
- 	 */
- 	memset(&file, 0, sizeof (struct file));
--	file.f_path.dentry = cstate->current_fh.fh_dentry;
-+	file.f_dentry = cstate->current_fh.fh_dentry;
- 
- 	status = nfs_ok;
- 	error = vfs_test_lock(&file, &file_lock);
-@@ -2934,7 +2928,7 @@ static int
- check_for_locks(struct file *filp, struct nfs4_stateowner *lowner)
+ 	switch (cmd) {
+@@ -448,7 +449,7 @@ struct vfsmount *rpc_get_mount(void)
  {
- 	struct file_lock **flpp;
--	struct inode *inode = filp->f_path.dentry->d_inode;
-+	struct inode *inode = filp->f_dentry->d_inode;
- 	int status = 0;
+ 	int err;
  
- 	lock_kernel();
-@@ -3163,27 +3157,6 @@ get_nfs4_grace_period(void)
- 	return max(user_lease_time, lease_time) * HZ;
+-	err = simple_pin_fs(&rpc_pipe_fs_type, &rpc_mount, &rpc_mount_count);
++	err = simple_pin_fs("rpc_pipefs", &rpc_mount, &rpc_mount_count);
+ 	if (err != 0)
+ 		return ERR_PTR(err);
+ 	return rpc_mount;
+@@ -495,7 +496,7 @@ rpc_lookup_parent(char *path, struct nameidata *nd)
+ static void
+ rpc_release_path(struct nameidata *nd)
+ {
+-	path_put(&nd->path);
++	backport_path_put(nd);
+ 	rpc_put_mount();
  }
  
--/*
-- * Since the lifetime of a delegation isn't limited to that of an open, a
-- * client may quite reasonably hang on to a delegation as long as it has
-- * the inode cached.  This becomes an obvious problem the first time a
-- * client's inode cache approaches the size of the server's total memory.
-- *
-- * For now we avoid this problem by imposing a hard limit on the number
-- * of delegations, which varies according to the server's memory size.
-- */
--static void
--set_max_delegations(void)
--{
--	/*
--	 * Allow at most 4 delegations per megabyte of RAM.  Quick
--	 * estimates suggest that in the worst case (where every delegation
--	 * is for a different inode), a delegation could take about 1.5K,
--	 * giving a worst case usage of about 6% of memory.
--	 */
--	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
--}
--
- /* initialization to perform when the nfsd service is started: */
+@@ -591,13 +592,12 @@ rpc_populate(struct dentry *parent,
+ 		}
+ 		inode->i_ino = i;
+ 		if (files[i].i_fop)
+-			inode->i_fop = files[i].i_fop;
++			inode->i_fop = (struct file_operations  *)files[i].i_fop;
+ 		if (private)
+ 			rpc_inode_setowner(inode, private);
+ 		if (S_ISDIR(mode))
+ 			inc_nlink(dir);
+ 		d_add(dentry, inode);
+-		fsnotify_create(dir, dentry);
+ 	}
+ 	mutex_unlock(&dir->i_mutex);
+ 	return 0;
+@@ -619,7 +619,7 @@ __rpc_mkdir(struct inode *dir, struct dentry *dentry)
+ 	inode->i_ino = iunique(dir->i_sb, 100);
+ 	d_instantiate(dentry, inode);
+ 	inc_nlink(dir);
+-	fsnotify_mkdir(dir, dentry);
++	inode_dir_notify(dir, DN_CREATE);
+ 	return 0;
+ out_err:
+ 	printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
+@@ -668,7 +668,7 @@ rpc_lookup_negative(char *path, struct nameidata *nd)
  
- static void
-@@ -3199,7 +3172,6 @@ __nfs4_state_start(void)
- 	       grace_time/HZ);
- 	laundry_wq = create_singlethread_workqueue("nfsd4");
- 	queue_delayed_work(laundry_wq, &laundromat_work, grace_time);
--	set_max_delegations();
+ 	if ((error = rpc_lookup_parent(path, nd)) != 0)
+ 		return ERR_PTR(error);
+-	dentry = rpc_lookup_create(nd->path.dentry, nd->last.name, nd->last.len,
++	dentry = rpc_lookup_create(nd->dentry, nd->last.name, nd->last.len,
+ 				   1);
+ 	if (IS_ERR(dentry))
+ 		rpc_release_path(nd);
+@@ -696,7 +696,7 @@ rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
+ 	dentry = rpc_lookup_negative(path, &nd);
+ 	if (IS_ERR(dentry))
+ 		return dentry;
+-	dir = nd.path.dentry->d_inode;
++	dir = nd.dentry->d_inode;
+ 	if ((error = __rpc_mkdir(dir, dentry)) != 0)
+ 		goto err_dput;
+ 	RPC_I(dentry->d_inode)->private = rpc_client;
+@@ -788,14 +788,14 @@ rpc_mkpipe(struct dentry *parent, const char *name, void *private, struct rpc_pi
+ 	if (!inode)
+ 		goto err_dput;
+ 	inode->i_ino = iunique(dir->i_sb, 100);
+-	inode->i_fop = &rpc_pipe_fops;
++	inode->i_fop = (struct file_operations  *)&rpc_pipe_fops;
+ 	d_instantiate(dentry, inode);
+ 	rpci = RPC_I(inode);
+ 	rpci->private = private;
+ 	rpci->flags = flags;
+ 	rpci->ops = ops;
+ 	rpci->nkern_readwriters = 1;
+-	fsnotify_create(dir, dentry);
++	inode_dir_notify(dir, DN_CREATE);
+ 	dget(dentry);
+ out:
+ 	mutex_unlock(&dir->i_mutex);
+@@ -882,11 +882,11 @@ out:
+ 	return -ENOMEM;
  }
  
- void
-@@ -3294,11 +3266,12 @@ nfs4_reset_recoverydir(char *recdir)
- 	if (status)
- 		return status;
- 	status = -ENOTDIR;
--	if (S_ISDIR(nd.path.dentry->d_inode->i_mode)) {
-+	if (S_ISDIR(nd.dentry->d_inode->i_mode)) {
- 		nfs4_set_recdir(recdir);
- 		status = 0;
- 	}
--	path_put(&nd.path);
-+	dput(nd.dentry);
-+	mntput(nd.mnt);
- 	return status;
+-static int
++static struct super_block *
+ rpc_get_sb(struct file_system_type *fs_type,
+-		int flags, const char *dev_name, void *data, struct vfsmount *mnt)
++		int flags, const char *dev_name, void *data)
+ {
+-	return get_sb_single(fs_type, flags, data, rpc_fill_super, mnt);
++	return get_sb_single(fs_type, flags, data, rpc_fill_super);
  }
  
-diff --git a/fs/nfs/write.c b/fs/nfs/write.c
-index 3229e21..8e2d5e5 100644
---- a/fs/nfs/write.c
-+++ b/fs/nfs/write.c
-@@ -14,6 +14,8 @@
- #include <linux/writeback.h>
- #include <linux/swap.h>
+ static struct file_system_type rpc_pipe_fs_type = {
+@@ -897,7 +897,7 @@ static struct file_system_type rpc_pipe_fs_type = {
+ };
  
-+#include <linux/mpage.h>
-+
- #include <linux/sunrpc/clnt.h>
- #include <linux/nfs_fs.h>
- #include <linux/nfs_mount.h>
-@@ -181,41 +183,6 @@ static int wb_priority(struct writeback_control *wbc)
+ static void
+-init_once(void *foo)
++init_once(void *foo, struct kmem_cache *cachep, unsigned long temp)
+ {
+ 	struct rpc_inode *rpci = (struct rpc_inode *) foo;
+ 
+diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
+index 24db2b4..0f6f1ea 100644
+--- a/net/sunrpc/rpcb_clnt.c
++++ b/net/sunrpc/rpcb_clnt.c
+@@ -117,18 +117,6 @@ static void rpcb_map_release(void *data)
+ 	kfree(map);
  }
  
- /*
-- * NFS congestion control
-- */
+-static const struct sockaddr_in rpcb_inaddr_loopback = {
+-	.sin_family		= AF_INET,
+-	.sin_addr.s_addr	= htonl(INADDR_LOOPBACK),
+-	.sin_port		= htons(RPCBIND_PORT),
+-};
 -
--int nfs_congestion_kb;
+-static const struct sockaddr_in6 rpcb_in6addr_loopback = {
+-	.sin6_family		= AF_INET6,
+-	.sin6_addr		= IN6ADDR_LOOPBACK_INIT,
+-	.sin6_port		= htons(RPCBIND_PORT),
+-};
 -
--#define NFS_CONGESTION_ON_THRESH 	(nfs_congestion_kb >> (PAGE_SHIFT-10))
--#define NFS_CONGESTION_OFF_THRESH	\
--	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
--
--static int nfs_set_page_writeback(struct page *page)
--{
--	int ret = test_set_page_writeback(page);
--
--	if (!ret) {
--		struct inode *inode = page->mapping->host;
--		struct nfs_server *nfss = NFS_SERVER(inode);
--
--		if (atomic_long_inc_return(&nfss->writeback) >
--				NFS_CONGESTION_ON_THRESH)
--			set_bdi_congested(&nfss->backing_dev_info, WRITE);
--	}
--	return ret;
--}
--
--static void nfs_end_page_writeback(struct page *page)
--{
--	struct inode *inode = page->mapping->host;
--	struct nfs_server *nfss = NFS_SERVER(inode);
--
--	end_page_writeback(page);
--	if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
--		clear_bdi_congested(&nfss->backing_dev_info, WRITE);
--}
--
--/*
-  * Find an associated nfs write request, and prepare to flush it out
-  * May return an error if the user signalled nfs_wait_on_request().
-  */
-@@ -251,7 +218,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
- 		spin_unlock(&inode->i_lock);
- 		BUG();
- 	}
--	if (nfs_set_page_writeback(page) != 0) {
-+	if (test_set_page_writeback(page) != 0) {
- 		spin_unlock(&inode->i_lock);
- 		BUG();
- 	}
-@@ -319,11 +286,11 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
- 	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
+ static struct rpc_clnt *rpcb_create_local(struct sockaddr *addr,
+ 					  size_t addrlen, u32 version)
+ {
+@@ -248,6 +236,11 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay)
+ 		.rpc_argp	= &map,
+ 		.rpc_resp	= okay,
+ 	};
++	struct sockaddr_in rpcb_inaddr_loopback = {
++		.sin_family		= AF_INET,
++		.sin_addr.s_addr	= htonl(INADDR_LOOPBACK),
++		.sin_port		= htons(RPCBIND_PORT),
++	};
  
- 	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
--	err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
-+	err = generic_writepages(mapping, wbc);
- 	nfs_pageio_complete(&pgio);
--	if (err < 0)
-+	if (err)
- 		return err;
--	if (pgio.pg_error < 0)
-+	if (pgio.pg_error)
- 		return pgio.pg_error;
- 	return 0;
+ 	dprintk("RPC:       %sregistering (%u, %u, %d, %u) with local "
+ 			"rpcbind\n", (port ? "" : "un"),
+@@ -272,6 +265,12 @@ static int rpcb_register_netid4(struct sockaddr_in *address_to_register,
+ 	unsigned short port = ntohs(address_to_register->sin_port);
+ 	char buf[32];
+ 
++	struct sockaddr_in rpcb_inaddr_loopback = {
++		.sin_family		= AF_INET,
++		.sin_addr.s_addr	= htonl(INADDR_LOOPBACK),
++		.sin_port		= htons(RPCBIND_PORT),
++	};
++
+ 	/* Construct AF_INET universal address */
+ 	snprintf(buf, sizeof(buf),
+ 			NIPQUAD_FMT".%u.%u",
+@@ -303,6 +302,12 @@ static int rpcb_register_netid6(struct sockaddr_in6 *address_to_register,
+ 	unsigned short port = ntohs(address_to_register->sin6_port);
+ 	char buf[64];
+ 
++	struct sockaddr_in6 rpcb_in6addr_loopback = {
++		.sin6_family		= AF_INET6,
++		.sin6_addr		= IN6ADDR_LOOPBACK_INIT,
++		.sin6_port		= htons(RPCBIND_PORT),
++	};
++
+ 	/* Construct AF_INET6 universal address */
+ 	snprintf(buf, sizeof(buf),
+ 			NIP6_FMT".%u.%u",
+diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
+index 50b049c..5053a5f 100644
+--- a/net/sunrpc/stats.c
++++ b/net/sunrpc/stats.c
+@@ -264,7 +264,7 @@ rpc_proc_init(void)
+ 	dprintk("RPC:       registering /proc/net/rpc\n");
+ 	if (!proc_net_rpc) {
+ 		struct proc_dir_entry *ent;
+-		ent = proc_mkdir("rpc", init_net.proc_net);
++		ent = proc_mkdir("rpc", proc_net);
+ 		if (ent) {
+ 			ent->owner = THIS_MODULE;
+ 			proc_net_rpc = ent;
+@@ -278,7 +278,7 @@ rpc_proc_exit(void)
+ 	dprintk("RPC:       unregistering /proc/net/rpc\n");
+ 	if (proc_net_rpc) {
+ 		proc_net_rpc = NULL;
+-		remove_proc_entry("rpc", init_net.proc_net);
++		remove_proc_entry("rpc", proc_net);
+ 	}
  }
-@@ -410,19 +377,15 @@ nfs_mark_request_commit(struct nfs_page *req)
- 			req->wb_index,
- 			NFS_PAGE_TAG_COMMIT);
- 	spin_unlock(&inode->i_lock);
--	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
--	inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
-+	inc_page_state(nr_unstable);
- 	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
- }
  
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index 5a32cb7..e0e87c6 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -174,7 +174,7 @@ fail:
  static int
- nfs_clear_request_commit(struct nfs_page *req)
+ svc_pool_map_init_percpu(struct svc_pool_map *m)
  {
--	struct page *page = req->wb_page;
--
- 	if (test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) {
--		dec_zone_page_state(page, NR_UNSTABLE_NFS);
--		dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE);
-+		dec_page_state(nr_unstable);
- 		return 1;
+-	unsigned int maxpools = nr_cpu_ids;
++	unsigned int maxpools = highest_possible_processor_id() + 1;
+ 	unsigned int pidx = 0;
+ 	unsigned int cpu;
+ 	int err;
+@@ -202,7 +202,7 @@ svc_pool_map_init_percpu(struct svc_pool_map *m)
+ static int
+ svc_pool_map_init_pernode(struct svc_pool_map *m)
+ {
+-	unsigned int maxpools = nr_node_ids;
++	unsigned int maxpools = highest_possible_processor_id() + 1;
+ 	unsigned int pidx = 0;
+ 	unsigned int node;
+ 	int err;
+@@ -310,13 +310,12 @@ svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
+ 	switch (m->mode) {
+ 	case SVC_POOL_PERCPU:
+ 	{
+-		set_cpus_allowed_ptr(task, &cpumask_of_cpu(node));
++		set_cpus_allowed(task, cpumask_of_cpu(node));
+ 		break;
  	}
- 	return 0;
-@@ -726,8 +689,8 @@ int nfs_updatepage(struct file *file, struct page *page,
- 	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
+ 	case SVC_POOL_PERNODE:
+ 	{
+-		node_to_cpumask_ptr(nodecpumask, node);
+-		set_cpus_allowed_ptr(task, nodecpumask);
++		set_cpus_allowed(task, node_to_cpumask(node));
+ 		break;
+ 	}
+ 	}
+@@ -831,7 +830,7 @@ svc_process(struct svc_rqst *rqstp)
+ 	rqstp->rq_res.tail[0].iov_base = NULL;
+ 	rqstp->rq_res.tail[0].iov_len = 0;
+ 	/* Will be turned off only in gss privacy case: */
+-	rqstp->rq_splice_ok = 1;
++	rqstp->rq_sendfile_ok = 1;
  
- 	dprintk("NFS:       nfs_updatepage(%s/%s %d@%lld)\n",
--		file->f_path.dentry->d_parent->d_name.name,
--		file->f_path.dentry->d_name.name, count,
-+		file->f_dentry->d_parent->d_name.name,
-+		file->f_dentry->d_name.name, count,
- 		(long long)(page_offset(page) + offset));
+ 	/* Setup reply header */
+ 	rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
+diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
+index 8a73cbb..80a16e2 100644
+--- a/net/sunrpc/svcauth.c
++++ b/net/sunrpc/svcauth.c
+@@ -121,8 +121,7 @@ EXPORT_SYMBOL(svc_auth_unregister);
+ #define	DN_HASHMASK	(DN_HASHMAX-1)
  
- 	/* If we're not using byte range locks, and we know the page
-@@ -757,10 +720,10 @@ static void nfs_writepage_release(struct nfs_page *req)
- {
+ static struct hlist_head	auth_domain_table[DN_HASHMAX];
+-static spinlock_t	auth_domain_lock =
+-	__SPIN_LOCK_UNLOCKED(auth_domain_lock);
++static spinlock_t	auth_domain_lock = SPIN_LOCK_UNLOCKED;
  
- 	if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) {
--		nfs_end_page_writeback(req->wb_page);
-+		end_page_writeback(req->wb_page);
- 		nfs_inode_remove_request(req);
- 	} else
--		nfs_end_page_writeback(req->wb_page);
-+		end_page_writeback(req->wb_page);
- 	nfs_clear_page_tag_locked(req);
- }
- 
-@@ -854,7 +817,7 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
- static void nfs_redirty_request(struct nfs_page *req)
+ void auth_domain_put(struct auth_domain *dom)
  {
- 	nfs_mark_request_dirty(req);
--	nfs_end_page_writeback(req->wb_page);
-+	end_page_writeback(req->wb_page);
- 	nfs_clear_page_tag_locked(req);
- }
- 
-@@ -1074,13 +1037,13 @@ static void nfs_writeback_release_full(void *calldata)
- 		if (nfs_write_need_commit(data)) {
- 			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
- 			nfs_mark_request_commit(req);
--			nfs_end_page_writeback(page);
-+			end_page_writeback(page);
- 			dprintk(" marked for commit\n");
- 			goto next;
- 		}
- 		dprintk(" OK\n");
- remove_request:
--		nfs_end_page_writeback(page);
-+		end_page_writeback(page);
- 		nfs_inode_remove_request(req);
- 	next:
- 		nfs_clear_page_tag_locked(req);
-@@ -1250,6 +1213,7 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
+diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
+index f24800f..b30d725 100644
+--- a/net/sunrpc/svcauth_unix.c
++++ b/net/sunrpc/svcauth_unix.c
+@@ -678,7 +678,7 @@ int
+ svcauth_unix_set_client(struct svc_rqst *rqstp)
  {
- 	struct nfs_write_data	*data;
- 	struct nfs_page         *req;
-+	int			res = 0;
+ 	struct sockaddr_in *sin;
+-	struct sockaddr_in6 *sin6, sin6_storage;
++	struct sockaddr_in6 *sin6 = NULL, sin6_storage;
+ 	struct ip_map *ipm;
  
- 	data = nfs_commitdata_alloc();
- 
-@@ -1260,14 +1224,14 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
- 	return nfs_commit_rpcsetup(head, data, how);
-  out_bad:
- 	while (!list_empty(head)) {
-+		res++;
- 		req = nfs_list_entry(head->next);
- 		nfs_list_remove_request(req);
- 		nfs_mark_request_commit(req);
--		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
--		dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
--				BDI_RECLAIMABLE);
- 		nfs_clear_page_tag_locked(req);
+ 	switch (rqstp->rq_addr.ss_family) {
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 3e65719..cbb47a6 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -472,12 +472,16 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
+ 	if (len < 0)
+ 		return len;
+ 	rqstp->rq_addrlen = len;
+-	if (skb->tstamp.tv64 == 0) {
+-		skb->tstamp = ktime_get_real();
++	if (skb->tstamp.off_sec == 0) {
++		struct timeval tv;
++
++		tv.tv_sec = xtime.tv_sec;
++		tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
++		skb_set_timestamp(skb, &tv);
+ 		/* Don't enable netstamp, sunrpc doesn't
+ 		   need that much accuracy */
  	}
-+	if (res)
-+		sub_page_state(nr_unstable, res);
- 	return -ENOMEM;
- }
+-	svsk->sk_sk->sk_stamp = skb->tstamp;
++	skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp);
+ 	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
  
-@@ -1363,11 +1327,11 @@ long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_contr
- 	long pages, ret;
+ 	/*
+diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c
+index 5231f7a..1482e34 100644
+--- a/net/sunrpc/sysctl.c
++++ b/net/sunrpc/sysctl.c
+@@ -135,6 +135,7 @@ done:
  
- 	/* FIXME */
--	if (wbc->range_cyclic)
-+	if (wbc->sync_mode == WB_SYNC_NONE)
- 		idx_start = 0;
- 	else {
--		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
--		idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
-+		idx_start = wbc->start >> PAGE_CACHE_SHIFT;
-+		idx_end = wbc->end >> PAGE_CACHE_SHIFT;
- 		if (idx_end > idx_start) {
- 			pgoff_t l_npages = 1 + idx_end - idx_start;
- 			npages = l_npages;
-@@ -1428,7 +1392,6 @@ static int nfs_write_mapping(struct address_space *mapping, int how)
- 		.sync_mode = WB_SYNC_NONE,
- 		.nr_to_write = LONG_MAX,
- 		.for_writepages = 1,
--		.range_cyclic = 1,
- 	};
- 	int ret;
+ static ctl_table debug_table[] = {
+ 	{
++		.ctl_name	= CTL_RPCDEBUG, 
+ 		.procname	= "rpc_debug",
+ 		.data		= &rpc_debug,
+ 		.maxlen		= sizeof(int),
+@@ -142,6 +143,7 @@ static ctl_table debug_table[] = {
+ 		.proc_handler	= &proc_dodebug
+ 	},
+ 	{
++		.ctl_name	= CTL_NFSDEBUG,
+ 		.procname	= "nfs_debug",
+ 		.data		= &nfs_debug,
+ 		.maxlen		= sizeof(int),
+@@ -149,6 +151,7 @@ static ctl_table debug_table[] = {
+ 		.proc_handler	= &proc_dodebug
+ 	},
+ 	{
++		.ctl_name	= CTL_NFSDDEBUG,
+ 		.procname	= "nfsd_debug",
+ 		.data		= &nfsd_debug,
+ 		.maxlen		= sizeof(int),
+@@ -156,6 +159,7 @@ static ctl_table debug_table[] = {
+ 		.proc_handler	= &proc_dodebug
+ 	},
+ 	{
++		.ctl_name	= CTL_NLMDEBUG,
+ 		.procname	= "nlm_debug",
+ 		.data		= &nlm_debug,
+ 		.maxlen		= sizeof(int),
+@@ -163,6 +167,7 @@ static ctl_table debug_table[] = {
+ 		.proc_handler	= &proc_dodebug
+ 	},
+ 	{
++		.ctl_name	= CTL_TRANSPORT,
+ 		.procname	= "transports",
+ 		.maxlen		= 256,
+ 		.mode		= 0444,
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+index 6fb493c..761ad29 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -247,10 +247,6 @@ static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
+ 	struct svcxprt_rdma *xprt = cq_context;
+ 	unsigned long flags;
  
-@@ -1452,51 +1415,7 @@ int nfs_wb_nocommit(struct inode *inode)
- 	return nfs_write_mapping(inode->i_mapping, FLUSH_NOCOMMIT);
- }
- 
--int nfs_wb_page_cancel(struct inode *inode, struct page *page)
--{
--	struct nfs_page *req;
--	loff_t range_start = page_offset(page);
--	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
--	struct writeback_control wbc = {
--		.bdi = page->mapping->backing_dev_info,
--		.sync_mode = WB_SYNC_ALL,
--		.nr_to_write = LONG_MAX,
--		.range_start = range_start,
--		.range_end = range_end,
--	};
--	int ret = 0;
+-	/* Guard against unconditional flush call for destroyed QP */
+-	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount)==0)
+-		return;
 -
--	BUG_ON(!PageLocked(page));
--	for (;;) {
--		req = nfs_page_find_request(page);
--		if (req == NULL)
--			goto out;
--		if (test_bit(PG_CLEAN, &req->wb_flags)) {
--			nfs_release_request(req);
--			break;
--		}
--		if (nfs_lock_request_dontget(req)) {
--			nfs_inode_remove_request(req);
--			/*
--			 * In case nfs_inode_remove_request has marked the
--			 * page as being dirty
--			 */
--			cancel_dirty_page(page, PAGE_CACHE_SIZE);
--			nfs_unlock_request(req);
--			break;
--		}
--		ret = nfs_wait_on_request(req);
--		if (ret < 0)
--			goto out;
--	}
--	if (!PagePrivate(page))
--		return 0;
--	ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
--out:
--	return ret;
--}
--
--static int nfs_wb_page_priority(struct inode *inode, struct page *page,
-+int nfs_wb_page_priority(struct inode *inode, struct page *page,
- 				int how)
- {
- 	loff_t range_start = page_offset(page);
-@@ -1505,8 +1424,8 @@ static int nfs_wb_page_priority(struct inode *inode, struct page *page,
- 		.bdi = page->mapping->backing_dev_info,
- 		.sync_mode = WB_SYNC_ALL,
- 		.nr_to_write = LONG_MAX,
--		.range_start = range_start,
--		.range_end = range_end,
-+		.start = range_start,
-+		.end = range_end,
- 	};
- 	int ret;
+ 	/*
+ 	 * Set the bit regardless of whether or not it's on the list
+ 	 * because it may be on the list already due to an SQ
+@@ -411,10 +407,6 @@ static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
+ 	struct svcxprt_rdma *xprt = cq_context;
+ 	unsigned long flags;
  
-@@ -1554,26 +1473,6 @@ int __init nfs_init_writepagecache(void)
- 	if (nfs_commit_mempool == NULL)
- 		return -ENOMEM;
- 
--	/*
--	 * NFS congestion size, scale with available memory.
--	 *
--	 *  64MB:    8192k
--	 * 128MB:   11585k
--	 * 256MB:   16384k
--	 * 512MB:   23170k
--	 *   1GB:   32768k
--	 *   2GB:   46340k
--	 *   4GB:   65536k
--	 *   8GB:   92681k
--	 *  16GB:  131072k
--	 *
--	 * This allows larger machines to have larger/more transfers.
--	 * Limit the default to 256M
--	 */
--	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
--	if (nfs_congestion_kb > 256*1024)
--		nfs_congestion_kb = 256*1024;
+-	/* Guard against unconditional flush call for destroyed QP */
+-	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount)==0)
+-		return;
 -
- 	return 0;
- }
+ 	/*
+ 	 * Set the bit regardless of whether or not it's on the list
+ 	 * because it may be on the list already due to an RQ
+@@ -1116,9 +1108,6 @@ static void __svc_rdma_free(struct work_struct *work)
+ 		container_of(work, struct svcxprt_rdma, sc_work);
+ 	dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);
  
-diff --git a/fs/nfs/sysctl.c b/fs/nfs/sysctl.c
-index b62481d..0937c19 100644
---- a/fs/nfs/sysctl.c
-+++ b/fs/nfs/sysctl.c
-@@ -50,14 +50,6 @@ static ctl_table nfs_cb_sysctls[] = {
- 		.proc_handler	= &proc_dointvec_jiffies,
- 		.strategy	= &sysctl_jiffies,
- 	},
--	{
--		.ctl_name	= CTL_UNNUMBERED,
--		.procname	= "nfs_congestion_kb",
--		.data		= &nfs_congestion_kb,
--		.maxlen		= sizeof(nfs_congestion_kb),
--		.mode		= 0644,
--		.proc_handler	= &proc_dointvec,
--	},
- 	{ .ctl_name = 0 }
- };
- 
-@@ -83,7 +75,7 @@ static ctl_table nfs_cb_sysctl_root[] = {
- 
- int nfs_register_sysctl(void)
- {
--	nfs_callback_sysctl_table = register_sysctl_table(nfs_cb_sysctl_root);
-+	nfs_callback_sysctl_table = register_sysctl_table(nfs_cb_sysctl_root,0);
- 	if (nfs_callback_sysctl_table == NULL)
- 		return -ENOMEM;
- 	return 0;
+-	/* We should only be called from kref_put */
+-	BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);
+-
+ 	/*
+ 	 * Destroy queued, but not processed read completions. Note
+ 	 * that this cleanup has to be done before destroying the

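The svc_udp_recvfrom hunk in the rnfs_fs.patch diff above rebuilds the skb timestamp from xtime, dividing the nanosecond field by NSEC_PER_USEC to obtain microseconds for the older 2.6.16 timeval-based API. Below is a small userspace sketch of that same timespec-to-timeval conversion, shown only to make the arithmetic concrete: clock_gettime() stands in for the kernel's xtime, and everything else (the main() wrapper, the printed format) is illustrative, not part of the patch.

    /*
     * Userspace sketch of the timespec -> timeval conversion used by the
     * backported svc_udp_recvfrom: seconds copy straight across, and the
     * nanosecond field is divided by NSEC_PER_USEC (1000) to give usec.
     */
    #include <stdio.h>
    #include <time.h>
    #include <sys/time.h>

    #define NSEC_PER_USEC 1000L

    int main(void)
    {
            struct timespec ts;
            struct timeval tv;

            clock_gettime(CLOCK_REALTIME, &ts);   /* stand-in for xtime */
            tv.tv_sec  = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
            printf("%ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
            return 0;
    }
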
Deleted: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/rnfs_net.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/rnfs_net.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.16_sles10_sp2/rnfs_net.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,876 +0,0 @@
-diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
-index 76739e9..a5bebf6 100644
---- a/net/sunrpc/clnt.c
-+++ b/net/sunrpc/clnt.c
-@@ -213,10 +213,10 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
- 	}
- 
- 	/* save the nodename */
--	clnt->cl_nodelen = strlen(utsname()->nodename);
-+	clnt->cl_nodelen = strlen(system_utsname.nodename);
- 	if (clnt->cl_nodelen > UNX_MAXNODENAME)
- 		clnt->cl_nodelen = UNX_MAXNODENAME;
--	memcpy(clnt->cl_nodename, utsname()->nodename, clnt->cl_nodelen);
-+	memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen);
- 	rpc_register_client(clnt);
- 	return clnt;
- 
-@@ -309,7 +309,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
- 		return clnt;
- 
- 	if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
--		int err = rpc_ping(clnt, RPC_TASK_SOFT);
-+		int err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
- 		if (err != 0) {
- 			rpc_shutdown_client(clnt);
- 			return ERR_PTR(err);
-@@ -320,6 +320,8 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
- 	if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
- 		clnt->cl_softrtry = 0;
- 
-+	if (args->flags & RPC_CLNT_CREATE_INTR)
-+		clnt->cl_intr = 1;
- 	if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
- 		clnt->cl_autobind = 1;
- 	if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
-@@ -489,7 +491,7 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
- 	clnt->cl_prog     = program->number;
- 	clnt->cl_vers     = version->number;
- 	clnt->cl_stats    = program->stats;
--	err = rpc_ping(clnt, RPC_TASK_SOFT);
-+	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
- 	if (err != 0) {
- 		rpc_shutdown_client(clnt);
- 		clnt = ERR_PTR(err);
-@@ -511,6 +513,46 @@ static const struct rpc_call_ops rpc_default_ops = {
- 	.rpc_call_done = rpc_default_callback,
- };
- 
-+/*
-+ *     Export the signal mask handling for synchronous code that
-+ *     sleeps on RPC calls
-+ */
-+#define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM))
-+
-+static void rpc_save_sigmask(sigset_t *oldset, int intr)
-+{
-+       unsigned long   sigallow = sigmask(SIGKILL);
-+       sigset_t sigmask;
-+
-+       /* Block all signals except those listed in sigallow */
-+       if (intr)
-+               sigallow |= RPC_INTR_SIGNALS;
-+       siginitsetinv(&sigmask, sigallow);
-+       sigprocmask(SIG_BLOCK, &sigmask, oldset);
-+}
-+
-+static inline void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset)
-+{
-+       rpc_save_sigmask(oldset, !RPC_TASK_UNINTERRUPTIBLE(task));
-+}
-+
-+static inline void rpc_restore_sigmask(sigset_t *oldset)
-+{
-+       sigprocmask(SIG_SETMASK, oldset, NULL);
-+}
-+
-+void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
-+{
-+       rpc_save_sigmask(oldset, clnt->cl_intr);
-+}
-+EXPORT_SYMBOL_GPL(rpc_clnt_sigmask);
-+
-+void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
-+{
-+       rpc_restore_sigmask(oldset);
-+}
-+EXPORT_SYMBOL_GPL(rpc_clnt_sigunmask);
-+
- /**
-  * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
-  * @task_setup_data: pointer to task initialisation data
-@@ -518,6 +560,7 @@ static const struct rpc_call_ops rpc_default_ops = {
- struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
- {
- 	struct rpc_task *task, *ret;
-+	sigset_t oldset;
- 
- 	task = rpc_new_task(task_setup_data);
- 	if (task == NULL) {
-@@ -532,10 +575,12 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
- 		rpc_put_task(task);
- 		goto out;
- 	}
-+	rpc_task_sigmask(task, &oldset);
- 	atomic_inc(&task->tk_count);
- 	rpc_execute(task);
- 	ret = task;
- out:
-+	rpc_restore_sigmask(&oldset);
- 	return ret;
- }
- EXPORT_SYMBOL_GPL(rpc_run_task);
-@@ -1548,7 +1593,7 @@ static void rpc_show_task(const struct rpc_clnt *clnt,
- 
- 	/* map tk_action pointer to a function name; then trim off
- 	 * the "+0x0 [sunrpc]" */
--	sprint_symbol(action, (unsigned long)task->tk_action);
-+	print_symbol(action, (unsigned long)task->tk_action);
- 	p = strchr(action, '+');
- 	if (p)
- 		*p = '\0';
-diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
-index a661a3a..b43b2a4 100644
---- a/net/sunrpc/socklib.c
-+++ b/net/sunrpc/socklib.c
-@@ -156,7 +156,7 @@ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
- 	desc.offset = sizeof(struct udphdr);
- 	desc.count = skb->len - desc.offset;
- 
--	if (skb_csum_unnecessary(skb))
-+	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
- 		goto no_checksum;
- 
- 	desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
-diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
-index 385f427..eb1ff39 100644
---- a/net/sunrpc/sched.c
-+++ b/net/sunrpc/sched.c
-@@ -222,9 +222,9 @@ void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
- }
- EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
- 
--static int rpc_wait_bit_killable(void *word)
-+static int rpc_wait_bit_interruptible(void *word)
- {
--	if (fatal_signal_pending(current))
-+	if (signal_pending(current))
- 		return -ERESTARTSYS;
- 	schedule();
- 	return 0;
-@@ -276,9 +276,9 @@ static void rpc_mark_complete_task(struct rpc_task *task)
- int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
- {
- 	if (action == NULL)
--		action = rpc_wait_bit_killable;
-+		action = rpc_wait_bit_interruptible;
- 	return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
--			action, TASK_KILLABLE);
-+			action, TASK_INTERRUPTIBLE);
- }
- EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
- 
-@@ -659,9 +659,10 @@ static void __rpc_execute(struct rpc_task *task)
- 
- 		/* sync task: sleep here */
- 		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
-+		/* Note: Caller should be using rpc_clnt_sigmask() */
- 		status = out_of_line_wait_on_bit(&task->tk_runstate,
--				RPC_TASK_QUEUED, rpc_wait_bit_killable,
--				TASK_KILLABLE);
-+				RPC_TASK_QUEUED, rpc_wait_bit_interruptible,
-+				TASK_INTERRUPTIBLE);
- 		if (status == -ERESTARTSYS) {
- 			/*
- 			 * When a sync task receives a signal, it exits with
-@@ -729,7 +730,7 @@ struct rpc_buffer {
- void *rpc_malloc(struct rpc_task *task, size_t size)
- {
- 	struct rpc_buffer *buf;
--	gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT;
-+	gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOFS;
- 
- 	size += sizeof(struct rpc_buffer);
- 	if (size <= RPC_BUFFER_MAXSIZE)
-@@ -800,6 +801,8 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
- 		kref_get(&task->tk_client->cl_kref);
- 		if (task->tk_client->cl_softrtry)
- 			task->tk_flags |= RPC_TASK_SOFT;
-+		if (!task->tk_client->cl_intr)
-+			task->tk_flags |= RPC_TASK_NOINTR;
- 	}
- 
- 	if (task->tk_ops->rpc_call_prepare != NULL)
-@@ -819,7 +822,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
- 	task->tk_start = jiffies;
- 
- 	dprintk("RPC:       new task initialized, procpid %u\n",
--				task_pid_nr(current));
-+				current->pid);
- }
- 
- static struct rpc_task *
-diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
-index 6bfea9e..a74223b 100644
---- a/net/sunrpc/auth.c
-+++ b/net/sunrpc/auth.c
-@@ -566,19 +566,17 @@ rpcauth_uptodatecred(struct rpc_task *task)
- 		test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0;
- }
- 
--static struct shrinker rpc_cred_shrinker = {
--	.shrink = rpcauth_cache_shrinker,
--	.seeks = DEFAULT_SEEKS,
--};
-+static struct shrinker *rpc_cred_shrinker;
- 
- void __init rpcauth_init_module(void)
- {
- 	rpc_init_authunix();
- 	rpc_init_generic_auth();
--	register_shrinker(&rpc_cred_shrinker);
-+	rpc_cred_shrinker = set_shrinker(DEFAULT_SEEKS, rpcauth_cache_shrinker);
- }
- 
- void __exit rpcauth_remove_module(void)
- {
--	unregister_shrinker(&rpc_cred_shrinker);
-+	if (rpc_cred_shrinker != NULL)
-+		remove_shrinker(rpc_cred_shrinker);
- }
-diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
-index 5a32cb7..deb8526 100644
---- a/net/sunrpc/svc.c
-+++ b/net/sunrpc/svc.c
-@@ -174,7 +174,7 @@ fail:
- static int
- svc_pool_map_init_percpu(struct svc_pool_map *m)
- {
--	unsigned int maxpools = nr_cpu_ids;
-+	unsigned int maxpools = highest_possible_processor_id() + 1;
- 	unsigned int pidx = 0;
- 	unsigned int cpu;
- 	int err;
-@@ -202,7 +202,7 @@ svc_pool_map_init_percpu(struct svc_pool_map *m)
- static int
- svc_pool_map_init_pernode(struct svc_pool_map *m)
- {
--	unsigned int maxpools = nr_node_ids;
-+	unsigned int maxpools = highest_possible_node_id() + 1;
- 	unsigned int pidx = 0;
- 	unsigned int node;
- 	int err;
-@@ -310,13 +310,12 @@ svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
- 	switch (m->mode) {
- 	case SVC_POOL_PERCPU:
- 	{
--		set_cpus_allowed_ptr(task, &cpumask_of_cpu(node));
-+		set_cpus_allowed(task, cpumask_of_cpu(node));
- 		break;
- 	}
- 	case SVC_POOL_PERNODE:
- 	{
--		node_to_cpumask_ptr(nodecpumask, node);
--		set_cpus_allowed_ptr(task, nodecpumask);
-+		set_cpus_allowed(task, node_to_cpumask(node));
- 		break;
- 	}
- 	}
-diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
-index 3e65719..54f7490 100644
---- a/net/sunrpc/svcsock.c
-+++ b/net/sunrpc/svcsock.c
-@@ -297,9 +297,12 @@ EXPORT_SYMBOL(svc_sock_names);
- static int svc_recv_available(struct svc_sock *svsk)
- {
- 	struct socket	*sock = svsk->sk_sock;
-+	mm_segment_t oldfs = get_fs();
- 	int		avail, err;
- 
--	err = kernel_sock_ioctl(sock, TIOCINQ, (unsigned long) &avail);
-+	set_fs(KERNEL_DS);
-+	err = sock->ops->ioctl(sock, TIOCINQ, (unsigned long) &avail);
-+	set_fs(oldfs);
- 
- 	return (err >= 0)? avail : err;
- }
-@@ -472,12 +475,16 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
- 	if (len < 0)
- 		return len;
- 	rqstp->rq_addrlen = len;
--	if (skb->tstamp.tv64 == 0) {
--		skb->tstamp = ktime_get_real();
-+	if (skb->tstamp.off_sec== 0) {
-+		struct timeval tv;
-+
-+		tv.tv_sec = xtime.tv_sec;
-+		tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
-+		skb_set_timestamp(skb, &tv);
- 		/* Don't enable netstamp, sunrpc doesn't
- 		   need that much accuracy */
- 	}
--	svsk->sk_sk->sk_stamp = skb->tstamp;
-+	skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp);
- 	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
- 
- 	/*
-@@ -717,6 +724,7 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
- 	struct socket	*sock = svsk->sk_sock;
- 	struct socket	*newsock;
- 	struct svc_sock	*newsvsk;
-+	struct sock *sk = sock->sk;
- 	int		err, slen;
- 	RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
- 
-@@ -725,7 +733,11 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
- 		return NULL;
- 
- 	clear_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
--	err = kernel_accept(sock, &newsock, O_NONBLOCK);
-+
-+	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol, 
-+			&newsock);
-+	if (err >= 0) 
-+		err = sock->ops->accept(sock, newsock, O_NONBLOCK);
- 	if (err < 0) {
- 		if (err == -ENOMEM)
- 			printk(KERN_WARNING "%s: no more sockets!\n",
-@@ -735,6 +747,8 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
- 				   serv->sv_name, -err);
- 		return NULL;
- 	}
-+	newsock->ops = sock->ops;
-+
- 	set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
- 
- 	err = kernel_getpeername(newsock, sin, &slen);
-diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
-index 8a73cbb..80a16e2 100644
---- a/net/sunrpc/svcauth.c
-+++ b/net/sunrpc/svcauth.c
-@@ -121,8 +121,7 @@ EXPORT_SYMBOL(svc_auth_unregister);
- #define	DN_HASHMASK	(DN_HASHMAX-1)
- 
- static struct hlist_head	auth_domain_table[DN_HASHMAX];
--static spinlock_t	auth_domain_lock =
--	__SPIN_LOCK_UNLOCKED(auth_domain_lock);
-+static spinlock_t	auth_domain_lock = SPIN_LOCK_UNLOCKED;
- 
- void auth_domain_put(struct auth_domain *dom)
- {
-diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
-index 50b049c..a663ad1 100644
---- a/net/sunrpc/stats.c
-+++ b/net/sunrpc/stats.c
-@@ -224,10 +224,15 @@ EXPORT_SYMBOL_GPL(rpc_print_iostats);
- static inline struct proc_dir_entry *
- do_register(const char *name, void *data, const struct file_operations *fops)
- {
-+	struct proc_dir_entry *ent;
-+
- 	rpc_proc_init();
- 	dprintk("RPC:       registering /proc/net/rpc/%s\n", name);
- 
--	return proc_create_data(name, 0, proc_net_rpc, fops, data);
-+	ent = create_proc_entry(name, 0, proc_net_rpc);
-+	if (ent) 
-+		ent->proc_fops = (struct file_operations *) fops;
-+	return ent;
- }
- 
- struct proc_dir_entry *
-@@ -264,7 +269,7 @@ rpc_proc_init(void)
- 	dprintk("RPC:       registering /proc/net/rpc\n");
- 	if (!proc_net_rpc) {
- 		struct proc_dir_entry *ent;
--		ent = proc_mkdir("rpc", init_net.proc_net);
-+		ent = proc_mkdir("rpc", proc_net);
- 		if (ent) {
- 			ent->owner = THIS_MODULE;
- 			proc_net_rpc = ent;
-@@ -278,7 +283,7 @@ rpc_proc_exit(void)
- 	dprintk("RPC:       unregistering /proc/net/rpc\n");
- 	if (proc_net_rpc) {
- 		proc_net_rpc = NULL;
--		remove_proc_entry("rpc", init_net.proc_net);
-+		remove_proc_entry("net/rpc", NULL);
- 	}
- }
- 
-diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c
-index 0f8c439..31801a4 100644
---- a/net/sunrpc/sysctl.c
-+++ b/net/sunrpc/sysctl.c
-@@ -44,7 +44,7 @@ void
- rpc_register_sysctl(void)
- {
- 	if (!sunrpc_table_header)
--		sunrpc_table_header = register_sysctl_table(sunrpc_table);
-+		sunrpc_table_header = register_sysctl_table(sunrpc_table, 0);
- }
- 
- void
-diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
-index 853a414..71ba862 100644
---- a/net/sunrpc/auth_gss/auth_gss.c
-+++ b/net/sunrpc/auth_gss/auth_gss.c
-@@ -481,7 +481,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
- 	const void *p, *end;
- 	void *buf;
- 	struct gss_upcall_msg *gss_msg;
--	struct inode *inode = filp->f_path.dentry->d_inode;
-+	struct inode *inode = filp->f_dentry->d_inode;
- 	struct gss_cl_ctx *ctx;
- 	uid_t uid;
- 	ssize_t err = -EFBIG;
-diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
-index c93fca2..c9968be 100644
---- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
-+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
-@@ -205,9 +205,6 @@ encryptor(struct scatterlist *sg, void *data)
- 	if (thislen == 0)
- 		return 0;
- 
--	sg_mark_end(&desc->infrags[desc->fragno - 1]);
--	sg_mark_end(&desc->outfrags[desc->fragno - 1]);
--
- 	ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
- 					  desc->infrags, thislen);
- 	if (ret)
-@@ -220,7 +217,7 @@ encryptor(struct scatterlist *sg, void *data)
- 		sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
- 				sg->offset + sg->length - fraglen);
- 		desc->infrags[0] = desc->outfrags[0];
--		sg_assign_page(&desc->infrags[0], in_page);
-+		desc->infrags[0].page = in_page;
- 		desc->fragno = 1;
- 		desc->fraglen = fraglen;
- 	} else {
-@@ -285,8 +282,6 @@ decryptor(struct scatterlist *sg, void *data)
- 	if (thislen == 0)
- 		return 0;
- 
--	sg_mark_end(&desc->frags[desc->fragno - 1]);
--
- 	ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
- 					  desc->frags, thislen);
- 	if (ret)
-diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
-index 8710117..0390210 100644
---- a/net/sunrpc/xprtrdma/svc_rdma.c
-+++ b/net/sunrpc/xprtrdma/svc_rdma.c
-@@ -260,7 +260,7 @@ int svc_rdma_init(void)
- 	dprintk("\tmax_inline       : %d\n", svcrdma_max_req_size);
- 	if (!svcrdma_table_header)
- 		svcrdma_table_header =
--			register_sysctl_table(svcrdma_root_table);
-+			register_sysctl_table(svcrdma_root_table, 0);
- 
- 	/* Create the temporary map cache */
- 	svc_rdma_map_cachep = kmem_cache_create("svc_rdma_map_cache",
-diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
-index a564c1a..5dfafc8 100644
---- a/net/sunrpc/xprtrdma/transport.c
-+++ b/net/sunrpc/xprtrdma/transport.c
-@@ -804,7 +804,7 @@ static int __init xprt_rdma_init(void)
- 
- #ifdef RPC_DEBUG
- 	if (!sunrpc_table_header)
--		sunrpc_table_header = register_sysctl_table(sunrpc_table);
-+		sunrpc_table_header = register_sysctl_table(sunrpc_table, 0);
- #endif
- 	return 0;
- }
-
-diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
-index 23a2b8f..9140b97 100644
---- a/net/sunrpc/rpc_pipe.c
-+++ b/net/sunrpc/rpc_pipe.c
-@@ -14,7 +14,7 @@
- #include <linux/pagemap.h>
- #include <linux/mount.h>
- #include <linux/namei.h>
--#include <linux/fsnotify.h>
-+#include <linux/dnotify.h>
- #include <linux/kernel.h>
- 
- #include <asm/ioctls.h>
-@@ -143,7 +143,8 @@ rpc_close_pipes(struct inode *inode)
- 		rpci->nwriters = 0;
- 		if (ops->release_pipe)
- 			ops->release_pipe(inode);
--		cancel_delayed_work_sync(&rpci->queue_timeout);
-+		cancel_delayed_work(&rpci->queue_timeout);
-+		flush_scheduled_work();
- 	}
- 	rpc_inode_setowner(inode, NULL);
- 	mutex_unlock(&inode->i_mutex);
-@@ -224,7 +225,7 @@ out:
- static ssize_t
- rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
- {
--	struct inode *inode = filp->f_path.dentry->d_inode;
-+	struct inode *inode = filp->f_dentry->d_inode;
- 	struct rpc_inode *rpci = RPC_I(inode);
- 	struct rpc_pipe_msg *msg;
- 	int res = 0;
-@@ -267,7 +268,7 @@ out_unlock:
- static ssize_t
- rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset)
- {
--	struct inode *inode = filp->f_path.dentry->d_inode;
-+	struct inode *inode = filp->f_dentry->d_inode;
- 	struct rpc_inode *rpci = RPC_I(inode);
- 	int res;
- 
-@@ -285,7 +286,7 @@ rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
- 	struct rpc_inode *rpci;
- 	unsigned int mask = 0;
- 
--	rpci = RPC_I(filp->f_path.dentry->d_inode);
-+	rpci = RPC_I(filp->f_dentry->d_inode);
- 	poll_wait(filp, &rpci->waitq, wait);
- 
- 	mask = POLLOUT | POLLWRNORM;
-@@ -300,7 +301,7 @@ static int
- rpc_pipe_ioctl(struct inode *ino, struct file *filp,
- 		unsigned int cmd, unsigned long arg)
- {
--	struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
-+	struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode);
- 	int len;
- 
- 	switch (cmd) {
-@@ -448,7 +449,7 @@ struct vfsmount *rpc_get_mount(void)
- {
- 	int err;
- 
--	err = simple_pin_fs(&rpc_pipe_fs_type, &rpc_mount, &rpc_mount_count);
-+	err = simple_pin_fs("rpc_pipefs", &rpc_mount, &rpc_mount_count);
- 	if (err != 0)
- 		return ERR_PTR(err);
- 	return rpc_mount;
-@@ -471,19 +472,22 @@ static struct dentry_operations rpc_dentry_operations = {
- static int
- rpc_lookup_parent(char *path, struct nameidata *nd)
- {
--	struct vfsmount *mnt;
--
- 	if (path[0] == '\0')
- 		return -ENOENT;
- 
--	mnt = rpc_get_mount();
--	if (IS_ERR(mnt)) {
-+	nd->mnt = rpc_get_mount();
-+	if (IS_ERR(nd->mnt)) {
- 		printk(KERN_WARNING "%s: %s failed to mount "
- 			       "pseudofilesystem \n", __FILE__, __func__);
--		return PTR_ERR(mnt);
-+		return PTR_ERR(nd->mnt);
- 	}
-+	mntget(nd->mnt);
-+	nd->dentry = dget(rpc_mount->mnt_root);
-+	nd->last_type = LAST_ROOT;
-+	nd->flags = LOOKUP_PARENT;
-+	nd->depth = 0;
- 
--	if (vfs_path_lookup(mnt->mnt_root, mnt, path, LOOKUP_PARENT, nd)) {
-+	if (path_walk(path, nd)) {
- 		printk(KERN_WARNING "%s: %s failed to find path %s\n",
- 				__FILE__, __func__, path);
- 		rpc_put_mount();
-@@ -495,7 +499,8 @@ rpc_lookup_parent(char *path, struct nameidata *nd)
- static void
- rpc_release_path(struct nameidata *nd)
- {
--	path_put(&nd->path);
-+	dput(nd->dentry);
-+	mntput(nd->mnt);
- 	rpc_put_mount();
- }
- 
-@@ -591,13 +596,12 @@ rpc_populate(struct dentry *parent,
- 		}
- 		inode->i_ino = i;
- 		if (files[i].i_fop)
--			inode->i_fop = files[i].i_fop;
-+			inode->i_fop = (struct file_operations *) files[i].i_fop;
- 		if (private)
- 			rpc_inode_setowner(inode, private);
- 		if (S_ISDIR(mode))
- 			inc_nlink(dir);
- 		d_add(dentry, inode);
--		fsnotify_create(dir, dentry);
- 	}
- 	mutex_unlock(&dir->i_mutex);
- 	return 0;
-@@ -619,7 +623,7 @@ __rpc_mkdir(struct inode *dir, struct dentry *dentry)
- 	inode->i_ino = iunique(dir->i_sb, 100);
- 	d_instantiate(dentry, inode);
- 	inc_nlink(dir);
--	fsnotify_mkdir(dir, dentry);
-+	inode_dir_notify(dir, DN_CREATE);
- 	return 0;
- out_err:
- 	printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
-@@ -668,8 +672,7 @@ rpc_lookup_negative(char *path, struct nameidata *nd)
- 
- 	if ((error = rpc_lookup_parent(path, nd)) != 0)
- 		return ERR_PTR(error);
--	dentry = rpc_lookup_create(nd->path.dentry, nd->last.name, nd->last.len,
--				   1);
-+	dentry = rpc_lookup_create(nd->dentry, nd->last.name, nd->last.len, 1);
- 	if (IS_ERR(dentry))
- 		rpc_release_path(nd);
- 	return dentry;
-@@ -696,7 +699,7 @@ rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
- 	dentry = rpc_lookup_negative(path, &nd);
- 	if (IS_ERR(dentry))
- 		return dentry;
--	dir = nd.path.dentry->d_inode;
-+	dir = nd.dentry->d_inode;
- 	if ((error = __rpc_mkdir(dir, dentry)) != 0)
- 		goto err_dput;
- 	RPC_I(dentry->d_inode)->private = rpc_client;
-@@ -788,14 +791,14 @@ rpc_mkpipe(struct dentry *parent, const char *name, void *private, struct rpc_pi
- 	if (!inode)
- 		goto err_dput;
- 	inode->i_ino = iunique(dir->i_sb, 100);
--	inode->i_fop = &rpc_pipe_fops;
-+	inode->i_fop = (struct file_operations *) &rpc_pipe_fops;
- 	d_instantiate(dentry, inode);
- 	rpci = RPC_I(inode);
- 	rpci->private = private;
- 	rpci->flags = flags;
- 	rpci->ops = ops;
- 	rpci->nkern_readwriters = 1;
--	fsnotify_create(dir, dentry);
-+	inode_dir_notify(dir, DN_CREATE);
- 	dget(dentry);
- out:
- 	mutex_unlock(&dir->i_mutex);
-@@ -882,11 +885,10 @@ out:
- 	return -ENOMEM;
- }
- 
--static int
--rpc_get_sb(struct file_system_type *fs_type,
--		int flags, const char *dev_name, void *data, struct vfsmount *mnt)
-+static struct super_block *
-+rpc_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
- {
--	return get_sb_single(fs_type, flags, data, rpc_fill_super, mnt);
-+	return get_sb_single(fs_type, flags, data, rpc_fill_super);
- }
- 
- static struct file_system_type rpc_pipe_fs_type = {
-diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
-index 4486c59..0dbff53 100644
---- a/net/sunrpc/xprtsock.c
-+++ b/net/sunrpc/xprtsock.c
-@@ -792,7 +792,7 @@ static void xs_destroy(struct rpc_xprt *xprt)
- 
- 	dprintk("RPC:       xs_destroy xprt %p\n", xprt);
- 
--	cancel_rearming_delayed_work(&transport->connect_worker);
-+	cancel_delayed_work(&transport->connect_worker);
- 
- 	xs_close(xprt);
- 	xs_free_peer_addresses(xprt);
-@@ -856,12 +856,8 @@ static void xs_udp_data_ready(struct sock *sk, int len)
- 		copied = repsize;
- 
- 	/* Suck it into the iovec, verify checksum if not done by hw. */
--	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
--		UDPX_INC_STATS_BH(sk, UDP_MIB_INERRORS);
-+	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb))
- 		goto out_unlock;
--	}
--
--	UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS);
- 
- 	/* Something worked... */
- 	dst_confirm(skb->dst);
-@@ -2064,7 +2060,7 @@ int init_socket_xprt(void)
- {
- #ifdef RPC_DEBUG
- 	if (!sunrpc_table_header)
--		sunrpc_table_header = register_sysctl_table(sunrpc_table);
-+		sunrpc_table_header = register_sysctl_table(sunrpc_table, 0);
- #endif
- 
- 	xprt_register_transport(&xs_udp_transport);
-diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
-index c996671..0d27db6 100644
---- a/net/sunrpc/cache.c
-+++ b/net/sunrpc/cache.c
-@@ -316,28 +316,31 @@ static int create_cache_proc_entries(struct cache_detail *cd)
- 	cd->proc_ent->owner = cd->owner;
- 	cd->channel_ent = cd->content_ent = NULL;
- 
--	p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
--			     cd->proc_ent, &cache_flush_operations, cd);
-+	p = create_proc_entry("flush", S_IFREG|S_IRUSR|S_IWUSR, cd->proc_ent);
- 	cd->flush_ent = p;
- 	if (p == NULL)
- 		goto out_nomem;
-+	p->proc_fops = (struct file_operations *) &cache_flush_operations;
- 	p->owner = cd->owner;
-+	p->data = cd;
- 
- 	if (cd->cache_request || cd->cache_parse) {
--		p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
--				     cd->proc_ent, &cache_file_operations, cd);
-+		p = create_proc_entry("channel", S_IFREG|S_IRUSR|S_IWUSR, cd->proc_ent);
- 		cd->channel_ent = p;
- 		if (p == NULL)
- 			goto out_nomem;
-+		p->proc_fops = (struct file_operations *) &cache_file_operations;
- 		p->owner = cd->owner;
-+		p->data = cd;
- 	}
- 	if (cd->cache_show) {
--		p = proc_create_data("content", S_IFREG|S_IRUSR|S_IWUSR,
--				cd->proc_ent, &content_file_operations, cd);
-+		p = create_proc_entry("content", S_IFREG|S_IRUSR|S_IWUSR, cd->proc_ent);
- 		cd->content_ent = p;
- 		if (p == NULL)
- 			goto out_nomem;
-+		p->proc_fops = (struct file_operations *) &content_file_operations;
- 		p->owner = cd->owner;
-+		p->data = cd;
- 	}
- 	return 0;
- out_nomem:
-@@ -393,7 +396,8 @@ void cache_unregister(struct cache_detail *cd)
- 	remove_cache_proc_entries(cd);
- 	if (list_empty(&cache_list)) {
- 		/* module must be being unloaded so its safe to kill the worker */
--		cancel_delayed_work_sync(&cache_cleaner);
-+		cancel_delayed_work(&cache_cleaner);
-+		flush_scheduled_work();
- 	}
- 	return;
- out:
-@@ -696,7 +700,7 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
- {
- 	struct cache_reader *rp = filp->private_data;
- 	struct cache_request *rq;
--	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
-+	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
- 	int err;
- 
- 	if (count == 0)
-@@ -773,7 +777,7 @@ cache_write(struct file *filp, const char __user *buf, size_t count,
- 	    loff_t *ppos)
- {
- 	int err;
--	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
-+	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
- 
- 	if (count == 0)
- 		return 0;
-@@ -804,7 +808,7 @@ cache_poll(struct file *filp, poll_table *wait)
- 	unsigned int mask;
- 	struct cache_reader *rp = filp->private_data;
- 	struct cache_queue *cq;
--	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
-+	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
- 
- 	poll_wait(filp, &queue_wait, wait);
- 
-@@ -1248,15 +1252,22 @@ static const struct seq_operations cache_content_op = {
- 
- static int content_open(struct inode *inode, struct file *file)
- {
-+	int res;
- 	struct handle *han;
- 	struct cache_detail *cd = PDE(inode)->data;
- 
--	han = __seq_open_private(file, &cache_content_op, sizeof(*han));
-+	han = kmalloc(sizeof(*han), GFP_KERNEL);
- 	if (han == NULL)
- 		return -ENOMEM;
- 
- 	han->cd = cd;
--	return 0;
-+
-+	res = seq_open(file, (struct seq_operations *) &cache_content_op);
-+       if (res)
-+               kfree(han);
-+       else
-+               ((struct seq_file *)file->private_data)->private = han;
-+       return res;
- }
- 
- static const struct file_operations content_file_operations = {
-@@ -1269,7 +1280,7 @@ static const struct file_operations content_file_operations = {
- static ssize_t read_flush(struct file *file, char __user *buf,
- 			    size_t count, loff_t *ppos)
- {
--	struct cache_detail *cd = PDE(file->f_path.dentry->d_inode)->data;
-+	struct cache_detail *cd = PDE(file->f_dentry->d_inode)->data;
- 	char tbuf[20];
- 	unsigned long p = *ppos;
- 	size_t len;
-@@ -1290,7 +1301,7 @@ static ssize_t read_flush(struct file *file, char __user *buf,
- static ssize_t write_flush(struct file * file, const char __user * buf,
- 			     size_t count, loff_t *ppos)
- {
--	struct cache_detail *cd = PDE(file->f_path.dentry->d_inode)->data;
-+	struct cache_detail *cd = PDE(file->f_dentry->d_inode)->data;
- 	char tbuf[20];
- 	char *ep;
- 	long flushtime;
-diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
-index 24db2b4..6f9e46c 100644
---- a/net/sunrpc/rpcb_clnt.c
-+++ b/net/sunrpc/rpcb_clnt.c
-@@ -117,18 +117,6 @@ static void rpcb_map_release(void *data)
- 	kfree(map);
- }
- 
--static const struct sockaddr_in rpcb_inaddr_loopback = {
--	.sin_family		= AF_INET,
--	.sin_addr.s_addr	= htonl(INADDR_LOOPBACK),
--	.sin_port		= htons(RPCBIND_PORT),
--};
--
--static const struct sockaddr_in6 rpcb_in6addr_loopback = {
--	.sin6_family		= AF_INET6,
--	.sin6_addr		= IN6ADDR_LOOPBACK_INIT,
--	.sin6_port		= htons(RPCBIND_PORT),
--};
--
- static struct rpc_clnt *rpcb_create_local(struct sockaddr *addr,
- 					  size_t addrlen, u32 version)
- {
-@@ -249,6 +237,12 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay)
- 		.rpc_resp	= okay,
- 	};
- 
-+	struct sockaddr_in rpcb_inaddr_loopback = {
-+		.sin_family		= AF_INET,
-+		.sin_addr.s_addr	= htonl(INADDR_LOOPBACK),
-+		.sin_port		= htons(RPCBIND_PORT),
-+	};
-+
- 	dprintk("RPC:       %sregistering (%u, %u, %d, %u) with local "
- 			"rpcbind\n", (port ? "" : "un"),
- 			prog, vers, prot, port);
-@@ -272,6 +266,12 @@ static int rpcb_register_netid4(struct sockaddr_in *address_to_register,
- 	unsigned short port = ntohs(address_to_register->sin_port);
- 	char buf[32];
- 
-+	struct sockaddr_in rpcb_inaddr_loopback = {
-+		.sin_family		= AF_INET,
-+		.sin_addr.s_addr	= htonl(INADDR_LOOPBACK),
-+		.sin_port		= htons(RPCBIND_PORT),
-+	};
-+
- 	/* Construct AF_INET universal address */
- 	snprintf(buf, sizeof(buf),
- 			NIPQUAD_FMT".%u.%u",
-@@ -303,6 +303,12 @@ static int rpcb_register_netid6(struct sockaddr_in6 *address_to_register,
- 	unsigned short port = ntohs(address_to_register->sin6_port);
- 	char buf[64];
- 
-+	struct sockaddr_in6 rpcb_in6addr_loopback = {
-+		.sin6_family		= AF_INET6,
-+		.sin6_addr		= IN6ADDR_LOOPBACK_INIT,
-+		.sin6_port		= htons(RPCBIND_PORT),
-+	};
-+
- 	/* Construct AF_INET6 universal address */
- 	snprintf(buf, sizeof(buf),
- 			NIP6_FMT".%u.%u",

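The rpcb_clnt.c hunks just above (in the deleted rnfs_net.patch) drop the file-scope const rpcb_inaddr_loopback/rpcb_in6addr_loopback definitions and re-create them as function-local initializers of the same shape in each caller. The following is a minimal userspace sketch of that initializer pattern, under the assumption of ordinary POSIX socket headers; RPCBIND_PORT (111, the portmapper port) is defined locally here purely for illustration and is not taken from the kernel headers.

    /*
     * Sketch of the function-local AF_INET loopback/rpcbind address
     * initializer that the hunks above move out of file scope.
     */
    #include <stdio.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>

    #define RPCBIND_PORT 111

    int main(void)
    {
            /* Same designated-initializer form as the post-backport code. */
            struct sockaddr_in rpcb_inaddr_loopback = {
                    .sin_family      = AF_INET,
                    .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
                    .sin_port        = htons(RPCBIND_PORT),
            };
            char buf[INET_ADDRSTRLEN];

            inet_ntop(AF_INET, &rpcb_inaddr_loopback.sin_addr, buf, sizeof(buf));
            printf("%s:%u\n", buf, (unsigned)ntohs(rpcb_inaddr_loopback.sin_port));
            return 0;
    }
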
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/core_2_z010_sysfs_to_2.6.18.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/core_2_z010_sysfs_to_2.6.18.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/core_2_z010_sysfs_to_2.6.18.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,32 @@
+--- a/drivers/infiniband/core/device.c	2009-02-24 14:00:50.387541000 +0200
++++ b/drivers/infiniband/core/device.c	2009-02-24 14:00:54.891362000 +0200
+@@ -199,7 +199,7 @@ void ib_dealloc_device(struct ib_device 
+ 
+ 	BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);
+ 
+-	kobject_put(&device->dev.kobj);
++	ib_device_unregister_sysfs(device);
+ }
+ EXPORT_SYMBOL(ib_dealloc_device);
+ 
+@@ -356,8 +356,6 @@ void ib_unregister_device(struct ib_devi
+ 
+ 	mutex_unlock(&device_mutex);
+ 
+-	ib_device_unregister_sysfs(device);
+-
+ 	spin_lock_irqsave(&device->client_data_lock, flags);
+ 	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ 		kfree(context);
+--- a/drivers/infiniband/core/sysfs.c	2009-02-24 13:58:42.509754000 +0200
++++ b/drivers/infiniband/core/sysfs.c	2009-02-24 14:00:54.894368000 +0200
+@@ -863,9 +863,6 @@ void ib_device_unregister_sysfs(struct i
+ 	struct kobject *p, *t;
+ 	struct ib_port *port;
+ 
+-	/* Hold kobject until ib_dealloc_device() */
+-	kobject_get(&device->dev.kobj);
+-
+ 	list_for_each_entry_safe(p, t, &device->port_list, entry) {
+ 		list_del(&p->entry);
+ 		port = container_of(p, struct ib_port, kobj);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/core_z0010_sysfs_race.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/core_z0010_sysfs_race.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/core_z0010_sysfs_race.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,496 @@
+ib_core: avoid race condition between sysfs access and low-level module unload. (V2)
+
+In newer kernels, although a low-level module will not be unloaded (code)
+while its sysfs interface is being accessed, it is possible for the module to
+free all its resources (data) during such access.  This almost always causes
+a kernel Oops.
+
+To avoid this, we protect the device reg_state with a mutex, and perform
+all sysfs operations (show, store) atomically within this mutex.
+
+V2: fix thinko bug in sysfs_state_show changes(found by Ralph Campbell).
+
+Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
+Signed-off-by: Ralph Campbell <ralph.campbell at qlogic.com>
+
+Index: ofed_kernel/drivers/infiniband/core/device.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/device.c	2009-02-22 15:36:34.531430000 +0200
++++ ofed_kernel/drivers/infiniband/core/device.c	2009-02-22 15:38:28.650322000 +0200
+@@ -178,9 +178,14 @@ static int end_port(struct ib_device *de
+  */
+ struct ib_device *ib_alloc_device(size_t size)
+ {
++	struct ib_device *ibdev;
++
+ 	BUG_ON(size < sizeof (struct ib_device));
+ 
+-	return kzalloc(size, GFP_KERNEL);
++	ibdev = kzalloc(size, GFP_KERNEL);
++	if (ibdev)
++		mutex_init(&ibdev->sysfs_mutex);
++	return ibdev;
+ }
+ EXPORT_SYMBOL(ib_alloc_device);
+ 
+@@ -313,9 +318,10 @@ int ib_register_device(struct ib_device 
+ 		goto out;
+ 	}
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	list_add_tail(&device->core_list, &device_list);
+-
+ 	device->reg_state = IB_DEV_REGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ 
+ 	{
+ 		struct ib_client *client;
+@@ -361,7 +367,9 @@ void ib_unregister_device(struct ib_devi
+ 		kfree(context);
+ 	spin_unlock_irqrestore(&device->client_data_lock, flags);
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	device->reg_state = IB_DEV_UNREGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ }
+ EXPORT_SYMBOL(ib_unregister_device);
+ 
+Index: ofed_kernel/drivers/infiniband/core/sysfs.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/sysfs.c	2009-02-22 15:36:35.914354000 +0200
++++ ofed_kernel/drivers/infiniband/core/sysfs.c	2009-02-22 16:12:37.485629000 +0200
+@@ -94,7 +94,7 @@ static ssize_t state_show(struct ib_port
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+ 	static const char *state_name[] = {
+ 		[IB_PORT_NOP]		= "NOP",
+@@ -105,26 +105,33 @@ static ssize_t state_show(struct ib_port
+ 		[IB_PORT_ACTIVE_DEFER]	= "ACTIVE_DEFER"
+ 	};
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%d: %s\n", attr.state,
+-		       attr.state >= 0 && attr.state < ARRAY_SIZE(state_name) ?
+-		       state_name[attr.state] : "UNKNOWN");
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d: %s\n", attr.state,
++				      attr.state >= 0 &&
++				      attr.state < ARRAY_SIZE(state_name) ?
++				      state_name[attr.state] : "UNKNOWN");
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "0x%x\n", attr.lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_mask_count_show(struct ib_port *p,
+@@ -132,52 +139,64 @@ static ssize_t lid_mask_count_show(struc
+ 				   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.lmc);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.lmc);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%x\n", attr.sm_lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.sm_lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused,
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.sm_sl);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.sm_sl);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused,
+ 			     char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
+@@ -186,24 +205,33 @@ static ssize_t rate_show(struct ib_port 
+ 	struct ib_port_attr attr;
+ 	char *speed = "";
+ 	int rate;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	switch (attr.active_speed) {
+-	case 2: speed = " DDR"; break;
+-	case 4: speed = " QDR"; break;
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.active_speed) {
++			case 2: speed = " DDR"; break;
++			case 4: speed = " QDR"; break;
++			}
++
++			rate = 25 * ib_width_enum_to_int(attr.active_width) *
++				attr.active_speed;
++			if (rate < 0) {
++				ret = -EINVAL;
++				goto out;
++			}
++
++			ret = sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
++				      rate / 10, rate % 10 ? ".5" : "",
++				      ib_width_enum_to_int(attr.active_width),
++				      speed);
++		}
+ 	}
+-
+-	rate = 25 * ib_width_enum_to_int(attr.active_width) * attr.active_speed;
+-	if (rate < 0)
+-		return -EINVAL;
+-
+-	return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
+-		       rate / 10, rate % 10 ? ".5" : "",
+-		       ib_width_enum_to_int(attr.active_width), speed);
++out:
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
+@@ -211,22 +239,42 @@ static ssize_t phys_state_show(struct ib
+ {
+ 	struct ib_port_attr attr;
+ 
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	switch (attr.phys_state) {
+-	case 1:  return sprintf(buf, "1: Sleep\n");
+-	case 2:  return sprintf(buf, "2: Polling\n");
+-	case 3:  return sprintf(buf, "3: Disabled\n");
+-	case 4:  return sprintf(buf, "4: PortConfigurationTraining\n");
+-	case 5:  return sprintf(buf, "5: LinkUp\n");
+-	case 6:  return sprintf(buf, "6: LinkErrorRecovery\n");
+-	case 7:  return sprintf(buf, "7: Phy Test\n");
+-	default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.phys_state) {
++			case 1:
++				ret = sprintf(buf, "1: Sleep\n");
++				break;
++			case 2:
++				ret = sprintf(buf, "2: Polling\n");
++				break;
++			case 3:
++				ret = sprintf(buf, "3: Disabled\n");
++				break;
++			case 4:
++				ret = sprintf(buf, "4: PortConfigurationTraining\n");
++				break;
++			case 5:
++				ret = sprintf(buf, "5: LinkUp\n");
++				break;
++			case 6:
++				ret = sprintf(buf, "6: LinkErrorRecovery\n");
++				break;
++			case 7:
++				ret = sprintf(buf, "7: Phy Test\n");
++				break;
++			default:
++				ret = sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++				break;
++			}
++		}
+ 	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static PORT_ATTR_RO(state);
+@@ -256,21 +304,24 @@ static ssize_t show_port_gid(struct ib_p
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	union ib_gid gid;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) gid.raw)[0]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[1]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[2]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[3]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[4]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[5]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[6]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[7]));
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) gid.raw)[0]),
++				      be16_to_cpu(((__be16 *) gid.raw)[1]),
++				      be16_to_cpu(((__be16 *) gid.raw)[2]),
++				      be16_to_cpu(((__be16 *) gid.raw)[3]),
++				      be16_to_cpu(((__be16 *) gid.raw)[4]),
++				      be16_to_cpu(((__be16 *) gid.raw)[5]),
++				      be16_to_cpu(((__be16 *) gid.raw)[6]),
++				      be16_to_cpu(((__be16 *) gid.raw)[7]));
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
+@@ -279,13 +330,16 @@ static ssize_t show_port_pkey(struct ib_
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	u16 pkey;
+-	ssize_t ret;
+-
+-	ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%04x\n", pkey);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
++		if (!ret)
++			ret = sprintf(buf, "0x%04x\n", pkey);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ #define PORT_PMA_ATTR(_name, _counter, _width, _offset)			\
+@@ -308,6 +346,12 @@ static ssize_t show_pma_counter(struct i
+ 	if (!p->ibdev->process_mad)
+ 		return sprintf(buf, "N/A (no PMA)\n");
+ 
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (!ibdev_is_alive(p->ibdev)) {
++		ret = -ENODEV;
++		goto out;
++	}
++
+ 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
+ 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ 	if (!in_mad || !out_mad) {
+@@ -354,7 +414,7 @@ static ssize_t show_pma_counter(struct i
+ out:
+ 	kfree(in_mad);
+ 	kfree(out_mad);
+-
++	mutex_unlock(&p->ibdev->sysfs_mutex);
+ 	return ret;
+ }
+ 
+@@ -594,20 +654,20 @@ static ssize_t show_sys_image_guid(struc
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_attr attr;
+-	ssize_t ret;
+-
+-	if (!ibdev_is_alive(dev))
+-		return -ENODEV;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_device(dev, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_query_device(dev, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_node_guid(struct device *device,
+@@ -639,17 +699,20 @@ static ssize_t set_node_desc(struct devi
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_modify desc = {};
+-	int ret;
++	int ret = -ENODEV;
+ 
+ 	if (!dev->modify_device)
+ 		return -EIO;
+ 
+ 	memcpy(desc.node_desc, buf, min_t(int, count, 64));
+-	ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
+-	if (ret)
+-		return ret;
+-
+-	return count;
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
++		if (!ret)
++			ret = count;
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
+@@ -677,14 +740,18 @@ static ssize_t show_protocol_stat(const 
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	union rdma_protocol_stats stats;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = dev->get_protocol_stats(dev, &stats);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%llu\n",
+-		       (unsigned long long) ((u64 *) &stats)[offset]);
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = dev->get_protocol_stats(dev, &stats);
++		if (!ret)
++			ret = sprintf(buf, "%llu\n",
++				      (unsigned long long)
++				      ((u64 *) &stats)[offset]);
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ /* generate a read-only iwarp statistics attribute */
+Index: ofed_kernel/include/rdma/ib_verbs.h
+===================================================================
+--- ofed_kernel.orig/include/rdma/ib_verbs.h	2009-02-22 15:36:40.252210000 +0200
++++ ofed_kernel/include/rdma/ib_verbs.h	2009-02-22 15:38:28.678322000 +0200
+@@ -1205,6 +1205,7 @@ struct ib_device {
+ 		IB_DEV_REGISTERED,
+ 		IB_DEV_UNREGISTERED
+ 	}                            reg_state;
++	struct mutex		     sysfs_mutex;
+ 
+ 	u64			     uverbs_cmd_mask;
+ 	int			     uverbs_abi_ver;
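
For reference, every show handler touched in the sysfs.c hunks above applies the same guard: take the new per-device sysfs_mutex, fall back to -ENODEV unless the device is still alive, and only then perform the query. A minimal sketch of that pattern, reusing only names that appear in the hunks themselves (the handler name here is illustrative):

static ssize_t example_show(struct ib_port *p, struct port_attribute *unused,
                            char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret = -ENODEV;

        mutex_lock(&p->ibdev->sysfs_mutex);
        if (ibdev_is_alive(p->ibdev)) {
                ret = ib_query_port(p->ibdev, p->port_num, &attr);
                if (!ret)
                        ret = sprintf(buf, "%d\n", attr.lmc);
        }
        mutex_unlock(&p->ibdev->sysfs_mutex);
        return ret;
}

The mutex itself is the field added to struct ib_device in the ib_verbs.h hunk; for the ibdev_is_alive() check to be meaningful, the unregistration path (not shown in this excerpt) presumably takes the same mutex around device teardown.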

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/cxgb3_0002_undo_250.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/cxgb3_0002_undo_250.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/cxgb3_0002_undo_250.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,8 @@
-commit 204e2f98c2d13f869b8541f3c57c7314f75cab11
-Author: Divy Le Ray <divy at chelsio.com>
-Date:   Tue May 6 19:26:01 2008 -0700
-
-    cxgb3 - fix EEH
-    
-    Reset the chip when the PCI link goes down.
-    Preserve the napi structure when a sge qset's resources are freed.
-    Replay only HW initialization when the chip comes out of reset.
-    
-    Signed-off-by: Divy Le ray <divy at chelsio.com>
-    Signed-off-by: Jeff Garzik <jgarzik at redhat.com>
-
----
- drivers/net/cxgb3/common.h     |    1 -
- drivers/net/cxgb3/cxgb3_main.c |   10 ++++------
- drivers/net/cxgb3/regs.h       |    8 --------
- drivers/net/cxgb3/sge.c        |   31 ++-----------------------------
- drivers/net/cxgb3/t3_hw.c      |   28 ----------------------------
- 5 files changed, 6 insertions(+), 72 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/common.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/common.h
-+++ ofed_kernel/drivers/net/cxgb3/common.h
-@@ -700,7 +700,6 @@ void mac_prep(struct cmac *mac, struct a
+diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
+index d5e9bf7..dc2c79d 100644
+--- a/drivers/net/cxgb3/common.h
++++ b/drivers/net/cxgb3/common.h
+@@ -726,7 +726,6 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
  void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
  int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
  		    int reset);
@@ -31,11 +10,11 @@
  void t3_led_ready(struct adapter *adapter);
  void t3_fatal_err(struct adapter *adapter);
  void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -2449,6 +2449,9 @@ static pci_ers_result_t t3_io_error_dete
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 8de820e..48fbda6 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -2485,6 +2485,9 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
  	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
  		offload_close(&adapter->tdev);
  
@@ -45,7 +24,7 @@
  	adapter->flags &= ~FULL_INIT_DONE;
  
  	pci_disable_device(pdev);
-@@ -2473,12 +2476,8 @@ static pci_ers_result_t t3_io_slot_reset
+@@ -2509,12 +2512,8 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
  		goto err;
  	}
  	pci_set_master(pdev);
@@ -59,7 +38,7 @@
  		goto err;
  
  	return PCI_ERS_RESULT_RECOVERED;
-@@ -2630,7 +2629,6 @@ static int __devinit init_one(struct pci
+@@ -2666,7 +2665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
  	}
  
  	pci_set_master(pdev);
@@ -67,10 +46,10 @@
  
  	mmio_start = pci_resource_start(pdev, 0);
  	mmio_len = pci_resource_len(pdev, 0);
-Index: ofed_kernel/drivers/net/cxgb3/regs.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/regs.h
-+++ ofed_kernel/drivers/net/cxgb3/regs.h
+diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
+index a035d5c..086cd02 100644
+--- a/drivers/net/cxgb3/regs.h
++++ b/drivers/net/cxgb3/regs.h
 @@ -444,14 +444,6 @@
  
  #define A_PCIE_CFG 0x88
@@ -86,11 +65,11 @@
  #define S_PCIE_CLIDECEN    16
  #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
  #define F_PCIE_CLIDECEN    V_PCIE_CLIDECEN(1U)
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index 1b0861d..912f816 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
  }
  
  /**
@@ -124,7 +103,7 @@
   *	free_qset - free the resources of an SGE queue set
   *	@adapter: the adapter owning the queue set
   *	@q: the queue set
-@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter 
+@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -133,20 +112,11 @@
  }
  
  /**
-@@ -1429,7 +1402,7 @@ static void restart_ctrlq(unsigned long 
-  */
- int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
- {
--	int ret;
-+	int ret;
- 	local_bh_disable();
- 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
- 	local_bh_enable();
-Index: ofed_kernel/drivers/net/cxgb3/t3_hw.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/t3_hw.c
-+++ ofed_kernel/drivers/net/cxgb3/t3_hw.c
-@@ -3265,7 +3265,6 @@ static void config_pcie(struct adapter *
+diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
+index bf5c076..533fc74 100644
+--- a/drivers/net/cxgb3/t3_hw.c
++++ b/drivers/net/cxgb3/t3_hw.c
+@@ -3303,7 +3303,6 @@ static void config_pcie(struct adapter *adap)
  
  	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
  	t3_set_reg_field(adap, A_PCIE_CFG, 0,
@@ -154,15 +124,14 @@
  			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
  }
  
-@@ -3657,30 +3656,3 @@ void t3_led_ready(struct adapter *adapte
- 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+@@ -3706,31 +3705,3 @@ void t3_led_ready(struct adapter *adapter)
  			 F_GPIO0_OUT_VAL);
  }
--
+ 
 -int t3_replay_prep_adapter(struct adapter *adapter)
 -{
 -	const struct adapter_info *ai = adapter->params.info;
--	unsigned int i, j = 0;
+-	unsigned int i, j = -1;
 -	int ret;
 -
 -	early_hw_init(adapter, ai);
@@ -171,15 +140,17 @@
 -		return ret;
 -
 -	for_each_port(adapter, i) {
+-		const struct port_type_info *pti;
 -		struct port_info *p = adap2pinfo(adapter, i);
--		while (!adapter->params.vpd.port_type[j])
--			++j;
 -
--		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
--					ai->mdio_ops);
+-		while (!adapter->params.vpd.port_type[++j])
+-			;
 -
+-		pti = &port_types[adapter->params.vpd.port_type[j]];
+-		ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
+-		if (ret)
+-			return ret;
 -		p->phy.ops->power_down(&p->phy, 1);
--		++j;
 -	}
 -
 -return 0;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/cxgb3_0010_napi.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/cxgb3_0010_napi.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/cxgb3_0010_napi.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,21 @@
----
- drivers/net/cxgb3/adapter.h    |   22 +++--
- drivers/net/cxgb3/cxgb3_main.c |   94 ++++++++++++++++------
- drivers/net/cxgb3/sge.c        |  170 ++++++++++++++++++++++++-----------------
- 3 files changed, 185 insertions(+), 101 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/adapter.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/adapter.h
-+++ ofed_kernel/drivers/net/cxgb3/adapter.h
-@@ -47,13 +47,11 @@
- #include <asm/io.h>
+diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
+index 3b33ecb..21dad82 100644
+--- a/drivers/net/cxgb3/adapter.h
++++ b/drivers/net/cxgb3/adapter.h
+@@ -48,12 +48,10 @@
  
  struct vlan_group;
--struct adapter;
+ struct adapter;
 -struct sge_qset;
  
-+struct adapter;
  struct port_info {
  	struct adapter *adapter;
  	struct vlan_group *vlan_grp;
 -	struct sge_qset *qs;
- 	const struct port_type_info *port_type;
  	u8 port_id;
  	u8 rx_csum_offload;
-@@ -184,8 +182,6 @@ enum {				/* per port SGE statistics */
+ 	u8 nqsets;
+@@ -183,8 +181,6 @@ enum {				/* per port SGE statistics */
  #define T3_MAX_LRO_MAX_PKTS 64
  
  struct sge_qset {		/* an SGE queue set */
@@ -32,7 +24,7 @@
  	struct sge_rspq rspq;
  	struct sge_fl fl[SGE_RXQ_PER_SET];
  	struct sge_txq txq[SGE_TXQ_PER_SET];
-@@ -196,7 +192,7 @@ struct sge_qset {		/* an SGE queue set *
+@@ -195,7 +191,7 @@ struct sge_qset {		/* an SGE queue set */
  	int lro_enabled;
  	int lro_frag_len;
  	void *lro_va;
@@ -41,7 +33,7 @@
  	unsigned long txq_stopped;	/* which Tx queues are stopped */
  	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
  	unsigned long port_stats[SGE_PSTAT_MAX];
-@@ -241,6 +237,12 @@ struct adapter {
+@@ -240,6 +236,12 @@ struct adapter {
  	struct delayed_work adap_check_task;
  	struct work_struct ext_intr_handler_task;
  
@@ -54,7 +46,7 @@
  	struct dentry *debugfs_root;
  
  	struct mutex mdio_lock;
-@@ -267,6 +269,12 @@ static inline struct port_info *adap2pin
+@@ -266,6 +268,12 @@ static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
  	return netdev_priv(adap->port[idx]);
  }
  
@@ -67,7 +59,7 @@
  #define OFFLOAD_DEVMAP_BIT 15
  
  #define tdev2adap(d) container_of(d, struct adapter, tdev)
-@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, str
+@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -76,11 +68,11 @@
  int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
  		unsigned char *data);
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -410,17 +410,49 @@ static void setup_rss(struct adapter *ad
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 0f4c694..342d441 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -435,17 +435,49 @@ static void setup_rss(struct adapter *adap)
  		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
  }
  
@@ -98,9 +90,7 @@
 -	int i;
 +	int i, j, dummy_idx = 0;
 +	struct net_device *nd;
- 
--	for (i = 0; i < SGE_QSETS; i++) {
--		struct sge_qset *qs = &adap->sge.qs[i];
++
 +	for_each_port(adap, i) {
 +		struct net_device *dev = adap->port[i];
 +		const struct port_info *pi = netdev_priv(dev);
@@ -112,7 +102,9 @@
 +				nd = alloc_netdev(sizeof(*p), "", ether_setup);
 +				if (!nd)
 +					goto free_all;
-+
+ 
+-	for (i = 0; i < SGE_QSETS; i++) {
+-		struct sge_qset *qs = &adap->sge.qs[i];
 +				p = netdev_priv(nd);
 +				p->adapter = adap;
 +				nd->weight = 64;
@@ -137,7 +129,7 @@
  }
  
  /*
-@@ -431,18 +463,20 @@ static void init_napi(struct adapter *ad
+@@ -456,18 +488,20 @@ static void init_napi(struct adapter *adap)
  static void quiesce_rx(struct adapter *adap)
  {
  	int i;
@@ -168,7 +160,7 @@
  }
  
  /**
-@@ -455,7 +489,7 @@ static void enable_all_napi(struct adapt
+@@ -480,7 +514,7 @@ static void enable_all_napi(struct adapter *adap)
   */
  static int setup_sge_qsets(struct adapter *adap)
  {
@@ -177,7 +169,7 @@
  	unsigned int ntxq = SGE_TXQ_PER_SET;
  
  	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
-@@ -463,14 +497,15 @@ static int setup_sge_qsets(struct adapte
+@@ -488,14 +522,15 @@ static int setup_sge_qsets(struct adapter *adap)
  
  	for_each_port(adap, i) {
  		struct net_device *dev = adap->port[i];
@@ -196,7 +188,7 @@
  			if (err) {
  				t3_free_sge_resources(adap);
  				return err;
-@@ -884,6 +919,10 @@ static int cxgb_up(struct adapter *adap)
+@@ -909,6 +944,10 @@ static int cxgb_up(struct adapter *adap)
  				goto out;
  		}
  
@@ -207,7 +199,7 @@
  		err = t3_init_hw(adap, 0);
  		if (err)
  			goto out;
-@@ -896,7 +935,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -921,7 +960,6 @@ static int cxgb_up(struct adapter *adap)
  			goto out;
  
  		setup_rss(adap);
@@ -215,7 +207,7 @@
  		adap->flags |= FULL_INIT_DONE;
  	}
  
-@@ -924,7 +962,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -949,7 +987,6 @@ static int cxgb_up(struct adapter *adap)
  				      adap->name, adap)))
  		goto irq_err;
  
@@ -223,7 +215,7 @@
  	t3_sge_start(adap);
  	t3_intr_enable(adap);
  
-@@ -1061,10 +1098,8 @@ static int cxgb_open(struct net_device *
+@@ -1086,10 +1123,8 @@ static int cxgb_open(struct net_device *dev)
  	int other_ports = adapter->open_device_map & PORT_MASK;
  	int err;
  
@@ -235,7 +227,7 @@
  
  	set_bit(pi->port_id, &adapter->open_device_map);
  	if (is_offload(adapter) && !ofld_disable) {
-@@ -2700,6 +2735,7 @@ static int __devinit init_one(struct pci
+@@ -2736,6 +2771,7 @@ static int __devinit init_one(struct pci_dev *pdev,
  #ifdef CONFIG_NET_POLL_CONTROLLER
  		netdev->poll_controller = cxgb_netpoll;
  #endif
@@ -243,7 +235,7 @@
  
  		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
  	}
-@@ -2800,6 +2836,12 @@ static void __devexit remove_one(struct 
+@@ -2836,6 +2872,12 @@ static void __devexit remove_one(struct pci_dev *pdev)
  		t3_free_sge_resources(adapter);
  		cxgb_disable_msi(adapter);
  
@@ -256,11 +248,11 @@
  		for_each_port(adapter, i)
  			if (adapter->port[i])
  				free_netdev(adapter->port[i]);
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter 
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index f6bc6fe..3bbf626 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -270,7 +262,7 @@
  	memset(q, 0, sizeof(*q));
  }
  
-@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, str
+@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
  	unsigned int ndesc, pidx, credits, gen, compl;
  	const struct port_info *pi = netdev_priv(dev);
  	struct adapter *adap = pi->adapter;
@@ -279,7 +271,7 @@
  	struct sge_txq *q = &qs->txq[TXQ_ETH];
  
  	/*
-@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long 
+@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long data)
  	struct sk_buff *skb;
  	struct sge_qset *qs = (struct sge_qset *)data;
  	struct sge_txq *q = &qs->txq[TXQ_CTRL];
@@ -295,7 +287,7 @@
  
  		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
  
-@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long 
+@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long data)
  
  	spin_unlock(&q->lock);
  	wmb();
@@ -304,7 +296,7 @@
  		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
  }
  
-@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struc
+@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
  	else {
  		struct sge_qset *qs = rspq_to_qset(q);
  
@@ -314,7 +306,7 @@
  		q->rx_head = skb;
  	}
  	q->rx_tail = skb;
-@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundl
+@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev,
   *	receive handler.  Batches need to be of modest size as we do prefetches
   *	on the packets in each.
   */
@@ -357,7 +349,7 @@
  			prefetch(head->data);
  			skbs[ngathered] = head;
  			head = head->next;
-@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct 
+@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct *napi, int budget)
  		}
  		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
  	}
@@ -370,7 +362,7 @@
  }
  
  /**
-@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const
+@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const struct rsp_desc *r)
  
  /**
   *	napi_rx_handler - the NAPI handler for Rx processing
@@ -449,7 +441,7 @@
  }
  
  /**
-@@ -2448,7 +2462,8 @@ static inline int handle_responses(struc
+@@ -2448,7 +2462,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
  		return 0;
  	}
@@ -459,7 +451,7 @@
  	return 1;
  }
  
-@@ -2459,7 +2474,8 @@ static inline int handle_responses(struc
+@@ -2459,7 +2474,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -469,7 +461,7 @@
  	struct sge_rspq *q = &qs->rspq;
  
  	spin_lock(&q->lock);
-@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, vo
+@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -484,7 +476,7 @@
  		q->unhandled_irqs++;
  	spin_unlock(&q->lock);
  	return IRQ_HANDLED;
-@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, 
+@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, void *cookie)
  	return IRQ_HANDLED;
  }
  
@@ -502,7 +494,7 @@
  		return 1;
  	}
  	return 0;
-@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int 
+@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
  
  	spin_lock(&q->lock);
  
@@ -515,7 +507,7 @@
  	if (!new_packets && t3_slow_intr_handler(adap) == 0)
  		q->unhandled_irqs++;
  
-@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, voi
+@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, void *cookie)
  static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  {
  	u32 map;
@@ -527,7 +519,7 @@
  
  	t3_write_reg(adap, A_PL_CLI, 0);
  	map = t3_read_reg(adap, A_SG_DATA_INTR);
-@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq
+@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  	if (unlikely(map & F_ERRINTR))
  		t3_slow_intr_handler(adap);
  
@@ -550,7 +542,7 @@
  
  	spin_unlock(&q0->lock);
  	return IRQ_HANDLED;
-@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long d
+@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long data)
  {
  	spinlock_t *lock;
  	struct sge_qset *qs = (struct sge_qset *)data;
@@ -560,7 +552,7 @@
  
  	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
  		reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
-@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long d
+@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long data)
  		spin_unlock(&qs->txq[TXQ_OFLD].lock);
  	}
  	lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
@@ -572,7 +564,7 @@
  			u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
  
  			if (qs->fl[0].credits < qs->fl[0].size)
-@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long d
+@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long data)
   */
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
  {
@@ -586,7 +578,7 @@
  }
  
  /**
-@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_
+@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
   */
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -595,7 +587,7 @@
  {
  	int i, avail, ret = -ENOMEM;
  	struct sge_qset *q = &adapter->sge.qs[id];
-@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *ad
+@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  	}
  
  	spin_unlock_irq(&adapter->sge.reg_lock);
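
The napi backport above exists because, before the per-queue napi_struct API, NAPI polling hung off a struct net_device (dev->poll, dev->weight, netif_rx_schedule()/netif_rx_complete()); that is why the patch allocates bare net_devices via alloc_netdev() for queue sets that do not own a real port device and sets their weight to 64. The old-style handler shape the backport has to provide looks roughly like this (qset_poll and do_rx_work are placeholders, not symbols from the patch):

static int qset_poll(struct net_device *dev, int *budget)
{
        int work_to_do = min(*budget, dev->quota);
        int work_done;

        /* process up to work_to_do completions for this queue set */
        work_done = do_rx_work(dev, work_to_do);

        *budget -= work_done;
        dev->quota -= work_done;

        if (work_done < work_to_do) {
                netif_rx_complete(dev);         /* done: leave the poll list */
                return 0;
        }
        return 1;                               /* more work: stay scheduled */
}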

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,23 +3,32 @@
  drivers/infiniband/hw/ipath/ipath_user_sdma.c |    6 +++---
  2 files changed, 4 insertions(+), 4 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:37:35.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:41:22.000000000 -0800
 @@ -698,7 +698,7 @@ retry:
  
  	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
  			      tx->map_len, DMA_TO_DEVICE);
--	if (dma_mapping_error(&dd->pcidev->dev, addr)) {
-+	if (dma_mapping_error(addr)) {
- 		ret = -EIO;
- 		goto unlock;
- 	}
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+-	if (dma_mapping_error(&dd->pcidev->dev, addr))
++	if (dma_mapping_error(addr))
+ 		goto ioerr;
+ 
+ 	dwoffset = tx->map_len >> 2;
+@@ -739,7 +739,7 @@ retry:
+ 		dw = (len + 3) >> 2;
+ 		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
+ 				      DMA_TO_DEVICE);
+-		if (dma_mapping_error(&dd->pcidev->dev, addr))
++		if (dma_mapping_error(addr))
+ 			goto unmap;
+ 		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
+ 		/* SDmaUseLargeBuf has to be set in every descriptor */
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:18.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:54.000000000 -0800
 @@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(cons
  
  	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
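
The ipath hunks above are purely mechanical: dma_mapping_error() only gained its struct device argument in 2.6.27, so on the 2.6.26-and-earlier kernels this backport targets the call has to be made with the address alone. When such a difference touches many call sites it is sometimes hidden behind a small compatibility macro instead of per-site edits; a sketch (compat_dma_mapping_error is our own name, not a kernel API):

#include <linux/version.h>
#include <linux/dma-mapping.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
#define compat_dma_mapping_error(dev, addr)     dma_mapping_error(addr)
#else
#define compat_dma_mapping_error(dev, addr)     dma_mapping_error(dev, addr)
#endif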

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/ipoib_0100_to_2.6.21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/ipoib_0100_to_2.6.21.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/ipoib_0100_to_2.6.21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -12,11 +12,11 @@
  drivers/infiniband/ulp/ipoib/ipoib_multicast.c |   10 ++--
  5 files changed, 61 insertions(+), 64 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:56:08.000000000 +0200
-@@ -279,8 +279,6 @@ struct ipoib_dev_priv {
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:46:20.000000000 +0300
+@@ -277,8 +277,6 @@ struct ipoib_dev_priv {
  
  	struct net_device *dev;
  
@@ -25,7 +25,7 @@
  	unsigned long flags;
  
  	struct mutex vlan_mutex;
-@@ -338,6 +336,8 @@ struct ipoib_dev_priv {
+@@ -336,6 +334,8 @@ struct ipoib_dev_priv {
  
  	struct ib_event_handler event_handler;
  
@@ -34,7 +34,7 @@
  	struct net_device *parent;
  	struct list_head child_intfs;
  	struct list_head list;
-@@ -425,7 +425,7 @@ extern struct workqueue_struct *ipoib_wo
+@@ -423,7 +423,7 @@ extern struct workqueue_struct *ipoib_wo
  
  /* functions */
  
@@ -43,10 +43,10 @@
  void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
  void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:46:20.000000000 +0300
 @@ -593,7 +593,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		ipoib_dbg(priv, "cm recv error "
  			   "(status=%d, wrid=%d vend_err %x)\n",
@@ -56,7 +56,7 @@
  		if (has_srq)
  			goto repost;
  		else {
-@@ -658,7 +658,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
+@@ -646,7 +646,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		 * this packet and reuse the old buffer.
  		 */
  		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
@@ -65,7 +65,7 @@
  		goto repost;
  	}
  
-@@ -676,8 +676,8 @@ copied:
+@@ -664,8 +664,8 @@ copied:
  	skb_pull(skb, IPOIB_ENCAP_LEN);
  
  	dev->last_rx = jiffies;
@@ -76,7 +76,7 @@
  
  	skb->dev = dev;
  	/* XXX get correct PACKET_ type here */
-@@ -726,8 +726,8 @@ void ipoib_cm_send(struct net_device *de
+@@ -714,8 +714,8 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(skb->len > tx->mtu)) {
  		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
  			   skb->len, tx->mtu);
@@ -87,7 +87,7 @@
  		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
  		return;
  	}
-@@ -746,7 +746,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -734,7 +734,7 @@ void ipoib_cm_send(struct net_device *de
  	tx_req->skb = skb;
  	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
  	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
@@ -96,7 +96,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -756,7 +756,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -744,7 +744,7 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
  			       addr, skb->len))) {
  		ipoib_warn(priv, "post_send failed\n");
@@ -105,7 +105,7 @@
  		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
  		dev_kfree_skb_any(skb);
  	} else {
-@@ -793,8 +793,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
+@@ -781,8 +781,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
  	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
  
  	/* FIXME: is this right? Shouldn't we only increment on success? */
@@ -116,10 +116,10 @@
  
  	dev_kfree_skb_any(tx_req->skb);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:43:16.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:49:04.000000000 +0300
 @@ -261,7 +261,7 @@ static void ipoib_ib_handle_rx_wc(struct
  	 * this packet and reuse the old buffer.
  	 */
@@ -263,60 +263,41 @@
  		--priv->tx_outstanding;
  		ipoib_dma_unmap_tx(priv->ca, tx_req);
  		dev_kfree_skb_any(skb);
-@@ -809,6 +812,7 @@ int ipoib_ib_dev_stop(struct net_device 
+@@ -708,8 +711,7 @@ int ipoib_ib_dev_open(struct net_device 
+ 	priv->poll_timer.function = ipoib_ib_tx_timer_func;
+ 	priv->poll_timer.data = (unsigned long)dev;
+ 
+-	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_enable(&priv->napi);
++	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+ 
+ 	return 0;
+ }
+@@ -828,8 +830,8 @@ int ipoib_ib_dev_stop(struct net_device 
+ 	struct ipoib_tx_buf *tx_req;
  	int i;
  
- 	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+-	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_disable(&priv->napi);
++	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 +	netif_poll_disable(dev);
  
  	ipoib_cm_dev_stop(dev);
  
-@@ -893,6 +897,7 @@ timeout:
- 		msleep(1);
- 	}
+@@ -903,6 +905,7 @@ timeout:
  
+ 	ipoib_ah_dev_cleanup(dev);
+ 
 +	netif_poll_enable(dev);
  	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
  
  	return 0;
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:55:35.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:58:18.000000000 +0200
-@@ -106,20 +106,16 @@ int ipoib_open(struct net_device *dev)
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:44:02.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:46:20.000000000 +0300
+@@ -585,7 +585,7 @@ static void neigh_add_path(struct sk_buf
  
- 	ipoib_dbg(priv, "bringing up interface\n");
- 
--	if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
--		napi_enable(&priv->napi);
-+	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- 
- 	if (ipoib_pkey_dev_delay_open(dev))
- 		return 0;
- 
--	if (ipoib_ib_dev_open(dev)) {
--		napi_disable(&priv->napi);
--		return -EINVAL;
--	}
-+	if (ipoib_ib_dev_open(dev))
-+  		return -EINVAL;
- 
- 	if (ipoib_ib_dev_up(dev)) {
- 		ipoib_ib_dev_stop(dev, 1);
--		napi_disable(&priv->napi);
- 		return -EINVAL;
- 	}
- 
-@@ -152,7 +148,6 @@ static int ipoib_stop(struct net_device 
- 	ipoib_dbg(priv, "stopping interface\n");
- 
- 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
--	napi_disable(&priv->napi);
- 
- 	netif_stop_queue(dev);
- 
-@@ -604,7 +599,7 @@ static void neigh_add_path(struct sk_buf
- 
  	neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
  	if (!neigh) {
 -		++dev->stats.tx_dropped;
@@ -324,7 +305,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -667,7 +662,7 @@ err_list:
+@@ -646,7 +646,7 @@ err_list:
  err_path:
  	ipoib_neigh_free(dev, neigh);
  err_drop:
@@ -333,7 +314,7 @@
  	dev_kfree_skb_any(skb);
  
  	spin_unlock_irqrestore(&priv->lock, flags);
-@@ -714,7 +709,7 @@ static void unicast_arp_send(struct sk_b
+@@ -697,7 +697,7 @@ static void unicast_arp_send(struct sk_b
  			} else
  				__path_add(dev, path);
  		} else {
@@ -342,7 +323,7 @@
  			dev_kfree_skb_any(skb);
  		}
  
-@@ -733,7 +728,7 @@ static void unicast_arp_send(struct sk_b
+@@ -716,7 +716,7 @@ static void unicast_arp_send(struct sk_b
  		skb_push(skb, sizeof *phdr);
  		__skb_queue_tail(&path->queue, skb);
  	} else {
@@ -351,7 +332,7 @@
  		dev_kfree_skb_any(skb);
  	}
  
-@@ -790,7 +785,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -773,7 +773,7 @@ static int ipoib_start_xmit(struct sk_bu
  			__skb_queue_tail(&neigh->queue, skb);
  			spin_unlock_irqrestore(&priv->lock, flags);
  		} else {
@@ -360,7 +341,7 @@
  			dev_kfree_skb_any(skb);
  		}
  	} else {
-@@ -816,7 +811,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -799,7 +799,7 @@ static int ipoib_start_xmit(struct sk_bu
  					   IPOIB_QPN(phdr->hwaddr),
  					   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
  				dev_kfree_skb_any(skb);
@@ -369,7 +350,7 @@
  				return NETDEV_TX_OK;
  			}
  
-@@ -842,7 +837,7 @@ static void ipoib_timeout(struct net_dev
+@@ -825,7 +825,7 @@ static void ipoib_timeout(struct net_dev
  static int ipoib_hard_header(struct sk_buff *skb,
  			     struct net_device *dev,
  			     unsigned short type,
@@ -378,7 +359,7 @@
  {
  	struct ipoib_header *header;
  
-@@ -931,9 +926,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
+@@ -914,9 +914,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
  void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
  {
  	struct sk_buff *skb;
@@ -390,7 +371,7 @@
  		dev_kfree_skb_any(skb);
  	}
  	if (ipoib_cm_get(neigh))
-@@ -1008,10 +1004,6 @@ void ipoib_dev_cleanup(struct net_device
+@@ -991,10 +992,6 @@ void ipoib_dev_cleanup(struct net_device
  	priv->tx_ring = NULL;
  }
  
@@ -401,7 +382,7 @@
  static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
  		       void **tcph, u64 *hdr_flags, void *priv)
  {
-@@ -1069,13 +1061,13 @@ static void ipoib_setup(struct net_devic
+@@ -1052,13 +1049,13 @@ static void ipoib_setup(struct net_devic
  	dev->change_mtu		 = ipoib_change_mtu;
  	dev->hard_start_xmit	 = ipoib_start_xmit;
  	dev->tx_timeout		 = ipoib_timeout;
@@ -418,10 +399,10 @@
  
  	dev->watchdog_timeo	 = HZ;
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:46:20.000000000 +0300
 @@ -100,7 +100,7 @@ static void ipoib_mcast_free(struct ipoi
  	}
  
@@ -448,7 +429,7 @@
  			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
  		}
  		netif_tx_unlock_bh(dev);
-@@ -667,7 +668,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -677,7 +678,7 @@ void ipoib_mcast_send(struct net_device 
  	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)		||
  	    !priv->broadcast					||
  	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
@@ -457,7 +438,7 @@
  		dev_kfree_skb_any(skb);
  		goto unlock;
  	}
-@@ -682,7 +683,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -692,7 +693,7 @@ void ipoib_mcast_send(struct net_device 
  		if (!mcast) {
  			ipoib_warn(priv, "unable to allocate memory for "
  				   "multicast structure\n");
@@ -466,7 +447,7 @@
  			dev_kfree_skb_any(skb);
  			goto out;
  		}
-@@ -697,7 +698,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -707,7 +708,7 @@ void ipoib_mcast_send(struct net_device 
  		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
  			skb_queue_tail(&mcast->pkt_queue, skb);
  		else {
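
A large share of the ipoib hunks above are one mechanical substitution: every ++dev->stats.<counter> becomes ++priv->stats.<counter>, because the kernels this backport targets (2.6.21 and earlier, per the file name) do not yet provide a statistics block inside struct net_device, so the counters have to live in the driver-private structure. In sketch form (only the priv->stats usage is taken from the patch; the surrounding declaration is the usual shape such drivers use):

struct ipoib_dev_priv {
        /* ... existing fields ... */
        struct net_device_stats stats;  /* driver-owned counters on old kernels */
};

/* error paths then bump the private copy, e.g.: */
++priv->stats.tx_dropped;
dev_kfree_skb_any(skb);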

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,26 @@
+---
+ drivers/infiniband/ulp/ipoib/ipoib_fs.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+===================================================================
+--- ofa_kernel-1.4.orig/drivers/infiniband/ulp/ipoib/ipoib_fs.c
++++ ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+@@ -134,7 +134,7 @@ static int ipoib_mcg_open(struct inode *
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_mcg_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_mcg_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -240,7 +240,7 @@ static int ipoib_path_open(struct inode 
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_path_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_path_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 
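
The two casts added above only silence a qualifier warning: the ipoib seq_file operation tables are declared const, but on the kernels this backport serves (2.6.19 and earlier, per the file name) seq_open() still takes a plain struct seq_operations *, so passing the const table would otherwise warn about discarding the qualifier. The mismatch, roughly (prototypes abbreviated):

/* newer kernels */
int seq_open(struct file *file, const struct seq_operations *ops);

/* kernels covered by this backport */
int seq_open(struct file *file, struct seq_operations *ops);

/* hence the cast in the patched callers: */
ret = seq_open(file, (struct seq_operations *) &ipoib_mcg_seq_ops);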

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/iw_nes_050_to_2_6_24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/iw_nes_050_to_2_6_24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/iw_nes_050_to_2_6_24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:23:23.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -1118,19 +1118,19 @@ static ssize_t nes_show_wqm_quanta(struc
  static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
  					const char *buf, size_t count)

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/iw_nes_100_to_2_6_23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/iw_nes_100_to_2_6_23.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/iw_nes_100_to_2_6_23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/Kconfig nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig
---- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:38:45.000000000 -0500
 @@ -2,7 +2,6 @@ config INFINIBAND_NES
  	tristate "NetEffect RNIC Driver"
  	depends on PCI && INET && INFINIBAND
@@ -10,8 +10,8 @@
  	  This is a low-level driver for NetEffect RDMA enabled
  	  Network Interface Cards (RNIC).
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -360,10 +360,11 @@ struct ib_qp *nes_get_qp(struct ib_devic
   */
  static void nes_print_macaddr(struct net_device *netdev)
@@ -29,9 +29,9 @@
  
  /**
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_cm.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2008-11-07 15:26:56.000000000 -0600
-@@ -998,7 +998,6 @@ static int nes_addr_resolve_neigh(struct
+--- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1086,7 +1086,6 @@ static int nes_addr_resolve_neigh(struct
  	struct flowi fl;
  	struct neighbour *neigh;
  	int rc = -1;
@@ -39,7 +39,7 @@
  
  	memset(&fl, 0, sizeof fl);
  	fl.nl_u.ip4_u.daddr = htonl(dst_ip);
-@@ -1012,8 +1011,11 @@ static int nes_addr_resolve_neigh(struct
+@@ -1100,8 +1099,11 @@ static int nes_addr_resolve_neigh(struct
  	if (neigh) {
  		if (neigh->nud_state & NUD_VALID) {
  			nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -53,7 +53,7 @@
  			nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
  					     dst_ip, NES_ARP_ADD);
  			rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
-@@ -1042,7 +1044,6 @@ static struct nes_cm_node *make_cm_node(
+@@ -1130,7 +1132,6 @@ static struct nes_cm_node *make_cm_node(
  	int arpindex = 0;
  	struct nes_device *nesdev;
  	struct nes_adapter *nesadapter;
@@ -61,7 +61,7 @@
  
  	/* create an hte and cm_node for this instance */
  	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
-@@ -1111,8 +1112,11 @@ static struct nes_cm_node *make_cm_node(
+@@ -1197,8 +1198,11 @@ static struct nes_cm_node *make_cm_node(
  
  	/* copy the mac addr to node context */
  	memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
@@ -76,8 +76,8 @@
  	add_hte_node(cm_core, cm_node);
  	atomic_inc(&cm_nodes_created);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2008-11-07 15:38:18.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:38:45.000000000 -0500
 @@ -38,14 +38,9 @@
  #include <linux/ip.h>
  #include <linux/tcp.h>
@@ -90,10 +90,10 @@
 -module_param(nes_lro_max_aggr, uint, 0444);
 -MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
 -
- static u32 crit_err_count;
- u32 int_mod_timer_init;
- u32 int_mod_cq_depth_256;
-@@ -1540,25 +1535,6 @@ static void nes_rq_wqes_timeout(unsigned
+ static int wide_ppm_offset;
+ module_param(wide_ppm_offset, int, 0644);
+ MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm");
+@@ -1531,25 +1526,6 @@ static void nes_rq_wqes_timeout(unsigned
  }
  
  
@@ -119,7 +119,7 @@
  /**
   * nes_init_nic_qp
   */
-@@ -1783,14 +1759,6 @@ int nes_init_nic_qp(struct nes_device *n
+@@ -1773,14 +1749,6 @@ int nes_init_nic_qp(struct nes_device *n
  			jumbomode = 1;
  		nes_nic_init_timer_defaults(nesdev, jumbomode);
  	}
@@ -134,7 +134,7 @@
  	return 0;
  }
  
-@@ -2541,7 +2509,7 @@ static void nes_nic_napi_ce_handler(stru
+@@ -2556,7 +2524,7 @@ static void nes_nic_napi_ce_handler(stru
  {
  	struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
  
@@ -143,7 +143,7 @@
  }
  
  
-@@ -2576,13 +2544,10 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2591,13 +2559,10 @@ void nes_nic_ce_handler(struct nes_devic
  	u16 pkt_type;
  	u16 rqes_processed = 0;
  	u8 sq_cqes = 0;
@@ -157,7 +157,7 @@
  	do {
  		if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
  				NES_NIC_CQE_VALID) {
-@@ -2714,17 +2679,9 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2729,17 +2694,9 @@ void nes_nic_ce_handler(struct nes_devic
  							>> 16);
  					nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
  							nesvnic->netdev->name, vlan_tag);
@@ -178,7 +178,7 @@
  
  skip_rx_indicate0:
  				nesvnic->netdev->last_rx = jiffies;
-@@ -2755,8 +2712,6 @@ skip_rx_indicate0:
+@@ -2770,8 +2727,6 @@ skip_rx_indicate0:
  
  	} while (1);
  
@@ -188,18 +188,18 @@
  		barrier();
  		/* restart the queue if it had been stopped */
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.h nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:38:45.000000000 -0500
 @@ -33,8 +33,6 @@
  #ifndef __NES_HW_H
  #define __NES_HW_H
  
 -#include <linux/inet_lro.h>
 -
+ #define NES_PHY_TYPE_CX4       1
  #define NES_PHY_TYPE_1G        2
  #define NES_PHY_TYPE_IRIS      3
- #define NES_PHY_TYPE_ARGUS     4
-@@ -991,8 +989,6 @@ struct nes_hw_tune_timer {
+@@ -1005,8 +1003,6 @@ struct nes_hw_tune_timer {
  #define NES_TIMER_ENABLE_LIMIT      4
  #define NES_MAX_LINK_INTERRUPTS     128
  #define NES_MAX_LINK_CHECK          200
@@ -208,7 +208,7 @@
  
  struct nes_adapter {
  	u64              fw_ver;
-@@ -1168,7 +1164,6 @@ struct nes_vnic {
+@@ -1194,7 +1190,6 @@ struct nes_vnic {
  	u32               msg_enable;
  	/* u32 tx_avail; */
  	__be32            local_ipaddr;
@@ -216,7 +216,7 @@
  	spinlock_t           tx_lock;	/* could use netdev tx lock? */
  	struct timer_list    rq_wqes_timer;
  	u32                  nic_mem_size;
-@@ -1196,9 +1191,6 @@ struct nes_vnic {
+@@ -1222,9 +1217,6 @@ struct nes_vnic {
  	u8  of_device_registered;
  	u8  rdma_enabled;
  	u8  rx_checksum_disabled;
@@ -227,8 +227,8 @@
  
  struct nes_ib_device {
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
 @@ -96,35 +96,38 @@ static int nics_per_function = 1;
  /**
   * nes_netdev_poll
@@ -292,7 +292,7 @@
  	netif_stop_queue(netdev);
  	list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
  		first_nesvnic = container_of(list_pos, struct nes_vnic, list);
-@@ -797,14 +798,16 @@ static int nes_netdev_set_mac_address(st
+@@ -775,14 +776,16 @@ static int nes_netdev_set_mac_address(st
  	int i;
  	u32 macaddr_low;
  	u16 macaddr_high;
@@ -312,7 +312,7 @@
  	macaddr_high  = ((u16)netdev->dev_addr[0]) << 8;
  	macaddr_high += (u16)netdev->dev_addr[1];
  	macaddr_low   = ((u32)netdev->dev_addr[2]) << 24;
-@@ -909,11 +912,11 @@ static void nes_netdev_set_multicast_lis
+@@ -887,11 +890,11 @@ static void nes_netdev_set_multicast_lis
  			if (mc_index >= max_pft_entries_avaiable)
  				break;
  			if (multicast_addr) {
@@ -329,7 +329,7 @@
  				macaddr_high  = ((u16)multicast_addr->dmi_addr[0]) << 8;
  				macaddr_high += (u16)multicast_addr->dmi_addr[1];
  				macaddr_low   = ((u32)multicast_addr->dmi_addr[2]) << 24;
-@@ -1063,9 +1066,6 @@ static const char nes_ethtool_stringset[
+@@ -1040,9 +1043,6 @@ static const char nes_ethtool_stringset[
  	"CQ Depth 32",
  	"CQ Depth 128",
  	"CQ Depth 256",
@@ -339,17 +339,17 @@
  };
  
  #define NES_ETHTOOL_STAT_COUNT  ARRAY_SIZE(nes_ethtool_stringset)
-@@ -1263,9 +1263,6 @@ static void nes_netdev_get_ethtool_stats
- 	target_stat_values[52] = int_mod_cq_depth_32;
- 	target_stat_values[53] = int_mod_cq_depth_128;
- 	target_stat_values[54] = int_mod_cq_depth_256;
--	target_stat_values[55] = nesvnic->lro_mgr.stats.aggregated;
--	target_stat_values[56] = nesvnic->lro_mgr.stats.flushed;
--	target_stat_values[57] = nesvnic->lro_mgr.stats.no_desc;
+@@ -1240,9 +1240,6 @@ static void nes_netdev_get_ethtool_stats
+ 	target_stat_values[++index] = int_mod_cq_depth_32;
+ 	target_stat_values[++index] = int_mod_cq_depth_128;
+ 	target_stat_values[++index] = int_mod_cq_depth_256;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
  
  }
  
-@@ -1542,8 +1539,6 @@ static struct ethtool_ops nes_ethtool_op
+@@ -1525,8 +1522,6 @@ static struct ethtool_ops nes_ethtool_op
  	.set_sg = ethtool_op_set_sg,
  	.get_tso = ethtool_op_get_tso,
  	.set_tso = ethtool_op_set_tso,
@@ -358,7 +358,7 @@
  };
  
  
-@@ -1615,7 +1610,8 @@ struct net_device *nes_netdev_init(struc
+@@ -1598,7 +1593,8 @@ struct net_device *nes_netdev_init(struc
  	netdev->type = ARPHRD_ETHER;
  	netdev->features = NETIF_F_HIGHDMA;
  	netdev->ethtool_ops = &nes_ethtool_ops;
@@ -368,7 +368,7 @@
  	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
  	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
  	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
-@@ -1648,7 +1644,6 @@ struct net_device *nes_netdev_init(struc
+@@ -1630,7 +1626,6 @@ struct net_device *nes_netdev_init(struc
  
  	if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
  		netdev->features |= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/iw_nes_200_to_2_6_22.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/iw_nes_200_to_2_6_22.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/iw_nes_200_to_2_6_22.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:52:27.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -593,16 +593,22 @@ static int __devinit nes_probe(struct pc
  						nesdev->nesadapter->port_count;
  	}
@@ -35,9 +35,9 @@
  
  	tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.h nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h
---- linux-2.6/drivers/infiniband/hw/nes/nes.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2008-11-07 15:52:27.000000000 -0600
-@@ -276,14 +276,7 @@ static inline __le32 get_crc_value(struc
+--- linux-2.6/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:38:45.000000000 -0500
+@@ -274,14 +274,7 @@ static inline __le32 get_crc_value(struc
  	u32 crc_value;
  	crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad));
  

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/iw_nes_210_to_2_6_21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/iw_nes_210_to_2_6_21.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/iw_nes_210_to_2_6_21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,23 @@
+diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1549,6 +1549,11 @@ static void nes_netdev_vlan_rx_register(
+ 	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ }
+ 
++static void nes_netdev_vlan_rx_kill_vid(struct net_device *netdev, 
++					unsigned short vid)
++{
++}
++
+ 
+ /**
+  * nes_netdev_init - initialize network device
+@@ -1598,6 +1603,7 @@ struct net_device *nes_netdev_init(struc
+ 	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
+ 	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ 	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
++	netdev->vlan_rx_kill_vid = nes_netdev_vlan_rx_kill_vid;
+ 
+ 	/* Fill in the port structure */
+ 	nesvnic->netdev = netdev;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/mlx4_en_0099_no_multiqueue.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/mlx4_en_0099_no_multiqueue.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/mlx4_en_0099_no_multiqueue.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,73 @@
+diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
+index bce700a..a9ee0d7 100644
+--- a/drivers/net/mlx4/en_netdev.c
++++ b/drivers/net/mlx4/en_netdev.c
+@@ -963,7 +963,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	int i;
+ 	int err;
+ 
+-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
++	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+ 	if (dev == NULL) {
+ 		mlx4_err(mdev, "Net device allocation failed\n");
+ 		return -ENOMEM;
+@@ -1036,7 +1036,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	dev->open = &mlx4_en_open;
+ 	dev->stop = &mlx4_en_close;
+ 	dev->hard_start_xmit = &mlx4_en_xmit;
+-	dev->select_queue = &mlx4_en_select_queue;
+ 	dev->get_stats = &mlx4_en_get_stats;
+ 	dev->set_multicast_list = &mlx4_en_set_multicast;
+ 	dev->set_mac_address = &mlx4_en_set_mac;
+diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
+index 3d8246f..72e166f 100644
+--- a/drivers/net/mlx4/en_tx.c
++++ b/drivers/net/mlx4/en_tx.c
+@@ -392,7 +392,7 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+ 			 *   transmission on that ring would stop the queue.
+ 			 */
+ 			ring->blocked = 0;
+-			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
++			netif_wake_queue(dev);
+ 			priv->port_stats.wake_queue++;
+ 		}
+ 	}
+@@ -612,7 +612,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
+ 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ }
+ 
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
++static int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	u16 vlan_tag = 0;
+@@ -703,7 +703,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+-	tx_ind = skb->queue_mapping;
++	tx_ind = mlx4_en_select_queue(dev, skb);
+ 	ring = &priv->tx_ring[tx_ind];
+ 	if (priv->vlgrp && vlan_tx_tag_present(skb))
+ 		vlan_tag = vlan_tx_tag_get(skb);
+@@ -713,7 +713,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ 		/* every full Tx ring stops queue.
+ 		 * TODO: implement multi-queue support (per-queue stop) */
+-		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
++		netif_stop_queue(dev);
+ 		ring->blocked = 1;
+ 		priv->port_stats.queue_stopped++;
+ 
+diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
+index adef17c..995e318 100644
+--- a/drivers/net/mlx4/mlx4_en.h
++++ b/drivers/net/mlx4/mlx4_en.h
+@@ -521,7 +521,6 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq);
+ void mlx4_en_poll_tx_cq(unsigned long data);
+ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+ 
+ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+ 			   u32 size, u16 stride);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/mlx4_en_0100_to_2.6.24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/mlx4_en_0100_to_2.6.24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/mlx4_en_0100_to_2.6.24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,33 +1,5 @@
-From 6d0a6cc6981404e95297924c9a8f169b3447a98e Mon Sep 17 00:00:00 2001
-From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
-Date: Thu, 2 Oct 2008 11:26:30 +0300
-Subject: [PATCH] mlx4_en: LRO backport patch
-
-Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
----
- drivers/net/mlx4/Makefile    |    2 +-
- drivers/net/mlx4/en_cq.c     |   23 ++-
- drivers/net/mlx4/en_lro.c    |  529 ++++++++++++++++++++++++++++++++++++++++++
- drivers/net/mlx4/en_netdev.c |    5 +-
- drivers/net/mlx4/en_params.c |   19 +--
- drivers/net/mlx4/en_rx.c     |  121 +++-------
- drivers/net/mlx4/en_tx.c     |    1 +
- drivers/net/mlx4/mlx4_en.h   |   60 +++++-
- 8 files changed, 646 insertions(+), 114 deletions(-)
- create mode 100644 drivers/net/mlx4/en_lro.c
-
-diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
-index acb1b91..e6bfdec 100644
---- a/drivers/net/mlx4/Makefile
-+++ b/drivers/net/mlx4/Makefile
-@@ -5,4 +5,4 @@ mlx4_core-y :=  alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-
- obj-$(CONFIG_MLX4_EN)                  += mlx4_en.o
-
--mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o
-+mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o en_lro.o
 diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
-index cf9c5df..2348bfc 100644
+index a4d6e46..440633a 100644
 --- a/drivers/net/mlx4/en_cq.c
 +++ b/drivers/net/mlx4/en_cq.c
 @@ -34,6 +34,7 @@
@@ -38,7 +10,7 @@
  
  #include "mlx4_en.h"
  
-@@ -100,8 +101,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -106,8 +107,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  		cq->timer.function = mlx4_en_poll_tx_cq;
  		cq->timer.data = (unsigned long) cq;
  	} else {
@@ -58,7 +30,7 @@
  	}
  
  	return 0;
-@@ -123,8 +133,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -129,8 +139,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  
  	if (cq->is_tx)
  		del_timer(&cq->timer);
@@ -74,548 +46,8 @@
  
  	mlx4_cq_free(mdev->dev, &cq->mcq);
  }
-diff --git a/drivers/net/mlx4/en_lro.c b/drivers/net/mlx4/en_lro.c
-new file mode 100644
-index 0000000..9d17376
---- /dev/null
-+++ b/drivers/net/mlx4/en_lro.c
-@@ -0,0 +1,534 @@
-+/*
-+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses.  You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ *     Redistribution and use in source and binary forms, with or
-+ *     without modification, are permitted provided that the following
-+ *     conditions are met:
-+ *
-+ *      - Redistributions of source code must retain the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer.
-+ *
-+ *      - Redistributions in binary form must reproduce the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer in the documentation and/or other materials
-+ *        provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ */
-+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <net/tcp.h>
-+#include <linux/if_vlan.h>
-+#include <linux/delay.h>
-+
-+#include "mlx4_en.h"
-+
-+/* LRO hash function - using sum of source and destination port LSBs is
-+ * good enough */
-+#define LRO_INDEX(th, size) \
-+	((*((u8*) &th->source + 1) + *((u8*) &th->dest + 1)) & (size - 1))
-+
-+/* #define CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+static void mlx4_en_lro_validate(struct mlx4_en_priv* priv, struct mlx4_en_lro *lro)
-+{
-+	int i;
-+	int size, size2;
-+	struct sk_buff *skb = lro->skb;
-+	skb_frag_t *frags;
-+	int len, len2;
-+	int cur_skb = 0;
-+
-+	/* Sum fragment sizes of first skb */
-+	len = skb->len;
-+	size = skb_headlen(skb);
-+	frags = skb_shinfo(skb)->frags;
-+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+		size += frags[i].size;
-+	}
-+
-+	/* Add in fragments of linked skb's */
-+	skb = skb_shinfo(skb)->frag_list;
-+	while (skb) {
-+		cur_skb++;
-+		len2 = skb->len;
-+		if (skb_headlen(skb)) {
-+			mlx4_err(priv->mdev, "Bad LRO format: non-zero headlen "
-+				  "in fraglist (skb:%d)\n", cur_skb);
-+			return;
-+		}
-+
-+		size2 = 0;
-+		frags = skb_shinfo(skb)->frags;
-+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+			size2 += frags[i].size;
-+		}
-+
-+		if (size2 != len2) {
-+			mlx4_err(priv->mdev, "Bad skb size:%d in LRO fraglist. "
-+			          "Expected:%d (skb:%d)\n", size2, len2, cur_skb);
-+			return;
-+		}
-+		size += size2;
-+		skb = skb->next;
-+	}
-+
-+	if (size != len)
-+		mlx4_err(priv->mdev, "Bad LRO size:%d expected:%d\n", size, len);
-+}
-+#endif /* MLX4_EN_DEBUG_LRO */
-+
-+static void mlx4_en_lro_flush_single(struct mlx4_en_priv* priv,
-+		   struct mlx4_en_rx_ring* ring, struct mlx4_en_lro *lro)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct sk_buff *skb = lro->skb;
-+	struct iphdr *iph = (struct iphdr *) skb->data;
-+	struct tcphdr *th = (struct tcphdr *)(iph + 1);
-+	unsigned int headlen = skb_headlen(skb);
-+	__wsum tcp_hdr_csum;
-+	u32 *ts;
-+
-+	/* Update IP length and checksum */
-+	iph->tot_len = htons(lro->tot_len);
-+	iph->check = 0;
-+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-+
-+	/* Update latest TCP ack, window, psh, and timestamp */
-+	th->ack_seq = lro->ack_seq;
-+	th->window = lro->window;
-+	th->psh = !!lro->psh;
-+	if (lro->has_timestamp) {
-+		ts = (u32 *) (th + 1);
-+		ts[1] = htonl(lro->tsval);
-+		ts[2] = lro->tsecr;
-+	}
-+	th->check = 0;
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, 0);
-+	lro->data_csum = csum_add(lro->data_csum, tcp_hdr_csum);
-+	th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
-+				      lro->tot_len - (iph->ihl << 2),
-+				      IPPROTO_TCP, lro->data_csum);
-+
-+	/* Update skb */
-+	skb->len = lro->tot_len;
-+	skb->data_len = lro->tot_len - headlen;
-+	skb->truesize = skb->len + sizeof(struct sk_buff);
-+	skb_shinfo(skb)->gso_size = lro->mss;
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+	mlx4_en_lro_validate(priv, lro);
-+#endif /* CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+	/* Push it up the stack */
-+	if (priv->vlgrp && lro->has_vlan)
-+		vlan_hwaccel_receive_skb(skb, priv->vlgrp,
-+					be16_to_cpu(lro->vlan_prio));
-+	else
-+		netif_receive_skb(skb);
-+	priv->dev->last_rx = jiffies;
-+
-+	/* Increment stats */
-+	priv->port_stats.lro_flushed++;
-+
-+	/* Move session back to the free list */
-+	hlist_del(&lro->node);
-+	hlist_del(&lro->flush_node);
-+	hlist_add_head(&lro->node, &ring->lro_free);
-+}
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_flush, flush_node) {
-+		if (all || time_after(jiffies, lro->expires))
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+	}
-+}
-+
-+static inline int mlx4_en_lro_append(struct mlx4_en_priv *priv,
-+				   struct mlx4_en_lro *lro,
-+				   struct mlx4_en_rx_desc *rx_desc,
-+				   struct skb_frag_struct *skb_frags,
-+				   struct mlx4_en_rx_alloc *page_alloc,
-+				   unsigned int data_len,
-+				   int hlen)
-+{
-+	struct sk_buff *skb = lro->skb_last;
-+	struct skb_shared_info *info;
-+	struct skb_frag_struct *frags_copy;
-+	int nr_frags;
-+
-+	if (skb_shinfo(skb)->nr_frags + priv->num_frags > MAX_SKB_FRAGS)
-+		return -ENOMEM;
-+
-+	info = skb_shinfo(skb);
-+
-+	/* Copy fragments from descriptor ring to skb */
-+	frags_copy = info->frags + info->nr_frags;
-+	nr_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
-+						frags_copy,
-+						page_alloc,
-+						data_len + hlen);
-+	if (!nr_frags) {
-+		mlx4_dbg(DRV, priv, "Failed completing rx desc during LRO append\n");
-+		return -ENOMEM;
-+	}
-+
-+	/* Skip over headers */
-+	frags_copy[0].page_offset += hlen;
-+
-+	if (nr_frags == 1)
-+		frags_copy[0].size = data_len;
-+	else {
-+		/* Adjust size of last fragment to match packet length.
-+		 * Note: if this fragment is also the first one, the
-+		 *       operation is completed in the next line */
-+		frags_copy[nr_frags - 1].size = hlen + data_len -
-+				priv->frag_info[nr_frags - 1].frag_prefix_size;
-+
-+		/* Adjust size of first fragment */
-+		frags_copy[0].size -= hlen;
-+	}
-+
-+	/* Update skb bookkeeping */
-+	skb->len += data_len;
-+	skb->data_len += data_len;
-+	info->nr_frags += nr_frags;
-+	return 0;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_find_session(struct mlx4_en_dev *mdev,
-+						       struct mlx4_en_rx_ring *ring,
-+						       struct iphdr *iph,
-+						       struct tcphdr *th)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node;
-+	int index = LRO_INDEX(th, mdev->profile.num_lro);
-+	struct hlist_head *list = &ring->lro_hash[index];
-+
-+	hlist_for_each_entry(lro, node, list, node) {
-+		if (lro->sport_dport == *((u32*) &th->source) &&
-+		    lro->saddr == iph->saddr &&
-+		    lro->daddr == iph->daddr)
-+			return lro;
-+	}
-+	return NULL;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_alloc_session(struct mlx4_en_priv *priv,
-+							struct mlx4_en_rx_ring *ring)
-+{
-+	return hlist_empty(&ring->lro_free) ? NULL :
-+		hlist_entry(ring->lro_free.first, struct mlx4_en_lro, node);
-+}
-+
-+static __wsum mlx4_en_lro_tcp_data_csum(struct iphdr *iph,
-+					struct tcphdr *th, int len)
-+{
-+	__wsum tcp_csum;
-+	__wsum tcp_hdr_csum;
-+	__wsum tcp_ps_hdr_csum;
-+
-+	tcp_csum = ~csum_unfold(th->check);
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, tcp_csum);
-+
-+	tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-+					     len + (th->doff << 2),
-+					     IPPROTO_TCP, 0);
-+
-+	return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
-+			tcp_ps_hdr_csum);
-+}
-+
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+					  struct mlx4_en_rx_desc *rx_desc,
-+					  struct skb_frag_struct *skb_frags,
-+					  unsigned int length,
-+					  struct mlx4_cqe *cqe)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct mlx4_en_lro *lro;
-+	struct sk_buff *skb;
-+	struct iphdr *iph;
-+	struct tcphdr *th;
-+	dma_addr_t dma;
-+	int tcp_hlen;
-+	int tcp_data_len;
-+	int hlen;
-+	u16 ip_len;
-+	void *va;
-+	u32 *ts;
-+	u32 seq;
-+	u32 tsval = (u32) ~0UL;
-+	u32 tsecr = 0;
-+	u32 ack_seq;
-+	u16 window;
-+
-+	/* This packet is eligible for LRO if it is:
-+	 * - DIX Ethernet (type interpretation)
-+	 * - TCP/IP (v4)
-+	 * - without IP options
-+	 * - not an IP fragment */
-+	if (!mlx4_en_can_lro(cqe->status))
-+			return -1;
-+
-+	/* Get pointer to TCP header. We already know that the packet is DIX Ethernet/IPv4/TCP
-+	 * with no VLAN (HW stripped it) and no IP options */
-+	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
-+	iph = va + ETH_HLEN;
-+	th = (struct tcphdr *)(iph + 1);
-+
-+	/* Synchronsize headers for processing */
-+	dma = be64_to_cpu(rx_desc->data[0].addr);
-+#define MAX_LRO_HEADER		(ETH_HLEN + \
-+				 sizeof(*iph) + \
-+				 sizeof(*th) + \
-+				 TCPOLEN_TSTAMP_ALIGNED)
-+	dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
-+				      MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+
-+	/* We only handle aligned timestamp options */
-+	tcp_hlen = (th->doff << 2);
-+	if (tcp_hlen == sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) {
-+		ts = (u32*) (th + 1);
-+		if (unlikely(*ts != htonl((TCPOPT_NOP << 24) |
-+					  (TCPOPT_NOP << 16) |
-+					  (TCPOPT_TIMESTAMP << 8) |
-+					  TCPOLEN_TIMESTAMP)))
-+			goto sync_device;
-+		tsval = ntohl(ts[1]);
-+		tsecr = ts[2];
-+	} else if (tcp_hlen != sizeof(*th))
-+		goto sync_device;
-+	
-+
-+	/* At this point we know we have a TCP packet that is likely to be
-+	 * eligible for LRO. Therefore, see now if we have an oustanding
-+	 * session that corresponds to this packet so we could flush it if
-+	 * something still prevents LRO */
-+	lro = mlx4_en_lro_find_session(mdev, ring, iph, th);
-+
-+	/* ensure no bits set besides ack or psh */
-+	if (th->fin || th->syn || th->rst || th->urg || th->ece ||
-+	    th->cwr || !th->ack) {
-+		if (lro) {
-+			/* First flush session to keep packets in-order */
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		}
-+		goto sync_device;
-+	}
-+
-+	/* Get ip length and verify that the frame is big enough */
-+	ip_len = ntohs(iph->tot_len);
-+	if (unlikely(length < ETH_HLEN + ip_len)) {
-+		mlx4_warn(mdev, "Cannot LRO - ip payload exceeds frame!\n");
-+		goto sync_device;
-+	}
-+
-+	/* Get TCP payload length */
-+	tcp_data_len = ip_len - tcp_hlen - sizeof(struct iphdr);
-+	seq = ntohl(th->seq);
-+	if (!tcp_data_len)
-+		goto flush_session;
-+
-+	if (lro) {
-+		/* Check VLAN tag */
-+		if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+			if (cqe->sl_vid != lro->vlan_prio || !lro->has_vlan) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else if (lro->has_vlan) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* Check sequence number */
-+		if (unlikely(seq != lro->next_seq)) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* If the cummulative IP length is over 64K, flush and start
-+		 * a new session */
-+		if (lro->tot_len + tcp_data_len > 0xffff) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto new_session;
-+		}
-+
-+		/* Check timestamps */
-+		if (tcp_hlen != sizeof(*th)) {
-+			if (unlikely(lro->tsval > tsval || !tsecr))
-+				goto sync_device;
-+		}
-+
-+		window = th->window;
-+		ack_seq = th->ack_seq;
-+		if (likely(tcp_data_len)) {
-+			/* Append the data! */
-+			hlen = ETH_HLEN + sizeof(struct iphdr) + tcp_hlen;
-+			if (mlx4_en_lro_append(priv, lro, rx_desc, skb_frags,
-+							ring->page_alloc,
-+							tcp_data_len, hlen)) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else {
-+			/* No data */
-+			dma_sync_single_range_for_device(&mdev->dev->pdev->dev, dma,
-+							 0, MAX_LRO_HEADER,
-+							 DMA_FROM_DEVICE);
-+		}
-+
-+		/* Update session */
-+		lro->psh |= th->psh;
-+		lro->next_seq += tcp_data_len;
-+		lro->data_csum = csum_block_add(lro->data_csum,
-+					mlx4_en_lro_tcp_data_csum(iph, th,
-+								  tcp_data_len),
-+					lro->tot_len);
-+		lro->tot_len += tcp_data_len;
-+		lro->tsval = tsval;
-+		lro->tsecr = tsecr;
-+		lro->ack_seq = ack_seq;
-+		lro->window = window;
-+		if (tcp_data_len > lro->mss)
-+			lro->mss = tcp_data_len;
-+		priv->port_stats.lro_aggregated++;
-+		if (th->psh)
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		return 0;
-+	}
-+
-+new_session:
-+	if (th->psh)
-+		goto sync_device;
-+	lro = mlx4_en_lro_alloc_session(priv, ring);
-+	if (lro) {
-+		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags, ring->page_alloc,
-+							     ETH_HLEN + ip_len);
-+		if (skb) {
-+			int index;
-+
-+			/* Add in the skb */
-+			lro->skb = skb;
-+			lro->skb_last = skb;
-+			skb->protocol = eth_type_trans(skb, priv->dev);
-+			skb->ip_summed = CHECKSUM_UNNECESSARY;
-+
-+			/* Initialize session */
-+			lro->saddr = iph->saddr;
-+			lro->daddr = iph->daddr;
-+			lro->sport_dport = *((u32*) &th->source);
-+
-+			lro->next_seq = seq + tcp_data_len;
-+			lro->tot_len = ip_len;
-+			lro->psh = th->psh;
-+			lro->ack_seq = th->ack_seq;
-+			lro->window = th->window;
-+			lro->mss = tcp_data_len;
-+			lro->data_csum = mlx4_en_lro_tcp_data_csum(iph, th,
-+						tcp_data_len);
-+
-+			/* Handle vlans */
-+			if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+				lro->vlan_prio = cqe->sl_vid;
-+				lro->has_vlan = 1;
-+			} else
-+				lro->has_vlan = 0;
-+
-+			/* Handle timestamps */
-+			if (tcp_hlen != sizeof(*th)) {
-+				lro->tsval = tsval;
-+				lro->tsecr = tsecr;
-+				lro->has_timestamp = 1;
-+			} else {
-+				lro->tsval = (u32) ~0UL;
-+				lro->has_timestamp = 0;
-+			}
-+
-+			/* Activate this session */
-+			lro->expires = jiffies + HZ / 25;
-+			hlist_del(&lro->node);
-+			index = LRO_INDEX(th, mdev->profile.num_lro);
-+
-+			hlist_add_head(&lro->node, &ring->lro_hash[index]);
-+			hlist_add_head(&lro->flush_node, &ring->lro_flush);
-+			priv->port_stats.lro_aggregated++;
-+			return 0;
-+		}
-+	} else {
-+		priv->port_stats.lro_no_desc++;
-+	}
-+
-+flush_session:
-+	if (lro)
-+		mlx4_en_lro_flush_single(priv, ring, lro);
-+sync_device:
-+	dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
-+					 MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+	return -1;
-+}
-+
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_free, node) {
-+		hlist_del(&lro->node);
-+		kfree(lro);
-+	}
-+	kfree(ring->lro_hash);
-+}
-+
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro)
-+{
-+	struct mlx4_en_lro *lro;
-+	int i;
-+
-+	INIT_HLIST_HEAD(&ring->lro_free);
-+	INIT_HLIST_HEAD(&ring->lro_flush);
-+	ring->lro_hash = kmalloc(sizeof(struct hlist_head) * num_lro,
-+				 GFP_KERNEL);
-+	if (!ring->lro_hash)
-+		return -ENOMEM;
-+
-+	for (i = 0; i < num_lro; i++) {
-+		INIT_HLIST_HEAD(&ring->lro_hash[i]);
-+		lro = kzalloc(sizeof(struct mlx4_en_lro), GFP_KERNEL);
-+		if (!lro) {
-+			mlx4_en_lro_destroy(ring);
-+			return -ENOMEM;
-+		}
-+		INIT_HLIST_NODE(&lro->node);
-+		INIT_HLIST_NODE(&lro->flush_node);
-+		hlist_add_head(&lro->node, &ring->lro_free);
-+	}
-+	return 0;
-+}
-+
-+
 diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
-index 6199f4c..92f91d2 100644
+index 2d57ae6..3b394d7 100644
 --- a/drivers/net/mlx4/en_netdev.c
 +++ b/drivers/net/mlx4/en_netdev.c
 @@ -333,7 +333,8 @@ void mlx4_en_netpoll(struct net_device *dev)
@@ -625,10 +57,10 @@
 -		napi_synchronize(&cq->napi);
 +		while (test_bit(__LINK_STATE_RX_SCHED, &cq->poll_dev->state))
 +			msleep(1);
- 		mlx4_en_process_rx_cq(dev, cq, 0);
- 		spin_unlock_irqrestore(&cq->lock, flags);
- 	}
-@@ -757,8 +758,6 @@ void mlx4_en_stop_port(struct net_device *dev)
+ 		if (priv->rx_ring[i].use_frags)
+ 			mlx4_en_process_rx_cq(dev, cq, 0);
+ 		else
+@@ -755,8 +756,6 @@ void mlx4_en_stop_port(struct net_device *dev)
  	/* Free RX Rings */
  	for (i = 0; i < priv->rx_ring_num; i++) {
  		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -637,226 +69,11 @@
  		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
  	}
  }
-diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
-index 6e96d0a..2b0ca84 100644
---- a/drivers/net/mlx4/en_params.c
-+++ b/drivers/net/mlx4/en_params.c
-@@ -142,21 +142,6 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
-  * Ethtool support
-  */
- 
--static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
--{
--	int i;
--
--	priv->port_stats.lro_aggregated = 0;
--	priv->port_stats.lro_flushed = 0;
--	priv->port_stats.lro_no_desc = 0;
--
--	for (i = 0; i < priv->rx_ring_num; i++) {
--		priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
--		priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
--		priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
--	}
--}
--
- static void
- mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
- {
-@@ -266,8 +251,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
- 
- 	spin_lock_bh(&priv->stats_lock);
- 
--	mlx4_en_update_lro_stats(priv);
--
- 	for (i = 0; i < NUM_MAIN_STATS; i++)
- 		data[index++] = ((unsigned long *) &priv->stats)[i];
- 	for (i = 0; i < NUM_PORT_STATS; i++)
-@@ -457,7 +440,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
- 	.get_rx_csum = mlx4_en_get_rx_csum,
- 	.set_rx_csum = mlx4_en_set_rx_csum,
- 	.get_tx_csum = ethtool_op_get_tx_csum,
--	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
-+	.set_tx_csum = ethtool_op_set_tx_csum,
- 	.get_strings = mlx4_en_get_strings,
- 	.get_sset_count = mlx4_en_get_sset_count,
- 	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
 diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
-index 5a95d74..16f07bc 100644
+index 9d144cd..14b5faf 100644
 --- a/drivers/net/mlx4/en_rx.c
 +++ b/drivers/net/mlx4/en_rx.c
-@@ -37,6 +37,7 @@
- #include <linux/if_ether.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/etherdevice.h>
- 
- #include "mlx4_en.h"
- 
-@@ -51,18 +52,6 @@ static void mlx4_en_srq_event(struct mlx
- 	return;
- }
- 
--static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
--				   void **ip_hdr, void **tcpudp_hdr,
--				   u64 *hdr_flags, void *priv)
--{
--	*mac_hdr = page_address(frags->page) + frags->page_offset;
--	*ip_hdr = *mac_hdr + ETH_HLEN;
--	*tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
--	*hdr_flags = LRO_IPV4 | LRO_TCP;
--
--	return 0;
--}
--
- static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
- 			      struct mlx4_en_rx_desc *rx_desc,
- 			      struct skb_frag_struct *skb_frags,
-@@ -372,23 +361,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- 	}
- 	ring->buf = ring->wqres.buf.direct.buf;
- 
--	/* Configure lro mngr */
--	memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
--	ring->lro.dev = priv->dev;
--	ring->lro.features = LRO_F_NAPI;
--	ring->lro.frag_align_pad = NET_IP_ALIGN;
--	ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
--	ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
--	ring->lro.max_desc = mdev->profile.num_lro;
--	ring->lro.max_aggr = MAX_SKB_FRAGS;
--	ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
--				    sizeof(struct net_lro_desc),
--				    GFP_KERNEL);
--	if (!ring->lro.lro_arr) {
--		mlx4_err(mdev, "Failed to allocate lro array\n");
-+	/* Allocate LRO sessions */
-+	if (mlx4_en_lro_init(ring, mdev->profile.num_lro)) {
-+		mlx4_err(mdev, "Failed allocating lro sessions\n");
- 		goto err_map;
- 	}
--	ring->lro.get_frag_header = mlx4_en_get_frag_header;
- 
- 	return 0;
- 
-@@ -501,7 +478,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 
--	kfree(ring->lro.lro_arr);
-+	mlx4_en_lro_destroy(ring);
- 	mlx4_en_unmap_buffer(&ring->wqres.buf);
- 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
- 	vfree(ring->rx_info);
-@@ -522,12 +499,12 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- 
- 
- /* Unmap a completed descriptor and free unused pages */
--static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
--				    struct mlx4_en_rx_desc *rx_desc,
--				    struct skb_frag_struct *skb_frags,
--				    struct skb_frag_struct *skb_frags_rx,
--				    struct mlx4_en_rx_alloc *page_alloc,
--				    int length)
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct mlx4_en_frag_info *frag_info;
-@@ -570,11 +547,11 @@ fail:
- }
- 
- 
--static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
--				      struct mlx4_en_rx_desc *rx_desc,
--				      struct skb_frag_struct *skb_frags,
--				      struct mlx4_en_rx_alloc *page_alloc,
--				      unsigned int length)
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct sk_buff *skb;
-@@ -663,11 +640,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 	struct mlx4_cqe *cqe;
- 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
- 	struct skb_frag_struct *skb_frags;
--	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
- 	struct mlx4_en_rx_desc *rx_desc;
- 	struct sk_buff *skb;
- 	int index;
--	int nr;
- 	unsigned int length;
- 	int polled = 0;
- 	int ip_summed;
-@@ -718,38 +693,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 			if ((cqe->status & MLX4_CQE_STATUS_IPOK) &&
- 			    (cqe->checksum == 0xffff)) {
- 				priv->port_stats.rx_chksum_good++;
--				/* This packet is eligible for LRO if it is:
--				 * - DIX Ethernet (type interpretation)
--				 * - TCP/IP (v4)
--				 * - without IP options
--				 * - not an IP fragment */
--				if (mlx4_en_can_lro(cqe->status) &&
--				    dev->features & NETIF_F_LRO) {
--
--					nr = mlx4_en_complete_rx_desc(
--						priv, rx_desc,
--						skb_frags, lro_frags,
--						ring->page_alloc, length);
--					if (!nr)
--						goto next;
--
--					if (priv->vlgrp && (cqe->vlan_my_qpn &
--						MLX4_CQE_VLAN_PRESENT_MASK)) {
--						lro_vlan_hwaccel_receive_frags(
--						       &ring->lro, lro_frags,
--						       length, length,
--						       priv->vlgrp,
--						       be16_to_cpu(cqe->sl_vid),
--						       NULL, 0);
--					} else
--						lro_receive_frags(&ring->lro,
--								  lro_frags,
--								  length,
--								  length,
--								  NULL, 0);
--
-+				if (mdev->profile.num_lro &&
-+				    !mlx4_en_lro_rx(priv, ring, rx_desc,
-+						    skb_frags, length, cqe))
- 					goto next;
--				}
- 
- 				/* LRO not possible, complete processing here */
- 				ip_summed = CHECKSUM_UNNECESSARY;
-@@ -790,13 +737,15 @@ next:
- 		if (++polled == budget) {
- 			/* We are here because we reached the NAPI budget -
- 			 * flush only pending LRO sessions */
--			lro_flush_all(&ring->lro);
-+			if (mdev->profile.num_lro)
-+				mlx4_en_lro_flush(priv, ring, 0);
- 			goto out;
- 		}
- 	}
- 
- 	/* If CQ is empty flush all LRO sessions unconditionally */
--	lro_flush_all(&ring->lro);
-+	if (mdev->profile.num_lro)
-+		mlx4_en_lro_flush(priv, ring, 1);
- 
- out:
- 	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
-@@ -820,30 +769,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
+@@ -1041,30 +1041,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
  	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
  
  	if (priv->port_up)
@@ -877,8 +94,8 @@
  	int done;
 +	int work = min(*budget, poll_dev->quota);
  
--	done = mlx4_en_process_rx_cq(dev, cq, budget);
-+	done = mlx4_en_process_rx_cq(dev, cq, work);
+-	done = cq->process_cq(dev, cq, budget);
++	done = cq->process_cq(dev, cq, work);
 +	dev->quota -= done;
 +	*budget -= done;
  
@@ -901,76 +118,11 @@
  }
  
  
-diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
-index 2297fec..59f8772 100644
---- a/drivers/net/mlx4/en_tx.c
-+++ b/drivers/net/mlx4/en_tx.c
-@@ -37,6 +37,7 @@
- #include <linux/skbuff.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/tcp.h>
- 
- #include "mlx4_en.h"
- 
 diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
-index 1011803..7c8c489 100644
+index ebaed67..456cd91 100644
 --- a/drivers/net/mlx4/mlx4_en.h
 +++ b/drivers/net/mlx4/mlx4_en.h
-@@ -38,7 +38,7 @@
- #include <linux/list.h>
- #include <linux/mutex.h>
- #include <linux/netdevice.h>
--#include <linux/inet_lro.h>
-+#include <net/checksum.h>
- 
- #include <linux/mlx4/device.h>
- #include <linux/mlx4/qp.h>
-@@ -268,11 +268,42 @@ struct mlx4_en_rx_desc {
- 	struct mlx4_wqe_data_seg data[0];
- };
- 
-+struct mlx4_en_lro {
-+	struct hlist_node node;
-+	struct hlist_node flush_node;
-+
-+	/* Id fields come first: */
-+	u32 saddr;
-+	u32 daddr;
-+	u32 sport_dport;
-+	u32 next_seq;
-+	u16 tot_len;
-+	u8 psh;
-+
-+	u32 tsval;
-+	u32 tsecr;
-+	u32 ack_seq;
-+	u16 window;
-+	__be16 vlan_prio;
-+	u16 has_vlan;
-+	u16 has_timestamp;
-+	u16 mss;
-+	__wsum  data_csum;
-+
-+	unsigned long expires;
-+	struct sk_buff *skb;
-+	struct sk_buff *skb_last;
-+};
-+
-+
- struct mlx4_en_rx_ring {
- 	struct mlx4_srq srq;
- 	struct mlx4_hwq_resources wqres;
- 	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
--	struct net_lro_mgr lro;
-+	struct mlx4_en_lro lro;
-+	struct hlist_head *lro_hash;
-+	struct hlist_head lro_free;
-+	struct hlist_head lro_flush;
- 	u32 size ;	/* number of Rx descs*/
- 	u32 actual_size;
- 	u32 size_mask;
-@@ -311,7 +342,7 @@ struct mlx4_en_cq {
+@@ -328,7 +328,7 @@ struct mlx4_en_cq {
  	int                     ring;
  	spinlock_t              lock;
  	struct net_device      *dev;
@@ -979,43 +131,12 @@
  	/* Per-core Tx cq processing support */
  	struct timer_list timer;
  	int size;
-@@ -541,7 +572,7 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- int mlx4_en_process_rx_cq(struct net_device *dev,
- 			  struct mlx4_en_cq *cq,
- 			  int budget);
+@@ -571,7 +571,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
+ int mlx4_en_process_rx_cq_skb(struct net_device *dev,
+ 			      struct mlx4_en_cq *cq,
+ 			      int budget);
 -int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
 +int mlx4_en_poll_rx_cq(struct net_device *poll_dev, int *budget);;
  void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
  			     int is_tx, int rss, int qpn, int cqn, int srqn,
  			     struct mlx4_qp_context *context);
-@@ -552,6 +583,27 @@ void mlx4_en_calc_rx_buf(struct net_device *dev);
- void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
- 				 struct mlx4_en_rss_map *rss_map,
- 				 int num_entries, int num_rings);
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all);
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+		   struct mlx4_en_rx_desc *rx_desc,
-+		   struct skb_frag_struct *skb_frags,
-+		   unsigned int length, struct mlx4_cqe *cqe);
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring);
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro);
-+
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv* priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length);
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv* priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length);
-+
- void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
- int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
- void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
--- 
-1.5.4
-

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/mlx4_en_0110_no_set_flags.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/mlx4_en_0110_no_set_flags.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.17/mlx4_en_0110_no_set_flags.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -7,9 +7,9 @@
 --- ofed_kernel.orig/drivers/net/mlx4/en_params.c
 +++ ofed_kernel/drivers/net/mlx4/en_params.c
 @@ -593,8 +593,6 @@ const struct ethtool_ops mlx4_en_ethtool
- 	.get_pauseparam = mlx4_en_get_pauseparam,
  	.set_pauseparam = mlx4_en_set_pauseparam,
  	.get_ringparam = mlx4_en_get_ringparam,
+ 	.set_ringparam = mlx4_en_set_ringparam,
 -	.get_flags = ethtool_op_get_flags,
 -	.set_flags = ethtool_op_set_flags,
  };

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/core_2_z010_sysfs_to_2.6.18.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/core_2_z010_sysfs_to_2.6.18.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/core_2_z010_sysfs_to_2.6.18.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,32 @@
+--- a/drivers/infiniband/core/device.c	2009-02-24 14:00:50.387541000 +0200
++++ b/drivers/infiniband/core/device.c	2009-02-24 14:00:54.891362000 +0200
+@@ -199,7 +199,7 @@ void ib_dealloc_device(struct ib_device 
+ 
+ 	BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);
+ 
+-	kobject_put(&device->dev.kobj);
++	ib_device_unregister_sysfs(device);
+ }
+ EXPORT_SYMBOL(ib_dealloc_device);
+ 
+@@ -356,8 +356,6 @@ void ib_unregister_device(struct ib_devi
+ 
+ 	mutex_unlock(&device_mutex);
+ 
+-	ib_device_unregister_sysfs(device);
+-
+ 	spin_lock_irqsave(&device->client_data_lock, flags);
+ 	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ 		kfree(context);
+--- a/drivers/infiniband/core/sysfs.c	2009-02-24 13:58:42.509754000 +0200
++++ b/drivers/infiniband/core/sysfs.c	2009-02-24 14:00:54.894368000 +0200
+@@ -863,9 +863,6 @@ void ib_device_unregister_sysfs(struct i
+ 	struct kobject *p, *t;
+ 	struct ib_port *port;
+ 
+-	/* Hold kobject until ib_dealloc_device() */
+-	kobject_get(&device->dev.kobj);
+-
+ 	list_for_each_entry_safe(p, t, &device->port_list, entry) {
+ 		list_del(&p->entry);
+ 		port = container_of(p, struct ib_port, kobj);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/core_z0010_sysfs_race.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/core_z0010_sysfs_race.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/core_z0010_sysfs_race.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,496 @@
+ib_core: avoid race condition between sysfs access and low-level module unload. (V2)
+
+In newer kernels, although a low-level module will not be unloaded (code)
+while its sysfs interface is being accessed, it is possible for the module to
+free all its resources (data) during such access.  This almost always causes
+a kernel Oops.
+
+To avoid this, we protect the device reg_state with a mutex, and perform
+all sysfs operations (show, store) atomically within this mutex.
+
+V2: fix thinko bug in sysfs_state_show changes (found by Ralph Campbell).
+
+Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
+Signed-off-by: Ralph Campbell <ralph.campbell at qlogic.com>
+
+Index: ofed_kernel/drivers/infiniband/core/device.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/device.c	2009-02-22 15:36:34.531430000 +0200
++++ ofed_kernel/drivers/infiniband/core/device.c	2009-02-22 15:38:28.650322000 +0200
+@@ -178,9 +178,14 @@ static int end_port(struct ib_device *de
+  */
+ struct ib_device *ib_alloc_device(size_t size)
+ {
++	struct ib_device *ibdev;
++
+ 	BUG_ON(size < sizeof (struct ib_device));
+ 
+-	return kzalloc(size, GFP_KERNEL);
++	ibdev = kzalloc(size, GFP_KERNEL);
++	if (ibdev)
++		mutex_init(&ibdev->sysfs_mutex);
++	return ibdev;
+ }
+ EXPORT_SYMBOL(ib_alloc_device);
+ 
+@@ -313,9 +318,10 @@ int ib_register_device(struct ib_device 
+ 		goto out;
+ 	}
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	list_add_tail(&device->core_list, &device_list);
+-
+ 	device->reg_state = IB_DEV_REGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ 
+ 	{
+ 		struct ib_client *client;
+@@ -361,7 +367,9 @@ void ib_unregister_device(struct ib_devi
+ 		kfree(context);
+ 	spin_unlock_irqrestore(&device->client_data_lock, flags);
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	device->reg_state = IB_DEV_UNREGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ }
+ EXPORT_SYMBOL(ib_unregister_device);
+ 
+Index: ofed_kernel/drivers/infiniband/core/sysfs.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/sysfs.c	2009-02-22 15:36:35.914354000 +0200
++++ ofed_kernel/drivers/infiniband/core/sysfs.c	2009-02-22 16:12:37.485629000 +0200
+@@ -94,7 +94,7 @@ static ssize_t state_show(struct ib_port
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+ 	static const char *state_name[] = {
+ 		[IB_PORT_NOP]		= "NOP",
+@@ -105,26 +105,33 @@ static ssize_t state_show(struct ib_port
+ 		[IB_PORT_ACTIVE_DEFER]	= "ACTIVE_DEFER"
+ 	};
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%d: %s\n", attr.state,
+-		       attr.state >= 0 && attr.state < ARRAY_SIZE(state_name) ?
+-		       state_name[attr.state] : "UNKNOWN");
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d: %s\n", attr.state,
++				      attr.state >= 0 &&
++				      attr.state < ARRAY_SIZE(state_name) ?
++				      state_name[attr.state] : "UNKNOWN");
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "0x%x\n", attr.lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_mask_count_show(struct ib_port *p,
+@@ -132,52 +139,64 @@ static ssize_t lid_mask_count_show(struc
+ 				   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.lmc);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.lmc);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%x\n", attr.sm_lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.sm_lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused,
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.sm_sl);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.sm_sl);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused,
+ 			     char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
+@@ -186,24 +205,33 @@ static ssize_t rate_show(struct ib_port 
+ 	struct ib_port_attr attr;
+ 	char *speed = "";
+ 	int rate;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	switch (attr.active_speed) {
+-	case 2: speed = " DDR"; break;
+-	case 4: speed = " QDR"; break;
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.active_speed) {
++			case 2: speed = " DDR"; break;
++			case 4: speed = " QDR"; break;
++			}
++
++			rate = 25 * ib_width_enum_to_int(attr.active_width) *
++				attr.active_speed;
++			if (rate < 0) {
++				ret = -EINVAL;
++				goto out;
++			}
++
++			ret = sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
++				      rate / 10, rate % 10 ? ".5" : "",
++				      ib_width_enum_to_int(attr.active_width),
++				      speed);
++		}
+ 	}
+-
+-	rate = 25 * ib_width_enum_to_int(attr.active_width) * attr.active_speed;
+-	if (rate < 0)
+-		return -EINVAL;
+-
+-	return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
+-		       rate / 10, rate % 10 ? ".5" : "",
+-		       ib_width_enum_to_int(attr.active_width), speed);
++out:
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
+@@ -211,22 +239,42 @@ static ssize_t phys_state_show(struct ib
+ {
+ 	struct ib_port_attr attr;
+ 
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	switch (attr.phys_state) {
+-	case 1:  return sprintf(buf, "1: Sleep\n");
+-	case 2:  return sprintf(buf, "2: Polling\n");
+-	case 3:  return sprintf(buf, "3: Disabled\n");
+-	case 4:  return sprintf(buf, "4: PortConfigurationTraining\n");
+-	case 5:  return sprintf(buf, "5: LinkUp\n");
+-	case 6:  return sprintf(buf, "6: LinkErrorRecovery\n");
+-	case 7:  return sprintf(buf, "7: Phy Test\n");
+-	default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.phys_state) {
++			case 1:
++				ret = sprintf(buf, "1: Sleep\n");
++				break;
++			case 2:
++				ret = sprintf(buf, "2: Polling\n");
++				break;
++			case 3:
++				ret = sprintf(buf, "3: Disabled\n");
++				break;
++			case 4:
++				ret = sprintf(buf, "4: PortConfigurationTraining\n");
++				break;
++			case 5:
++				ret = sprintf(buf, "5: LinkUp\n");
++				break;
++			case 6:
++				ret = sprintf(buf, "6: LinkErrorRecovery\n");
++				break;
++			case 7:
++				ret = sprintf(buf, "7: Phy Test\n");
++				break;
++			default:
++				ret = sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++				break;
++			}
++		}
+ 	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static PORT_ATTR_RO(state);
+@@ -256,21 +304,24 @@ static ssize_t show_port_gid(struct ib_p
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	union ib_gid gid;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) gid.raw)[0]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[1]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[2]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[3]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[4]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[5]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[6]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[7]));
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) gid.raw)[0]),
++				      be16_to_cpu(((__be16 *) gid.raw)[1]),
++				      be16_to_cpu(((__be16 *) gid.raw)[2]),
++				      be16_to_cpu(((__be16 *) gid.raw)[3]),
++				      be16_to_cpu(((__be16 *) gid.raw)[4]),
++				      be16_to_cpu(((__be16 *) gid.raw)[5]),
++				      be16_to_cpu(((__be16 *) gid.raw)[6]),
++				      be16_to_cpu(((__be16 *) gid.raw)[7]));
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
+@@ -279,13 +330,16 @@ static ssize_t show_port_pkey(struct ib_
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	u16 pkey;
+-	ssize_t ret;
+-
+-	ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%04x\n", pkey);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
++		if (!ret)
++			ret = sprintf(buf, "0x%04x\n", pkey);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ #define PORT_PMA_ATTR(_name, _counter, _width, _offset)			\
+@@ -308,6 +346,12 @@ static ssize_t show_pma_counter(struct i
+ 	if (!p->ibdev->process_mad)
+ 		return sprintf(buf, "N/A (no PMA)\n");
+ 
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (!ibdev_is_alive(p->ibdev)) {
++		ret = -ENODEV;
++		goto out;
++	}
++
+ 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
+ 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ 	if (!in_mad || !out_mad) {
+@@ -354,7 +414,7 @@ static ssize_t show_pma_counter(struct i
+ out:
+ 	kfree(in_mad);
+ 	kfree(out_mad);
+-
++	mutex_unlock(&p->ibdev->sysfs_mutex);
+ 	return ret;
+ }
+ 
+@@ -594,20 +654,20 @@ static ssize_t show_sys_image_guid(struc
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_attr attr;
+-	ssize_t ret;
+-
+-	if (!ibdev_is_alive(dev))
+-		return -ENODEV;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_device(dev, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_query_device(dev, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_node_guid(struct device *device,
+@@ -639,17 +699,20 @@ static ssize_t set_node_desc(struct devi
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_modify desc = {};
+-	int ret;
++	int ret = -ENODEV;
+ 
+ 	if (!dev->modify_device)
+ 		return -EIO;
+ 
+ 	memcpy(desc.node_desc, buf, min_t(int, count, 64));
+-	ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
+-	if (ret)
+-		return ret;
+-
+-	return count;
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
++		if (!ret)
++			ret = count;
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
+@@ -677,14 +740,18 @@ static ssize_t show_protocol_stat(const 
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	union rdma_protocol_stats stats;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = dev->get_protocol_stats(dev, &stats);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%llu\n",
+-		       (unsigned long long) ((u64 *) &stats)[offset]);
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = dev->get_protocol_stats(dev, &stats);
++		if (!ret)
++			ret = sprintf(buf, "%llu\n",
++				      (unsigned long long)
++				      ((u64 *) &stats)[offset]);
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ /* generate a read-only iwarp statistics attribute */
+Index: ofed_kernel/include/rdma/ib_verbs.h
+===================================================================
+--- ofed_kernel.orig/include/rdma/ib_verbs.h	2009-02-22 15:36:40.252210000 +0200
++++ ofed_kernel/include/rdma/ib_verbs.h	2009-02-22 15:38:28.678322000 +0200
+@@ -1205,6 +1205,7 @@ struct ib_device {
+ 		IB_DEV_REGISTERED,
+ 		IB_DEV_UNREGISTERED
+ 	}                            reg_state;
++	struct mutex		     sysfs_mutex;
+ 
+ 	u64			     uverbs_cmd_mask;
+ 	int			     uverbs_abi_ver;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/cxgb3_0002_undo_250.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/cxgb3_0002_undo_250.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/cxgb3_0002_undo_250.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,8 @@
-commit 204e2f98c2d13f869b8541f3c57c7314f75cab11
-Author: Divy Le Ray <divy at chelsio.com>
-Date:   Tue May 6 19:26:01 2008 -0700
-
-    cxgb3 - fix EEH
-    
-    Reset the chip when the PCI link goes down.
-    Preserve the napi structure when a sge qset's resources are freed.
-    Replay only HW initialization when the chip comes out of reset.
-    
-    Signed-off-by: Divy Le ray <divy at chelsio.com>
-    Signed-off-by: Jeff Garzik <jgarzik at redhat.com>
-
----
- drivers/net/cxgb3/common.h     |    1 -
- drivers/net/cxgb3/cxgb3_main.c |   10 ++++------
- drivers/net/cxgb3/regs.h       |    8 --------
- drivers/net/cxgb3/sge.c        |   31 ++-----------------------------
- drivers/net/cxgb3/t3_hw.c      |   28 ----------------------------
- 5 files changed, 6 insertions(+), 72 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/common.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/common.h
-+++ ofed_kernel/drivers/net/cxgb3/common.h
-@@ -700,7 +700,6 @@ void mac_prep(struct cmac *mac, struct a
+diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
+index d5e9bf7..dc2c79d 100644
+--- a/drivers/net/cxgb3/common.h
++++ b/drivers/net/cxgb3/common.h
+@@ -726,7 +726,6 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
  void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
  int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
  		    int reset);
@@ -31,11 +10,11 @@
  void t3_led_ready(struct adapter *adapter);
  void t3_fatal_err(struct adapter *adapter);
  void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -2449,6 +2449,9 @@ static pci_ers_result_t t3_io_error_dete
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 8de820e..48fbda6 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -2485,6 +2485,9 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
  	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
  		offload_close(&adapter->tdev);
  
@@ -45,7 +24,7 @@
  	adapter->flags &= ~FULL_INIT_DONE;
  
  	pci_disable_device(pdev);
-@@ -2473,12 +2476,8 @@ static pci_ers_result_t t3_io_slot_reset
+@@ -2509,12 +2512,8 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
  		goto err;
  	}
  	pci_set_master(pdev);
@@ -59,7 +38,7 @@
  		goto err;
  
  	return PCI_ERS_RESULT_RECOVERED;
-@@ -2630,7 +2629,6 @@ static int __devinit init_one(struct pci
+@@ -2666,7 +2665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
  	}
  
  	pci_set_master(pdev);
@@ -67,10 +46,10 @@
  
  	mmio_start = pci_resource_start(pdev, 0);
  	mmio_len = pci_resource_len(pdev, 0);
-Index: ofed_kernel/drivers/net/cxgb3/regs.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/regs.h
-+++ ofed_kernel/drivers/net/cxgb3/regs.h
+diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
+index a035d5c..086cd02 100644
+--- a/drivers/net/cxgb3/regs.h
++++ b/drivers/net/cxgb3/regs.h
 @@ -444,14 +444,6 @@
  
  #define A_PCIE_CFG 0x88
@@ -86,11 +65,11 @@
  #define S_PCIE_CLIDECEN    16
  #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
  #define F_PCIE_CLIDECEN    V_PCIE_CLIDECEN(1U)
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index 1b0861d..912f816 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
  }
  
  /**
@@ -124,7 +103,7 @@
   *	free_qset - free the resources of an SGE queue set
   *	@adapter: the adapter owning the queue set
   *	@q: the queue set
-@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter 
+@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -133,20 +112,11 @@
  }
  
  /**
-@@ -1429,7 +1402,7 @@ static void restart_ctrlq(unsigned long 
-  */
- int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
- {
--	int ret;
-+	int ret;
- 	local_bh_disable();
- 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
- 	local_bh_enable();
-Index: ofed_kernel/drivers/net/cxgb3/t3_hw.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/t3_hw.c
-+++ ofed_kernel/drivers/net/cxgb3/t3_hw.c
-@@ -3265,7 +3265,6 @@ static void config_pcie(struct adapter *
+diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
+index bf5c076..533fc74 100644
+--- a/drivers/net/cxgb3/t3_hw.c
++++ b/drivers/net/cxgb3/t3_hw.c
+@@ -3303,7 +3303,6 @@ static void config_pcie(struct adapter *adap)
  
  	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
  	t3_set_reg_field(adap, A_PCIE_CFG, 0,
@@ -154,15 +124,14 @@
  			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
  }
  
-@@ -3657,30 +3656,3 @@ void t3_led_ready(struct adapter *adapte
- 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+@@ -3706,31 +3705,3 @@ void t3_led_ready(struct adapter *adapter)
  			 F_GPIO0_OUT_VAL);
  }
--
+ 
 -int t3_replay_prep_adapter(struct adapter *adapter)
 -{
 -	const struct adapter_info *ai = adapter->params.info;
--	unsigned int i, j = 0;
+-	unsigned int i, j = -1;
 -	int ret;
 -
 -	early_hw_init(adapter, ai);
@@ -171,15 +140,17 @@
 -		return ret;
 -
 -	for_each_port(adapter, i) {
+-		const struct port_type_info *pti;
 -		struct port_info *p = adap2pinfo(adapter, i);
--		while (!adapter->params.vpd.port_type[j])
--			++j;
 -
--		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
--					ai->mdio_ops);
+-		while (!adapter->params.vpd.port_type[++j])
+-			;
 -
+-		pti = &port_types[adapter->params.vpd.port_type[j]];
+-		ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
+-		if (ret)
+-			return ret;
 -		p->phy.ops->power_down(&p->phy, 1);
--		++j;
 -	}
 -
 -return 0;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/cxgb3_0010_napi.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/cxgb3_0010_napi.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/cxgb3_0010_napi.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,21 @@
----
- drivers/net/cxgb3/adapter.h    |   22 +++--
- drivers/net/cxgb3/cxgb3_main.c |   94 ++++++++++++++++------
- drivers/net/cxgb3/sge.c        |  170 ++++++++++++++++++++++++-----------------
- 3 files changed, 185 insertions(+), 101 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/adapter.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/adapter.h
-+++ ofed_kernel/drivers/net/cxgb3/adapter.h
-@@ -47,13 +47,11 @@
- #include <asm/io.h>
+diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
+index 3b33ecb..21dad82 100644
+--- a/drivers/net/cxgb3/adapter.h
++++ b/drivers/net/cxgb3/adapter.h
+@@ -48,12 +48,10 @@
  
  struct vlan_group;
--struct adapter;
+ struct adapter;
 -struct sge_qset;
  
-+struct adapter;
  struct port_info {
  	struct adapter *adapter;
  	struct vlan_group *vlan_grp;
 -	struct sge_qset *qs;
- 	const struct port_type_info *port_type;
  	u8 port_id;
  	u8 rx_csum_offload;
-@@ -184,8 +182,6 @@ enum {				/* per port SGE statistics */
+ 	u8 nqsets;
+@@ -183,8 +181,6 @@ enum {				/* per port SGE statistics */
  #define T3_MAX_LRO_MAX_PKTS 64
  
  struct sge_qset {		/* an SGE queue set */
@@ -32,7 +24,7 @@
  	struct sge_rspq rspq;
  	struct sge_fl fl[SGE_RXQ_PER_SET];
  	struct sge_txq txq[SGE_TXQ_PER_SET];
-@@ -196,7 +192,7 @@ struct sge_qset {		/* an SGE queue set *
+@@ -195,7 +191,7 @@ struct sge_qset {		/* an SGE queue set */
  	int lro_enabled;
  	int lro_frag_len;
  	void *lro_va;
@@ -41,7 +33,7 @@
  	unsigned long txq_stopped;	/* which Tx queues are stopped */
  	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
  	unsigned long port_stats[SGE_PSTAT_MAX];
-@@ -241,6 +237,12 @@ struct adapter {
+@@ -240,6 +236,12 @@ struct adapter {
  	struct delayed_work adap_check_task;
  	struct work_struct ext_intr_handler_task;
  
@@ -54,7 +46,7 @@
  	struct dentry *debugfs_root;
  
  	struct mutex mdio_lock;
-@@ -267,6 +269,12 @@ static inline struct port_info *adap2pin
+@@ -266,6 +268,12 @@ static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
  	return netdev_priv(adap->port[idx]);
  }
  
@@ -67,7 +59,7 @@
  #define OFFLOAD_DEVMAP_BIT 15
  
  #define tdev2adap(d) container_of(d, struct adapter, tdev)
-@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, str
+@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -76,11 +68,11 @@
  int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
  		unsigned char *data);
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -410,17 +410,49 @@ static void setup_rss(struct adapter *ad
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 0f4c694..342d441 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -435,17 +435,49 @@ static void setup_rss(struct adapter *adap)
  		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
  }
  
@@ -98,9 +90,7 @@
 -	int i;
 +	int i, j, dummy_idx = 0;
 +	struct net_device *nd;
- 
--	for (i = 0; i < SGE_QSETS; i++) {
--		struct sge_qset *qs = &adap->sge.qs[i];
++
 +	for_each_port(adap, i) {
 +		struct net_device *dev = adap->port[i];
 +		const struct port_info *pi = netdev_priv(dev);
@@ -112,7 +102,9 @@
 +				nd = alloc_netdev(sizeof(*p), "", ether_setup);
 +				if (!nd)
 +					goto free_all;
-+
+ 
+-	for (i = 0; i < SGE_QSETS; i++) {
+-		struct sge_qset *qs = &adap->sge.qs[i];
 +				p = netdev_priv(nd);
 +				p->adapter = adap;
 +				nd->weight = 64;
@@ -137,7 +129,7 @@
  }
  
  /*
-@@ -431,18 +463,20 @@ static void init_napi(struct adapter *ad
+@@ -456,18 +488,20 @@ static void init_napi(struct adapter *adap)
  static void quiesce_rx(struct adapter *adap)
  {
  	int i;
@@ -168,7 +160,7 @@
  }
  
  /**
-@@ -455,7 +489,7 @@ static void enable_all_napi(struct adapt
+@@ -480,7 +514,7 @@ static void enable_all_napi(struct adapter *adap)
   */
  static int setup_sge_qsets(struct adapter *adap)
  {
@@ -177,7 +169,7 @@
  	unsigned int ntxq = SGE_TXQ_PER_SET;
  
  	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
-@@ -463,14 +497,15 @@ static int setup_sge_qsets(struct adapte
+@@ -488,14 +522,15 @@ static int setup_sge_qsets(struct adapter *adap)
  
  	for_each_port(adap, i) {
  		struct net_device *dev = adap->port[i];
@@ -196,7 +188,7 @@
  			if (err) {
  				t3_free_sge_resources(adap);
  				return err;
-@@ -884,6 +919,10 @@ static int cxgb_up(struct adapter *adap)
+@@ -909,6 +944,10 @@ static int cxgb_up(struct adapter *adap)
  				goto out;
  		}
  
@@ -207,7 +199,7 @@
  		err = t3_init_hw(adap, 0);
  		if (err)
  			goto out;
-@@ -896,7 +935,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -921,7 +960,6 @@ static int cxgb_up(struct adapter *adap)
  			goto out;
  
  		setup_rss(adap);
@@ -215,7 +207,7 @@
  		adap->flags |= FULL_INIT_DONE;
  	}
  
-@@ -924,7 +962,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -949,7 +987,6 @@ static int cxgb_up(struct adapter *adap)
  				      adap->name, adap)))
  		goto irq_err;
  
@@ -223,7 +215,7 @@
  	t3_sge_start(adap);
  	t3_intr_enable(adap);
  
-@@ -1061,10 +1098,8 @@ static int cxgb_open(struct net_device *
+@@ -1086,10 +1123,8 @@ static int cxgb_open(struct net_device *dev)
  	int other_ports = adapter->open_device_map & PORT_MASK;
  	int err;
  
@@ -235,7 +227,7 @@
  
  	set_bit(pi->port_id, &adapter->open_device_map);
  	if (is_offload(adapter) && !ofld_disable) {
-@@ -2700,6 +2735,7 @@ static int __devinit init_one(struct pci
+@@ -2736,6 +2771,7 @@ static int __devinit init_one(struct pci_dev *pdev,
  #ifdef CONFIG_NET_POLL_CONTROLLER
  		netdev->poll_controller = cxgb_netpoll;
  #endif
@@ -243,7 +235,7 @@
  
  		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
  	}
-@@ -2800,6 +2836,12 @@ static void __devexit remove_one(struct 
+@@ -2836,6 +2872,12 @@ static void __devexit remove_one(struct pci_dev *pdev)
  		t3_free_sge_resources(adapter);
  		cxgb_disable_msi(adapter);
  
@@ -256,11 +248,11 @@
  		for_each_port(adapter, i)
  			if (adapter->port[i])
  				free_netdev(adapter->port[i]);
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter 
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index f6bc6fe..3bbf626 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -270,7 +262,7 @@
  	memset(q, 0, sizeof(*q));
  }
  
-@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, str
+@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
  	unsigned int ndesc, pidx, credits, gen, compl;
  	const struct port_info *pi = netdev_priv(dev);
  	struct adapter *adap = pi->adapter;
@@ -279,7 +271,7 @@
  	struct sge_txq *q = &qs->txq[TXQ_ETH];
  
  	/*
-@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long 
+@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long data)
  	struct sk_buff *skb;
  	struct sge_qset *qs = (struct sge_qset *)data;
  	struct sge_txq *q = &qs->txq[TXQ_CTRL];
@@ -295,7 +287,7 @@
  
  		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
  
-@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long 
+@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long data)
  
  	spin_unlock(&q->lock);
  	wmb();
@@ -304,7 +296,7 @@
  		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
  }
  
-@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struc
+@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
  	else {
  		struct sge_qset *qs = rspq_to_qset(q);
  
@@ -314,7 +306,7 @@
  		q->rx_head = skb;
  	}
  	q->rx_tail = skb;
-@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundl
+@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev,
   *	receive handler.  Batches need to be of modest size as we do prefetches
   *	on the packets in each.
   */
@@ -357,7 +349,7 @@
  			prefetch(head->data);
  			skbs[ngathered] = head;
  			head = head->next;
-@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct 
+@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct *napi, int budget)
  		}
  		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
  	}
@@ -370,7 +362,7 @@
  }
  
  /**
-@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const
+@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const struct rsp_desc *r)
  
  /**
   *	napi_rx_handler - the NAPI handler for Rx processing
@@ -449,7 +441,7 @@
  }
  
  /**
-@@ -2448,7 +2462,8 @@ static inline int handle_responses(struc
+@@ -2448,7 +2462,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
  		return 0;
  	}
@@ -459,7 +451,7 @@
  	return 1;
  }
  
-@@ -2459,7 +2474,8 @@ static inline int handle_responses(struc
+@@ -2459,7 +2474,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -469,7 +461,7 @@
  	struct sge_rspq *q = &qs->rspq;
  
  	spin_lock(&q->lock);
-@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, vo
+@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -484,7 +476,7 @@
  		q->unhandled_irqs++;
  	spin_unlock(&q->lock);
  	return IRQ_HANDLED;
-@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, 
+@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, void *cookie)
  	return IRQ_HANDLED;
  }
  
@@ -502,7 +494,7 @@
  		return 1;
  	}
  	return 0;
-@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int 
+@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
  
  	spin_lock(&q->lock);
  
@@ -515,7 +507,7 @@
  	if (!new_packets && t3_slow_intr_handler(adap) == 0)
  		q->unhandled_irqs++;
  
-@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, voi
+@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, void *cookie)
  static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  {
  	u32 map;
@@ -527,7 +519,7 @@
  
  	t3_write_reg(adap, A_PL_CLI, 0);
  	map = t3_read_reg(adap, A_SG_DATA_INTR);
-@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq
+@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  	if (unlikely(map & F_ERRINTR))
  		t3_slow_intr_handler(adap);
  
@@ -550,7 +542,7 @@
  
  	spin_unlock(&q0->lock);
  	return IRQ_HANDLED;
-@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long d
+@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long data)
  {
  	spinlock_t *lock;
  	struct sge_qset *qs = (struct sge_qset *)data;
@@ -560,7 +552,7 @@
  
  	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
  		reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
-@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long d
+@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long data)
  		spin_unlock(&qs->txq[TXQ_OFLD].lock);
  	}
  	lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
@@ -572,7 +564,7 @@
  			u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
  
  			if (qs->fl[0].credits < qs->fl[0].size)
-@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long d
+@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long data)
   */
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
  {
@@ -586,7 +578,7 @@
  }
  
  /**
-@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_
+@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
   */
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -595,7 +587,7 @@
  {
  	int i, avail, ret = -ENOMEM;
  	struct sge_qset *q = &adapter->sge.qs[id];
-@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *ad
+@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  	}
  
  	spin_unlock_irq(&adapter->sge.reg_lock);
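
The reworked cxgb3 napi backport above keeps the pre-2.6.24 polling model: per-queue dummy net_devices with nd->weight = 64 stand in for napi_struct, and netif_rx_complete()/netif_poll_enable()/netif_poll_disable() replace their napi_* counterparts. A rough sketch of that older contract, assuming the classic dev->poll signature (the handler body and names are illustrative, not taken from the patch):

/* Pre-2.6.24 poll handler sketch: return 0 after netif_rx_complete() when all
 * work is done, or nonzero to stay on the poll list. */
static int example_poll(struct net_device *dev, int *budget)
{
	int done = 0;			/* packets handled this pass */

	/* process up to min(*budget, dev->quota) completions, incrementing done */

	*budget -= done;
	dev->quota -= done;

	netif_rx_complete(dev);		/* nothing left: leave the poll list */
	return 0;
}

static void example_poll_setup(struct net_device *dev)
{
	dev->poll = example_poll;	/* cf. the dummy netdevs above getting nd->weight = 64 */
	dev->weight = 64;
}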

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,23 +3,32 @@
  drivers/infiniband/hw/ipath/ipath_user_sdma.c |    6 +++---
  2 files changed, 4 insertions(+), 4 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:37:35.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:41:22.000000000 -0800
 @@ -698,7 +698,7 @@ retry:
  
  	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
  			      tx->map_len, DMA_TO_DEVICE);
--	if (dma_mapping_error(&dd->pcidev->dev, addr)) {
-+	if (dma_mapping_error(addr)) {
- 		ret = -EIO;
- 		goto unlock;
- 	}
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+-	if (dma_mapping_error(&dd->pcidev->dev, addr))
++	if (dma_mapping_error(addr))
+ 		goto ioerr;
+ 
+ 	dwoffset = tx->map_len >> 2;
+@@ -739,7 +739,7 @@ retry:
+ 		dw = (len + 3) >> 2;
+ 		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
+ 				      DMA_TO_DEVICE);
+-		if (dma_mapping_error(&dd->pcidev->dev, addr))
++		if (dma_mapping_error(addr))
+ 			goto unmap;
+ 		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
+ 		/* SDmaUseLargeBuf has to be set in every descriptor */
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:18.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:54.000000000 -0800
 @@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(cons
  
  	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
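
The rebased ipath hunks above drop the struct device argument because kernels up to 2.6.26 declare dma_mapping_error() with a single dma_addr_t parameter; the two-argument form only arrived in 2.6.27. This patch simply rewrites the call sites, but the same effect is often achieved with a version-guarded compat macro, sketched below purely for illustration (compat_dma_mapping_error is a made-up name, not something the patch defines):

#include <linux/version.h>
#include <linux/dma-mapping.h>

/* Hypothetical compat wrapper, not part of this patch. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
#define compat_dma_mapping_error(dev, addr)	dma_mapping_error(addr)
#else
#define compat_dma_mapping_error(dev, addr)	dma_mapping_error(dev, addr)
#endif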

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/ipoib_0100_to_2.6.21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/ipoib_0100_to_2.6.21.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/ipoib_0100_to_2.6.21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -12,11 +12,11 @@
  drivers/infiniband/ulp/ipoib/ipoib_multicast.c |   10 ++--
  5 files changed, 61 insertions(+), 64 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:56:08.000000000 +0200
-@@ -279,8 +279,6 @@ struct ipoib_dev_priv {
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:46:20.000000000 +0300
+@@ -277,8 +277,6 @@ struct ipoib_dev_priv {
  
  	struct net_device *dev;
  
@@ -25,7 +25,7 @@
  	unsigned long flags;
  
  	struct mutex vlan_mutex;
-@@ -338,6 +336,8 @@ struct ipoib_dev_priv {
+@@ -336,6 +334,8 @@ struct ipoib_dev_priv {
  
  	struct ib_event_handler event_handler;
  
@@ -34,7 +34,7 @@
  	struct net_device *parent;
  	struct list_head child_intfs;
  	struct list_head list;
-@@ -425,7 +425,7 @@ extern struct workqueue_struct *ipoib_wo
+@@ -423,7 +423,7 @@ extern struct workqueue_struct *ipoib_wo
  
  /* functions */
  
@@ -43,10 +43,10 @@
  void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
  void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:46:20.000000000 +0300
 @@ -593,7 +593,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		ipoib_dbg(priv, "cm recv error "
  			   "(status=%d, wrid=%d vend_err %x)\n",
@@ -56,7 +56,7 @@
  		if (has_srq)
  			goto repost;
  		else {
-@@ -658,7 +658,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
+@@ -646,7 +646,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		 * this packet and reuse the old buffer.
  		 */
  		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
@@ -65,7 +65,7 @@
  		goto repost;
  	}
  
-@@ -676,8 +676,8 @@ copied:
+@@ -664,8 +664,8 @@ copied:
  	skb_pull(skb, IPOIB_ENCAP_LEN);
  
  	dev->last_rx = jiffies;
@@ -76,7 +76,7 @@
  
  	skb->dev = dev;
  	/* XXX get correct PACKET_ type here */
-@@ -726,8 +726,8 @@ void ipoib_cm_send(struct net_device *de
+@@ -714,8 +714,8 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(skb->len > tx->mtu)) {
  		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
  			   skb->len, tx->mtu);
@@ -87,7 +87,7 @@
  		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
  		return;
  	}
-@@ -746,7 +746,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -734,7 +734,7 @@ void ipoib_cm_send(struct net_device *de
  	tx_req->skb = skb;
  	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
  	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
@@ -96,7 +96,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -756,7 +756,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -744,7 +744,7 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
  			       addr, skb->len))) {
  		ipoib_warn(priv, "post_send failed\n");
@@ -105,7 +105,7 @@
  		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
  		dev_kfree_skb_any(skb);
  	} else {
-@@ -793,8 +793,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
+@@ -781,8 +781,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
  	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
  
  	/* FIXME: is this right? Shouldn't we only increment on success? */
@@ -116,10 +116,10 @@
  
  	dev_kfree_skb_any(tx_req->skb);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:43:16.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:49:04.000000000 +0300
 @@ -261,7 +261,7 @@ static void ipoib_ib_handle_rx_wc(struct
  	 * this packet and reuse the old buffer.
  	 */
@@ -263,60 +263,41 @@
  		--priv->tx_outstanding;
  		ipoib_dma_unmap_tx(priv->ca, tx_req);
  		dev_kfree_skb_any(skb);
-@@ -809,6 +812,7 @@ int ipoib_ib_dev_stop(struct net_device 
+@@ -708,8 +711,7 @@ int ipoib_ib_dev_open(struct net_device 
+ 	priv->poll_timer.function = ipoib_ib_tx_timer_func;
+ 	priv->poll_timer.data = (unsigned long)dev;
+ 
+-	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_enable(&priv->napi);
++	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+ 
+ 	return 0;
+ }
+@@ -828,8 +830,8 @@ int ipoib_ib_dev_stop(struct net_device 
+ 	struct ipoib_tx_buf *tx_req;
  	int i;
  
- 	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+-	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_disable(&priv->napi);
++	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 +	netif_poll_disable(dev);
  
  	ipoib_cm_dev_stop(dev);
  
-@@ -893,6 +897,7 @@ timeout:
- 		msleep(1);
- 	}
+@@ -903,6 +905,7 @@ timeout:
  
+ 	ipoib_ah_dev_cleanup(dev);
+ 
 +	netif_poll_enable(dev);
  	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
  
  	return 0;
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:55:35.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:58:18.000000000 +0200
-@@ -106,20 +106,16 @@ int ipoib_open(struct net_device *dev)
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:44:02.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:46:20.000000000 +0300
+@@ -585,7 +585,7 @@ static void neigh_add_path(struct sk_buf
  
- 	ipoib_dbg(priv, "bringing up interface\n");
- 
--	if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
--		napi_enable(&priv->napi);
-+	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- 
- 	if (ipoib_pkey_dev_delay_open(dev))
- 		return 0;
- 
--	if (ipoib_ib_dev_open(dev)) {
--		napi_disable(&priv->napi);
--		return -EINVAL;
--	}
-+	if (ipoib_ib_dev_open(dev))
-+  		return -EINVAL;
- 
- 	if (ipoib_ib_dev_up(dev)) {
- 		ipoib_ib_dev_stop(dev, 1);
--		napi_disable(&priv->napi);
- 		return -EINVAL;
- 	}
- 
-@@ -152,7 +148,6 @@ static int ipoib_stop(struct net_device 
- 	ipoib_dbg(priv, "stopping interface\n");
- 
- 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
--	napi_disable(&priv->napi);
- 
- 	netif_stop_queue(dev);
- 
-@@ -604,7 +599,7 @@ static void neigh_add_path(struct sk_buf
- 
  	neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
  	if (!neigh) {
 -		++dev->stats.tx_dropped;
@@ -324,7 +305,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -667,7 +662,7 @@ err_list:
+@@ -646,7 +646,7 @@ err_list:
  err_path:
  	ipoib_neigh_free(dev, neigh);
  err_drop:
@@ -333,7 +314,7 @@
  	dev_kfree_skb_any(skb);
  
  	spin_unlock_irqrestore(&priv->lock, flags);
-@@ -714,7 +709,7 @@ static void unicast_arp_send(struct sk_b
+@@ -697,7 +697,7 @@ static void unicast_arp_send(struct sk_b
  			} else
  				__path_add(dev, path);
  		} else {
@@ -342,7 +323,7 @@
  			dev_kfree_skb_any(skb);
  		}
  
-@@ -733,7 +728,7 @@ static void unicast_arp_send(struct sk_b
+@@ -716,7 +716,7 @@ static void unicast_arp_send(struct sk_b
  		skb_push(skb, sizeof *phdr);
  		__skb_queue_tail(&path->queue, skb);
  	} else {
@@ -351,7 +332,7 @@
  		dev_kfree_skb_any(skb);
  	}
  
-@@ -790,7 +785,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -773,7 +773,7 @@ static int ipoib_start_xmit(struct sk_bu
  			__skb_queue_tail(&neigh->queue, skb);
  			spin_unlock_irqrestore(&priv->lock, flags);
  		} else {
@@ -360,7 +341,7 @@
  			dev_kfree_skb_any(skb);
  		}
  	} else {
-@@ -816,7 +811,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -799,7 +799,7 @@ static int ipoib_start_xmit(struct sk_bu
  					   IPOIB_QPN(phdr->hwaddr),
  					   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
  				dev_kfree_skb_any(skb);
@@ -369,7 +350,7 @@
  				return NETDEV_TX_OK;
  			}
  
-@@ -842,7 +837,7 @@ static void ipoib_timeout(struct net_dev
+@@ -825,7 +825,7 @@ static void ipoib_timeout(struct net_dev
  static int ipoib_hard_header(struct sk_buff *skb,
  			     struct net_device *dev,
  			     unsigned short type,
@@ -378,7 +359,7 @@
  {
  	struct ipoib_header *header;
  
-@@ -931,9 +926,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
+@@ -914,9 +914,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
  void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
  {
  	struct sk_buff *skb;
@@ -390,7 +371,7 @@
  		dev_kfree_skb_any(skb);
  	}
  	if (ipoib_cm_get(neigh))
-@@ -1008,10 +1004,6 @@ void ipoib_dev_cleanup(struct net_device
+@@ -991,10 +992,6 @@ void ipoib_dev_cleanup(struct net_device
  	priv->tx_ring = NULL;
  }
  
@@ -401,7 +382,7 @@
  static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
  		       void **tcph, u64 *hdr_flags, void *priv)
  {
-@@ -1069,13 +1061,13 @@ static void ipoib_setup(struct net_devic
+@@ -1052,13 +1049,13 @@ static void ipoib_setup(struct net_devic
  	dev->change_mtu		 = ipoib_change_mtu;
  	dev->hard_start_xmit	 = ipoib_start_xmit;
  	dev->tx_timeout		 = ipoib_timeout;
@@ -418,10 +399,10 @@
  
  	dev->watchdog_timeo	 = HZ;
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:46:20.000000000 +0300
 @@ -100,7 +100,7 @@ static void ipoib_mcast_free(struct ipoi
  	}
  
@@ -448,7 +429,7 @@
  			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
  		}
  		netif_tx_unlock_bh(dev);
-@@ -667,7 +668,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -677,7 +678,7 @@ void ipoib_mcast_send(struct net_device 
  	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)		||
  	    !priv->broadcast					||
  	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
@@ -457,7 +438,7 @@
  		dev_kfree_skb_any(skb);
  		goto unlock;
  	}
-@@ -682,7 +683,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -692,7 +693,7 @@ void ipoib_mcast_send(struct net_device 
  		if (!mcast) {
  			ipoib_warn(priv, "unable to allocate memory for "
  				   "multicast structure\n");
@@ -466,7 +447,7 @@
  			dev_kfree_skb_any(skb);
  			goto out;
  		}
-@@ -697,7 +698,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -707,7 +708,7 @@ void ipoib_mcast_send(struct net_device 
  		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
  			skb_queue_tail(&mcast->pkt_queue, skb);
  		else {

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,26 @@
+---
+ drivers/infiniband/ulp/ipoib/ipoib_fs.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+===================================================================
+--- ofa_kernel-1.4.orig/drivers/infiniband/ulp/ipoib/ipoib_fs.c
++++ ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+@@ -134,7 +134,7 @@ static int ipoib_mcg_open(struct inode *
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_mcg_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_mcg_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -240,7 +240,7 @@ static int ipoib_path_open(struct inode 
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_path_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_path_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/iw_nes_050_to_2_6_24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/iw_nes_050_to_2_6_24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/iw_nes_050_to_2_6_24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:23:23.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -1118,19 +1118,19 @@ static ssize_t nes_show_wqm_quanta(struc
  static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
  					const char *buf, size_t count)

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/iw_nes_100_to_2_6_23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/iw_nes_100_to_2_6_23.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/iw_nes_100_to_2_6_23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/Kconfig nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig
---- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:38:45.000000000 -0500
 @@ -2,7 +2,6 @@ config INFINIBAND_NES
  	tristate "NetEffect RNIC Driver"
  	depends on PCI && INET && INFINIBAND
@@ -10,8 +10,8 @@
  	  This is a low-level driver for NetEffect RDMA enabled
  	  Network Interface Cards (RNIC).
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -360,10 +360,11 @@ struct ib_qp *nes_get_qp(struct ib_devic
   */
  static void nes_print_macaddr(struct net_device *netdev)
@@ -29,9 +29,9 @@
  
  /**
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_cm.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2008-11-07 15:26:56.000000000 -0600
-@@ -998,7 +998,6 @@ static int nes_addr_resolve_neigh(struct
+--- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1086,7 +1086,6 @@ static int nes_addr_resolve_neigh(struct
  	struct flowi fl;
  	struct neighbour *neigh;
  	int rc = -1;
@@ -39,7 +39,7 @@
  
  	memset(&fl, 0, sizeof fl);
  	fl.nl_u.ip4_u.daddr = htonl(dst_ip);
-@@ -1012,8 +1011,11 @@ static int nes_addr_resolve_neigh(struct
+@@ -1100,8 +1099,11 @@ static int nes_addr_resolve_neigh(struct
  	if (neigh) {
  		if (neigh->nud_state & NUD_VALID) {
  			nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -53,7 +53,7 @@
  			nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
  					     dst_ip, NES_ARP_ADD);
  			rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
-@@ -1042,7 +1044,6 @@ static struct nes_cm_node *make_cm_node(
+@@ -1130,7 +1132,6 @@ static struct nes_cm_node *make_cm_node(
  	int arpindex = 0;
  	struct nes_device *nesdev;
  	struct nes_adapter *nesadapter;
@@ -61,7 +61,7 @@
  
  	/* create an hte and cm_node for this instance */
  	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
-@@ -1111,8 +1112,11 @@ static struct nes_cm_node *make_cm_node(
+@@ -1197,8 +1198,11 @@ static struct nes_cm_node *make_cm_node(
  
  	/* copy the mac addr to node context */
  	memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
@@ -76,8 +76,8 @@
  	add_hte_node(cm_core, cm_node);
  	atomic_inc(&cm_nodes_created);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2008-11-07 15:38:18.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:38:45.000000000 -0500
 @@ -38,14 +38,9 @@
  #include <linux/ip.h>
  #include <linux/tcp.h>
@@ -90,10 +90,10 @@
 -module_param(nes_lro_max_aggr, uint, 0444);
 -MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
 -
- static u32 crit_err_count;
- u32 int_mod_timer_init;
- u32 int_mod_cq_depth_256;
-@@ -1540,25 +1535,6 @@ static void nes_rq_wqes_timeout(unsigned
+ static int wide_ppm_offset;
+ module_param(wide_ppm_offset, int, 0644);
+ MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm");
+@@ -1531,25 +1526,6 @@ static void nes_rq_wqes_timeout(unsigned
  }
  
  
@@ -119,7 +119,7 @@
  /**
   * nes_init_nic_qp
   */
-@@ -1783,14 +1759,6 @@ int nes_init_nic_qp(struct nes_device *n
+@@ -1773,14 +1749,6 @@ int nes_init_nic_qp(struct nes_device *n
  			jumbomode = 1;
  		nes_nic_init_timer_defaults(nesdev, jumbomode);
  	}
@@ -134,7 +134,7 @@
  	return 0;
  }
  
-@@ -2541,7 +2509,7 @@ static void nes_nic_napi_ce_handler(stru
+@@ -2556,7 +2524,7 @@ static void nes_nic_napi_ce_handler(stru
  {
  	struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
  
@@ -143,7 +143,7 @@
  }
  
  
-@@ -2576,13 +2544,10 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2591,13 +2559,10 @@ void nes_nic_ce_handler(struct nes_devic
  	u16 pkt_type;
  	u16 rqes_processed = 0;
  	u8 sq_cqes = 0;
@@ -157,7 +157,7 @@
  	do {
  		if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
  				NES_NIC_CQE_VALID) {
-@@ -2714,17 +2679,9 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2729,17 +2694,9 @@ void nes_nic_ce_handler(struct nes_devic
  							>> 16);
  					nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
  							nesvnic->netdev->name, vlan_tag);
@@ -178,7 +178,7 @@
  
  skip_rx_indicate0:
  				nesvnic->netdev->last_rx = jiffies;
-@@ -2755,8 +2712,6 @@ skip_rx_indicate0:
+@@ -2770,8 +2727,6 @@ skip_rx_indicate0:
  
  	} while (1);
  
@@ -188,18 +188,18 @@
  		barrier();
  		/* restart the queue if it had been stopped */
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.h nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:38:45.000000000 -0500
 @@ -33,8 +33,6 @@
  #ifndef __NES_HW_H
  #define __NES_HW_H
  
 -#include <linux/inet_lro.h>
 -
+ #define NES_PHY_TYPE_CX4       1
  #define NES_PHY_TYPE_1G        2
  #define NES_PHY_TYPE_IRIS      3
- #define NES_PHY_TYPE_ARGUS     4
-@@ -991,8 +989,6 @@ struct nes_hw_tune_timer {
+@@ -1005,8 +1003,6 @@ struct nes_hw_tune_timer {
  #define NES_TIMER_ENABLE_LIMIT      4
  #define NES_MAX_LINK_INTERRUPTS     128
  #define NES_MAX_LINK_CHECK          200
@@ -208,7 +208,7 @@
  
  struct nes_adapter {
  	u64              fw_ver;
-@@ -1168,7 +1164,6 @@ struct nes_vnic {
+@@ -1194,7 +1190,6 @@ struct nes_vnic {
  	u32               msg_enable;
  	/* u32 tx_avail; */
  	__be32            local_ipaddr;
@@ -216,7 +216,7 @@
  	spinlock_t           tx_lock;	/* could use netdev tx lock? */
  	struct timer_list    rq_wqes_timer;
  	u32                  nic_mem_size;
-@@ -1196,9 +1191,6 @@ struct nes_vnic {
+@@ -1222,9 +1217,6 @@ struct nes_vnic {
  	u8  of_device_registered;
  	u8  rdma_enabled;
  	u8  rx_checksum_disabled;
@@ -227,8 +227,8 @@
  
  struct nes_ib_device {
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
 @@ -96,35 +96,38 @@ static int nics_per_function = 1;
  /**
   * nes_netdev_poll
@@ -292,7 +292,7 @@
  	netif_stop_queue(netdev);
  	list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
  		first_nesvnic = container_of(list_pos, struct nes_vnic, list);
-@@ -797,14 +798,16 @@ static int nes_netdev_set_mac_address(st
+@@ -775,14 +776,16 @@ static int nes_netdev_set_mac_address(st
  	int i;
  	u32 macaddr_low;
  	u16 macaddr_high;
@@ -312,7 +312,7 @@
  	macaddr_high  = ((u16)netdev->dev_addr[0]) << 8;
  	macaddr_high += (u16)netdev->dev_addr[1];
  	macaddr_low   = ((u32)netdev->dev_addr[2]) << 24;
-@@ -909,11 +912,11 @@ static void nes_netdev_set_multicast_lis
+@@ -887,11 +890,11 @@ static void nes_netdev_set_multicast_lis
  			if (mc_index >= max_pft_entries_avaiable)
  				break;
  			if (multicast_addr) {
@@ -329,7 +329,7 @@
  				macaddr_high  = ((u16)multicast_addr->dmi_addr[0]) << 8;
  				macaddr_high += (u16)multicast_addr->dmi_addr[1];
  				macaddr_low   = ((u32)multicast_addr->dmi_addr[2]) << 24;
-@@ -1063,9 +1066,6 @@ static const char nes_ethtool_stringset[
+@@ -1040,9 +1043,6 @@ static const char nes_ethtool_stringset[
  	"CQ Depth 32",
  	"CQ Depth 128",
  	"CQ Depth 256",
@@ -339,17 +339,17 @@
  };
  
  #define NES_ETHTOOL_STAT_COUNT  ARRAY_SIZE(nes_ethtool_stringset)
-@@ -1263,9 +1263,6 @@ static void nes_netdev_get_ethtool_stats
- 	target_stat_values[52] = int_mod_cq_depth_32;
- 	target_stat_values[53] = int_mod_cq_depth_128;
- 	target_stat_values[54] = int_mod_cq_depth_256;
--	target_stat_values[55] = nesvnic->lro_mgr.stats.aggregated;
--	target_stat_values[56] = nesvnic->lro_mgr.stats.flushed;
--	target_stat_values[57] = nesvnic->lro_mgr.stats.no_desc;
+@@ -1240,9 +1240,6 @@ static void nes_netdev_get_ethtool_stats
+ 	target_stat_values[++index] = int_mod_cq_depth_32;
+ 	target_stat_values[++index] = int_mod_cq_depth_128;
+ 	target_stat_values[++index] = int_mod_cq_depth_256;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
  
  }
  
-@@ -1542,8 +1539,6 @@ static struct ethtool_ops nes_ethtool_op
+@@ -1525,8 +1522,6 @@ static struct ethtool_ops nes_ethtool_op
  	.set_sg = ethtool_op_set_sg,
  	.get_tso = ethtool_op_get_tso,
  	.set_tso = ethtool_op_set_tso,
@@ -358,7 +358,7 @@
  };
  
  
-@@ -1615,7 +1610,8 @@ struct net_device *nes_netdev_init(struc
+@@ -1598,7 +1593,8 @@ struct net_device *nes_netdev_init(struc
  	netdev->type = ARPHRD_ETHER;
  	netdev->features = NETIF_F_HIGHDMA;
  	netdev->ethtool_ops = &nes_ethtool_ops;
@@ -368,7 +368,7 @@
  	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
  	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
  	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
-@@ -1648,7 +1644,6 @@ struct net_device *nes_netdev_init(struc
+@@ -1630,7 +1626,6 @@ struct net_device *nes_netdev_init(struc
  
  	if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
  		netdev->features |= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/iw_nes_200_to_2_6_22.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/iw_nes_200_to_2_6_22.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/iw_nes_200_to_2_6_22.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:52:27.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -593,16 +593,22 @@ static int __devinit nes_probe(struct pc
  						nesdev->nesadapter->port_count;
  	}
@@ -35,9 +35,9 @@
  
  	tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.h nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h
---- linux-2.6/drivers/infiniband/hw/nes/nes.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2008-11-07 15:52:27.000000000 -0600
-@@ -276,14 +276,7 @@ static inline __le32 get_crc_value(struc
+--- linux-2.6/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:38:45.000000000 -0500
+@@ -274,14 +274,7 @@ static inline __le32 get_crc_value(struc
  	u32 crc_value;
  	crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad));
  

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/iw_nes_210_to_2_6_21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/iw_nes_210_to_2_6_21.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/iw_nes_210_to_2_6_21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,23 @@
+diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1549,6 +1549,11 @@ static void nes_netdev_vlan_rx_register(
+ 	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ }
+ 
++static void nes_netdev_vlan_rx_kill_vid(struct net_device *netdev, 
++					unsigned short vid)
++{
++}
++
+ 
+ /**
+  * nes_netdev_init - initialize network device
+@@ -1598,6 +1603,7 @@ struct net_device *nes_netdev_init(struc
+ 	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
+ 	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ 	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
++	netdev->vlan_rx_kill_vid = nes_netdev_vlan_rx_kill_vid;
+ 
+ 	/* Fill in the port structure */
+ 	nesvnic->netdev = netdev;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/mlx4_en_0099_no_multiqueue.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/mlx4_en_0099_no_multiqueue.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/mlx4_en_0099_no_multiqueue.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,73 @@
+diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
+index bce700a..a9ee0d7 100644
+--- a/drivers/net/mlx4/en_netdev.c
++++ b/drivers/net/mlx4/en_netdev.c
+@@ -963,7 +963,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	int i;
+ 	int err;
+ 
+-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
++	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+ 	if (dev == NULL) {
+ 		mlx4_err(mdev, "Net device allocation failed\n");
+ 		return -ENOMEM;
+@@ -1036,7 +1036,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	dev->open = &mlx4_en_open;
+ 	dev->stop = &mlx4_en_close;
+ 	dev->hard_start_xmit = &mlx4_en_xmit;
+-	dev->select_queue = &mlx4_en_select_queue;
+ 	dev->get_stats = &mlx4_en_get_stats;
+ 	dev->set_multicast_list = &mlx4_en_set_multicast;
+ 	dev->set_mac_address = &mlx4_en_set_mac;
+diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
+index 3d8246f..72e166f 100644
+--- a/drivers/net/mlx4/en_tx.c
++++ b/drivers/net/mlx4/en_tx.c
+@@ -392,7 +392,7 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+ 			 *   transmission on that ring would stop the queue.
+ 			 */
+ 			ring->blocked = 0;
+-			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
++			netif_wake_queue(dev);
+ 			priv->port_stats.wake_queue++;
+ 		}
+ 	}
+@@ -612,7 +612,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
+ 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ }
+ 
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
++static int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	u16 vlan_tag = 0;
+@@ -703,7 +703,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+-	tx_ind = skb->queue_mapping;
++	tx_ind = mlx4_en_select_queue(dev, skb);
+ 	ring = &priv->tx_ring[tx_ind];
+ 	if (priv->vlgrp && vlan_tx_tag_present(skb))
+ 		vlan_tag = vlan_tx_tag_get(skb);
+@@ -713,7 +713,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ 		/* every full Tx ring stops queue.
+ 		 * TODO: implement multi-queue support (per-queue stop) */
+-		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
++		netif_stop_queue(dev);
+ 		ring->blocked = 1;
+ 		priv->port_stats.queue_stopped++;
+ 
+diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
+index adef17c..995e318 100644
+--- a/drivers/net/mlx4/mlx4_en.h
++++ b/drivers/net/mlx4/mlx4_en.h
+@@ -521,7 +521,6 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq);
+ void mlx4_en_poll_tx_cq(unsigned long data);
+ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+ 
+ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+ 			   u32 size, u16 stride);
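
The new mlx4_en_0099_no_multiqueue.patch above backs mlx4_en off the multiqueue TX API: alloc_etherdev_mq() becomes alloc_etherdev(), the select_queue hook goes away, and per-ring stop/wake calls collapse onto the whole device. In sketch form, with illustrative names (ring_full stands in for the real occupancy test):

/* Illustrative single-queue fallback: flow control acts on the whole netdev
 * rather than on one TX queue, as in the hunks above. */
static void example_tx_flow_control(struct net_device *dev,
				    struct mlx4_en_tx_ring *ring, int ring_full)
{
	if (ring_full) {
		netif_stop_queue(dev);	/* was netif_tx_stop_queue(netdev_get_tx_queue(dev, i)) */
		ring->blocked = 1;
	} else if (ring->blocked) {
		ring->blocked = 0;
		netif_wake_queue(dev);	/* was netif_tx_wake_queue(...) */
	}
}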

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/mlx4_en_0100_to_2.6.24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/mlx4_en_0100_to_2.6.24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/mlx4_en_0100_to_2.6.24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,33 +1,5 @@
-From 6d0a6cc6981404e95297924c9a8f169b3447a98e Mon Sep 17 00:00:00 2001
-From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
-Date: Thu, 2 Oct 2008 11:26:30 +0300
-Subject: [PATCH] mlx4_en: LRO backport patch
-
-Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
----
- drivers/net/mlx4/Makefile    |    2 +-
- drivers/net/mlx4/en_cq.c     |   23 ++-
- drivers/net/mlx4/en_lro.c    |  529 ++++++++++++++++++++++++++++++++++++++++++
- drivers/net/mlx4/en_netdev.c |    5 +-
- drivers/net/mlx4/en_params.c |   19 +--
- drivers/net/mlx4/en_rx.c     |  121 +++-------
- drivers/net/mlx4/en_tx.c     |    1 +
- drivers/net/mlx4/mlx4_en.h   |   60 +++++-
- 8 files changed, 646 insertions(+), 114 deletions(-)
- create mode 100644 drivers/net/mlx4/en_lro.c
-
-diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
-index acb1b91..e6bfdec 100644
---- a/drivers/net/mlx4/Makefile
-+++ b/drivers/net/mlx4/Makefile
-@@ -5,4 +5,4 @@ mlx4_core-y :=  alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-
- obj-$(CONFIG_MLX4_EN)                  += mlx4_en.o
-
--mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o
-+mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o en_lro.o
 diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
-index cf9c5df..2348bfc 100644
+index a4d6e46..440633a 100644
 --- a/drivers/net/mlx4/en_cq.c
 +++ b/drivers/net/mlx4/en_cq.c
 @@ -34,6 +34,7 @@
@@ -38,7 +10,7 @@
  
  #include "mlx4_en.h"
  
-@@ -100,8 +101,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -106,8 +107,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  		cq->timer.function = mlx4_en_poll_tx_cq;
  		cq->timer.data = (unsigned long) cq;
  	} else {
@@ -58,7 +30,7 @@
  	}
  
  	return 0;
-@@ -123,8 +133,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -129,8 +139,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  
  	if (cq->is_tx)
  		del_timer(&cq->timer);
@@ -74,548 +46,8 @@
  
  	mlx4_cq_free(mdev->dev, &cq->mcq);
  }
-diff --git a/drivers/net/mlx4/en_lro.c b/drivers/net/mlx4/en_lro.c
-new file mode 100644
-index 0000000..9d17376
---- /dev/null
-+++ b/drivers/net/mlx4/en_lro.c
-@@ -0,0 +1,534 @@
-+/*
-+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses.  You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ *     Redistribution and use in source and binary forms, with or
-+ *     without modification, are permitted provided that the following
-+ *     conditions are met:
-+ *
-+ *      - Redistributions of source code must retain the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer.
-+ *
-+ *      - Redistributions in binary form must reproduce the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer in the documentation and/or other materials
-+ *        provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ */
-+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <net/tcp.h>
-+#include <linux/if_vlan.h>
-+#include <linux/delay.h>
-+
-+#include "mlx4_en.h"
-+
-+/* LRO hash function - using sum of source and destination port LSBs is
-+ * good enough */
-+#define LRO_INDEX(th, size) \
-+	((*((u8*) &th->source + 1) + *((u8*) &th->dest + 1)) & (size - 1))
-+
-+/* #define CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+static void mlx4_en_lro_validate(struct mlx4_en_priv* priv, struct mlx4_en_lro *lro)
-+{
-+	int i;
-+	int size, size2;
-+	struct sk_buff *skb = lro->skb;
-+	skb_frag_t *frags;
-+	int len, len2;
-+	int cur_skb = 0;
-+
-+	/* Sum fragment sizes of first skb */
-+	len = skb->len;
-+	size = skb_headlen(skb);
-+	frags = skb_shinfo(skb)->frags;
-+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+		size += frags[i].size;
-+	}
-+
-+	/* Add in fragments of linked skb's */
-+	skb = skb_shinfo(skb)->frag_list;
-+	while (skb) {
-+		cur_skb++;
-+		len2 = skb->len;
-+		if (skb_headlen(skb)) {
-+			mlx4_err(priv->mdev, "Bad LRO format: non-zero headlen "
-+				  "in fraglist (skb:%d)\n", cur_skb);
-+			return;
-+		}
-+
-+		size2 = 0;
-+		frags = skb_shinfo(skb)->frags;
-+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+			size2 += frags[i].size;
-+		}
-+
-+		if (size2 != len2) {
-+			mlx4_err(priv->mdev, "Bad skb size:%d in LRO fraglist. "
-+			          "Expected:%d (skb:%d)\n", size2, len2, cur_skb);
-+			return;
-+		}
-+		size += size2;
-+		skb = skb->next;
-+	}
-+
-+	if (size != len)
-+		mlx4_err(priv->mdev, "Bad LRO size:%d expected:%d\n", size, len);
-+}
-+#endif /* MLX4_EN_DEBUG_LRO */
-+
-+static void mlx4_en_lro_flush_single(struct mlx4_en_priv* priv,
-+		   struct mlx4_en_rx_ring* ring, struct mlx4_en_lro *lro)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct sk_buff *skb = lro->skb;
-+	struct iphdr *iph = (struct iphdr *) skb->data;
-+	struct tcphdr *th = (struct tcphdr *)(iph + 1);
-+	unsigned int headlen = skb_headlen(skb);
-+	__wsum tcp_hdr_csum;
-+	u32 *ts;
-+
-+	/* Update IP length and checksum */
-+	iph->tot_len = htons(lro->tot_len);
-+	iph->check = 0;
-+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-+
-+	/* Update latest TCP ack, window, psh, and timestamp */
-+	th->ack_seq = lro->ack_seq;
-+	th->window = lro->window;
-+	th->psh = !!lro->psh;
-+	if (lro->has_timestamp) {
-+		ts = (u32 *) (th + 1);
-+		ts[1] = htonl(lro->tsval);
-+		ts[2] = lro->tsecr;
-+	}
-+	th->check = 0;
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, 0);
-+	lro->data_csum = csum_add(lro->data_csum, tcp_hdr_csum);
-+	th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
-+				      lro->tot_len - (iph->ihl << 2),
-+				      IPPROTO_TCP, lro->data_csum);
-+
-+	/* Update skb */
-+	skb->len = lro->tot_len;
-+	skb->data_len = lro->tot_len - headlen;
-+	skb->truesize = skb->len + sizeof(struct sk_buff);
-+	skb_shinfo(skb)->gso_size = lro->mss;
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+	mlx4_en_lro_validate(priv, lro);
-+#endif /* CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+	/* Push it up the stack */
-+	if (priv->vlgrp && lro->has_vlan)
-+		vlan_hwaccel_receive_skb(skb, priv->vlgrp,
-+					be16_to_cpu(lro->vlan_prio));
-+	else
-+		netif_receive_skb(skb);
-+	priv->dev->last_rx = jiffies;
-+
-+	/* Increment stats */
-+	priv->port_stats.lro_flushed++;
-+
-+	/* Move session back to the free list */
-+	hlist_del(&lro->node);
-+	hlist_del(&lro->flush_node);
-+	hlist_add_head(&lro->node, &ring->lro_free);
-+}
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_flush, flush_node) {
-+		if (all || time_after(jiffies, lro->expires))
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+	}
-+}
-+
-+static inline int mlx4_en_lro_append(struct mlx4_en_priv *priv,
-+				   struct mlx4_en_lro *lro,
-+				   struct mlx4_en_rx_desc *rx_desc,
-+				   struct skb_frag_struct *skb_frags,
-+				   struct mlx4_en_rx_alloc *page_alloc,
-+				   unsigned int data_len,
-+				   int hlen)
-+{
-+	struct sk_buff *skb = lro->skb_last;
-+	struct skb_shared_info *info;
-+	struct skb_frag_struct *frags_copy;
-+	int nr_frags;
-+
-+	if (skb_shinfo(skb)->nr_frags + priv->num_frags > MAX_SKB_FRAGS)
-+		return -ENOMEM;
-+
-+	info = skb_shinfo(skb);
-+
-+	/* Copy fragments from descriptor ring to skb */
-+	frags_copy = info->frags + info->nr_frags;
-+	nr_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
-+						frags_copy,
-+						page_alloc,
-+						data_len + hlen);
-+	if (!nr_frags) {
-+		mlx4_dbg(DRV, priv, "Failed completing rx desc during LRO append\n");
-+		return -ENOMEM;
-+	}
-+
-+	/* Skip over headers */
-+	frags_copy[0].page_offset += hlen;
-+
-+	if (nr_frags == 1)
-+		frags_copy[0].size = data_len;
-+	else {
-+		/* Adjust size of last fragment to match packet length.
-+		 * Note: if this fragment is also the first one, the
-+		 *       operation is completed in the next line */
-+		frags_copy[nr_frags - 1].size = hlen + data_len -
-+				priv->frag_info[nr_frags - 1].frag_prefix_size;
-+
-+		/* Adjust size of first fragment */
-+		frags_copy[0].size -= hlen;
-+	}
-+
-+	/* Update skb bookkeeping */
-+	skb->len += data_len;
-+	skb->data_len += data_len;
-+	info->nr_frags += nr_frags;
-+	return 0;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_find_session(struct mlx4_en_dev *mdev,
-+						       struct mlx4_en_rx_ring *ring,
-+						       struct iphdr *iph,
-+						       struct tcphdr *th)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node;
-+	int index = LRO_INDEX(th, mdev->profile.num_lro);
-+	struct hlist_head *list = &ring->lro_hash[index];
-+
-+	hlist_for_each_entry(lro, node, list, node) {
-+		if (lro->sport_dport == *((u32*) &th->source) &&
-+		    lro->saddr == iph->saddr &&
-+		    lro->daddr == iph->daddr)
-+			return lro;
-+	}
-+	return NULL;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_alloc_session(struct mlx4_en_priv *priv,
-+							struct mlx4_en_rx_ring *ring)
-+{
-+	return hlist_empty(&ring->lro_free) ? NULL :
-+		hlist_entry(ring->lro_free.first, struct mlx4_en_lro, node);
-+}
-+
-+static __wsum mlx4_en_lro_tcp_data_csum(struct iphdr *iph,
-+					struct tcphdr *th, int len)
-+{
-+	__wsum tcp_csum;
-+	__wsum tcp_hdr_csum;
-+	__wsum tcp_ps_hdr_csum;
-+
-+	tcp_csum = ~csum_unfold(th->check);
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, tcp_csum);
-+
-+	tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-+					     len + (th->doff << 2),
-+					     IPPROTO_TCP, 0);
-+
-+	return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
-+			tcp_ps_hdr_csum);
-+}
-+
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+					  struct mlx4_en_rx_desc *rx_desc,
-+					  struct skb_frag_struct *skb_frags,
-+					  unsigned int length,
-+					  struct mlx4_cqe *cqe)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct mlx4_en_lro *lro;
-+	struct sk_buff *skb;
-+	struct iphdr *iph;
-+	struct tcphdr *th;
-+	dma_addr_t dma;
-+	int tcp_hlen;
-+	int tcp_data_len;
-+	int hlen;
-+	u16 ip_len;
-+	void *va;
-+	u32 *ts;
-+	u32 seq;
-+	u32 tsval = (u32) ~0UL;
-+	u32 tsecr = 0;
-+	u32 ack_seq;
-+	u16 window;
-+
-+	/* This packet is eligible for LRO if it is:
-+	 * - DIX Ethernet (type interpretation)
-+	 * - TCP/IP (v4)
-+	 * - without IP options
-+	 * - not an IP fragment */
-+	if (!mlx4_en_can_lro(cqe->status))
-+			return -1;
-+
-+	/* Get pointer to TCP header. We already know that the packet is DIX Ethernet/IPv4/TCP
-+	 * with no VLAN (HW stripped it) and no IP options */
-+	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
-+	iph = va + ETH_HLEN;
-+	th = (struct tcphdr *)(iph + 1);
-+
-+	/* Synchronsize headers for processing */
-+	dma = be64_to_cpu(rx_desc->data[0].addr);
-+#define MAX_LRO_HEADER		(ETH_HLEN + \
-+				 sizeof(*iph) + \
-+				 sizeof(*th) + \
-+				 TCPOLEN_TSTAMP_ALIGNED)
-+	dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
-+				      MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+
-+	/* We only handle aligned timestamp options */
-+	tcp_hlen = (th->doff << 2);
-+	if (tcp_hlen == sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) {
-+		ts = (u32*) (th + 1);
-+		if (unlikely(*ts != htonl((TCPOPT_NOP << 24) |
-+					  (TCPOPT_NOP << 16) |
-+					  (TCPOPT_TIMESTAMP << 8) |
-+					  TCPOLEN_TIMESTAMP)))
-+			goto sync_device;
-+		tsval = ntohl(ts[1]);
-+		tsecr = ts[2];
-+	} else if (tcp_hlen != sizeof(*th))
-+		goto sync_device;
-+	
-+
-+	/* At this point we know we have a TCP packet that is likely to be
-+	 * eligible for LRO. Therefore, see now if we have an oustanding
-+	 * session that corresponds to this packet so we could flush it if
-+	 * something still prevents LRO */
-+	lro = mlx4_en_lro_find_session(mdev, ring, iph, th);
-+
-+	/* ensure no bits set besides ack or psh */
-+	if (th->fin || th->syn || th->rst || th->urg || th->ece ||
-+	    th->cwr || !th->ack) {
-+		if (lro) {
-+			/* First flush session to keep packets in-order */
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		}
-+		goto sync_device;
-+	}
-+
-+	/* Get ip length and verify that the frame is big enough */
-+	ip_len = ntohs(iph->tot_len);
-+	if (unlikely(length < ETH_HLEN + ip_len)) {
-+		mlx4_warn(mdev, "Cannot LRO - ip payload exceeds frame!\n");
-+		goto sync_device;
-+	}
-+
-+	/* Get TCP payload length */
-+	tcp_data_len = ip_len - tcp_hlen - sizeof(struct iphdr);
-+	seq = ntohl(th->seq);
-+	if (!tcp_data_len)
-+		goto flush_session;
-+
-+	if (lro) {
-+		/* Check VLAN tag */
-+		if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+			if (cqe->sl_vid != lro->vlan_prio || !lro->has_vlan) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else if (lro->has_vlan) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* Check sequence number */
-+		if (unlikely(seq != lro->next_seq)) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* If the cummulative IP length is over 64K, flush and start
-+		 * a new session */
-+		if (lro->tot_len + tcp_data_len > 0xffff) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto new_session;
-+		}
-+
-+		/* Check timestamps */
-+		if (tcp_hlen != sizeof(*th)) {
-+			if (unlikely(lro->tsval > tsval || !tsecr))
-+				goto sync_device;
-+		}
-+
-+		window = th->window;
-+		ack_seq = th->ack_seq;
-+		if (likely(tcp_data_len)) {
-+			/* Append the data! */
-+			hlen = ETH_HLEN + sizeof(struct iphdr) + tcp_hlen;
-+			if (mlx4_en_lro_append(priv, lro, rx_desc, skb_frags,
-+							ring->page_alloc,
-+							tcp_data_len, hlen)) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else {
-+			/* No data */
-+			dma_sync_single_range_for_device(&mdev->dev->pdev->dev, dma,
-+							 0, MAX_LRO_HEADER,
-+							 DMA_FROM_DEVICE);
-+		}
-+
-+		/* Update session */
-+		lro->psh |= th->psh;
-+		lro->next_seq += tcp_data_len;
-+		lro->data_csum = csum_block_add(lro->data_csum,
-+					mlx4_en_lro_tcp_data_csum(iph, th,
-+								  tcp_data_len),
-+					lro->tot_len);
-+		lro->tot_len += tcp_data_len;
-+		lro->tsval = tsval;
-+		lro->tsecr = tsecr;
-+		lro->ack_seq = ack_seq;
-+		lro->window = window;
-+		if (tcp_data_len > lro->mss)
-+			lro->mss = tcp_data_len;
-+		priv->port_stats.lro_aggregated++;
-+		if (th->psh)
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		return 0;
-+	}
-+
-+new_session:
-+	if (th->psh)
-+		goto sync_device;
-+	lro = mlx4_en_lro_alloc_session(priv, ring);
-+	if (lro) {
-+		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags, ring->page_alloc,
-+							     ETH_HLEN + ip_len);
-+		if (skb) {
-+			int index;
-+
-+			/* Add in the skb */
-+			lro->skb = skb;
-+			lro->skb_last = skb;
-+			skb->protocol = eth_type_trans(skb, priv->dev);
-+			skb->ip_summed = CHECKSUM_UNNECESSARY;
-+
-+			/* Initialize session */
-+			lro->saddr = iph->saddr;
-+			lro->daddr = iph->daddr;
-+			lro->sport_dport = *((u32*) &th->source);
-+
-+			lro->next_seq = seq + tcp_data_len;
-+			lro->tot_len = ip_len;
-+			lro->psh = th->psh;
-+			lro->ack_seq = th->ack_seq;
-+			lro->window = th->window;
-+			lro->mss = tcp_data_len;
-+			lro->data_csum = mlx4_en_lro_tcp_data_csum(iph, th,
-+						tcp_data_len);
-+
-+			/* Handle vlans */
-+			if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+				lro->vlan_prio = cqe->sl_vid;
-+				lro->has_vlan = 1;
-+			} else
-+				lro->has_vlan = 0;
-+
-+			/* Handle timestamps */
-+			if (tcp_hlen != sizeof(*th)) {
-+				lro->tsval = tsval;
-+				lro->tsecr = tsecr;
-+				lro->has_timestamp = 1;
-+			} else {
-+				lro->tsval = (u32) ~0UL;
-+				lro->has_timestamp = 0;
-+			}
-+
-+			/* Activate this session */
-+			lro->expires = jiffies + HZ / 25;
-+			hlist_del(&lro->node);
-+			index = LRO_INDEX(th, mdev->profile.num_lro);
-+
-+			hlist_add_head(&lro->node, &ring->lro_hash[index]);
-+			hlist_add_head(&lro->flush_node, &ring->lro_flush);
-+			priv->port_stats.lro_aggregated++;
-+			return 0;
-+		}
-+	} else {
-+		priv->port_stats.lro_no_desc++;
-+	}
-+
-+flush_session:
-+	if (lro)
-+		mlx4_en_lro_flush_single(priv, ring, lro);
-+sync_device:
-+	dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
-+					 MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+	return -1;
-+}
-+
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_free, node) {
-+		hlist_del(&lro->node);
-+		kfree(lro);
-+	}
-+	kfree(ring->lro_hash);
-+}
-+
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro)
-+{
-+	struct mlx4_en_lro *lro;
-+	int i;
-+
-+	INIT_HLIST_HEAD(&ring->lro_free);
-+	INIT_HLIST_HEAD(&ring->lro_flush);
-+	ring->lro_hash = kmalloc(sizeof(struct hlist_head) * num_lro,
-+				 GFP_KERNEL);
-+	if (!ring->lro_hash)
-+		return -ENOMEM;
-+
-+	for (i = 0; i < num_lro; i++) {
-+		INIT_HLIST_HEAD(&ring->lro_hash[i]);
-+		lro = kzalloc(sizeof(struct mlx4_en_lro), GFP_KERNEL);
-+		if (!lro) {
-+			mlx4_en_lro_destroy(ring);
-+			return -ENOMEM;
-+		}
-+		INIT_HLIST_NODE(&lro->node);
-+		INIT_HLIST_NODE(&lro->flush_node);
-+		hlist_add_head(&lro->node, &ring->lro_free);
-+	}
-+	return 0;
-+}
-+
-+
 diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
-index 6199f4c..92f91d2 100644
+index 2d57ae6..3b394d7 100644
 --- a/drivers/net/mlx4/en_netdev.c
 +++ b/drivers/net/mlx4/en_netdev.c
 @@ -333,7 +333,8 @@ void mlx4_en_netpoll(struct net_device *dev)
@@ -625,10 +57,10 @@
 -		napi_synchronize(&cq->napi);
 +		while (test_bit(__LINK_STATE_RX_SCHED, &cq->poll_dev->state))
 +			msleep(1);
- 		mlx4_en_process_rx_cq(dev, cq, 0);
- 		spin_unlock_irqrestore(&cq->lock, flags);
- 	}
-@@ -757,8 +758,6 @@ void mlx4_en_stop_port(struct net_device *dev)
+ 		if (priv->rx_ring[i].use_frags)
+ 			mlx4_en_process_rx_cq(dev, cq, 0);
+ 		else
+@@ -755,8 +756,6 @@ void mlx4_en_stop_port(struct net_device *dev)
  	/* Free RX Rings */
  	for (i = 0; i < priv->rx_ring_num; i++) {
  		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -637,226 +69,11 @@
  		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
  	}
  }
-diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
-index 6e96d0a..2b0ca84 100644
---- a/drivers/net/mlx4/en_params.c
-+++ b/drivers/net/mlx4/en_params.c
-@@ -142,21 +142,6 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
-  * Ethtool support
-  */
- 
--static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
--{
--	int i;
--
--	priv->port_stats.lro_aggregated = 0;
--	priv->port_stats.lro_flushed = 0;
--	priv->port_stats.lro_no_desc = 0;
--
--	for (i = 0; i < priv->rx_ring_num; i++) {
--		priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
--		priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
--		priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
--	}
--}
--
- static void
- mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
- {
-@@ -266,8 +251,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
- 
- 	spin_lock_bh(&priv->stats_lock);
- 
--	mlx4_en_update_lro_stats(priv);
--
- 	for (i = 0; i < NUM_MAIN_STATS; i++)
- 		data[index++] = ((unsigned long *) &priv->stats)[i];
- 	for (i = 0; i < NUM_PORT_STATS; i++)
-@@ -457,7 +440,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
- 	.get_rx_csum = mlx4_en_get_rx_csum,
- 	.set_rx_csum = mlx4_en_set_rx_csum,
- 	.get_tx_csum = ethtool_op_get_tx_csum,
--	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
-+	.set_tx_csum = ethtool_op_set_tx_csum,
- 	.get_strings = mlx4_en_get_strings,
- 	.get_sset_count = mlx4_en_get_sset_count,
- 	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
 diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
-index 5a95d74..16f07bc 100644
+index 9d144cd..14b5faf 100644
 --- a/drivers/net/mlx4/en_rx.c
 +++ b/drivers/net/mlx4/en_rx.c
-@@ -37,6 +37,7 @@
- #include <linux/if_ether.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/etherdevice.h>
- 
- #include "mlx4_en.h"
- 
-@@ -51,18 +52,6 @@ static void mlx4_en_srq_event(struct mlx
- 	return;
- }
- 
--static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
--				   void **ip_hdr, void **tcpudp_hdr,
--				   u64 *hdr_flags, void *priv)
--{
--	*mac_hdr = page_address(frags->page) + frags->page_offset;
--	*ip_hdr = *mac_hdr + ETH_HLEN;
--	*tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
--	*hdr_flags = LRO_IPV4 | LRO_TCP;
--
--	return 0;
--}
--
- static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
- 			      struct mlx4_en_rx_desc *rx_desc,
- 			      struct skb_frag_struct *skb_frags,
-@@ -372,23 +361,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- 	}
- 	ring->buf = ring->wqres.buf.direct.buf;
- 
--	/* Configure lro mngr */
--	memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
--	ring->lro.dev = priv->dev;
--	ring->lro.features = LRO_F_NAPI;
--	ring->lro.frag_align_pad = NET_IP_ALIGN;
--	ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
--	ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
--	ring->lro.max_desc = mdev->profile.num_lro;
--	ring->lro.max_aggr = MAX_SKB_FRAGS;
--	ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
--				    sizeof(struct net_lro_desc),
--				    GFP_KERNEL);
--	if (!ring->lro.lro_arr) {
--		mlx4_err(mdev, "Failed to allocate lro array\n");
-+	/* Allocate LRO sessions */
-+	if (mlx4_en_lro_init(ring, mdev->profile.num_lro)) {
-+		mlx4_err(mdev, "Failed allocating lro sessions\n");
- 		goto err_map;
- 	}
--	ring->lro.get_frag_header = mlx4_en_get_frag_header;
- 
- 	return 0;
- 
-@@ -501,7 +478,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 
--	kfree(ring->lro.lro_arr);
-+	mlx4_en_lro_destroy(ring);
- 	mlx4_en_unmap_buffer(&ring->wqres.buf);
- 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
- 	vfree(ring->rx_info);
-@@ -522,12 +499,12 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- 
- 
- /* Unmap a completed descriptor and free unused pages */
--static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
--				    struct mlx4_en_rx_desc *rx_desc,
--				    struct skb_frag_struct *skb_frags,
--				    struct skb_frag_struct *skb_frags_rx,
--				    struct mlx4_en_rx_alloc *page_alloc,
--				    int length)
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct mlx4_en_frag_info *frag_info;
-@@ -570,11 +547,11 @@ fail:
- }
- 
- 
--static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
--				      struct mlx4_en_rx_desc *rx_desc,
--				      struct skb_frag_struct *skb_frags,
--				      struct mlx4_en_rx_alloc *page_alloc,
--				      unsigned int length)
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct sk_buff *skb;
-@@ -663,11 +640,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 	struct mlx4_cqe *cqe;
- 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
- 	struct skb_frag_struct *skb_frags;
--	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
- 	struct mlx4_en_rx_desc *rx_desc;
- 	struct sk_buff *skb;
- 	int index;
--	int nr;
- 	unsigned int length;
- 	int polled = 0;
- 	int ip_summed;
-@@ -718,38 +693,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 			if ((cqe->status & MLX4_CQE_STATUS_IPOK) &&
- 			    (cqe->checksum == 0xffff)) {
- 				priv->port_stats.rx_chksum_good++;
--				/* This packet is eligible for LRO if it is:
--				 * - DIX Ethernet (type interpretation)
--				 * - TCP/IP (v4)
--				 * - without IP options
--				 * - not an IP fragment */
--				if (mlx4_en_can_lro(cqe->status) &&
--				    dev->features & NETIF_F_LRO) {
--
--					nr = mlx4_en_complete_rx_desc(
--						priv, rx_desc,
--						skb_frags, lro_frags,
--						ring->page_alloc, length);
--					if (!nr)
--						goto next;
--
--					if (priv->vlgrp && (cqe->vlan_my_qpn &
--						MLX4_CQE_VLAN_PRESENT_MASK)) {
--						lro_vlan_hwaccel_receive_frags(
--						       &ring->lro, lro_frags,
--						       length, length,
--						       priv->vlgrp,
--						       be16_to_cpu(cqe->sl_vid),
--						       NULL, 0);
--					} else
--						lro_receive_frags(&ring->lro,
--								  lro_frags,
--								  length,
--								  length,
--								  NULL, 0);
--
-+				if (mdev->profile.num_lro &&
-+				    !mlx4_en_lro_rx(priv, ring, rx_desc,
-+						    skb_frags, length, cqe))
- 					goto next;
--				}
- 
- 				/* LRO not possible, complete processing here */
- 				ip_summed = CHECKSUM_UNNECESSARY;
-@@ -790,13 +737,15 @@ next:
- 		if (++polled == budget) {
- 			/* We are here because we reached the NAPI budget -
- 			 * flush only pending LRO sessions */
--			lro_flush_all(&ring->lro);
-+			if (mdev->profile.num_lro)
-+				mlx4_en_lro_flush(priv, ring, 0);
- 			goto out;
- 		}
- 	}
- 
- 	/* If CQ is empty flush all LRO sessions unconditionally */
--	lro_flush_all(&ring->lro);
-+	if (mdev->profile.num_lro)
-+		mlx4_en_lro_flush(priv, ring, 1);
- 
- out:
- 	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
-@@ -820,30 +769,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
+@@ -1041,30 +1041,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
  	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
  
  	if (priv->port_up)
@@ -877,8 +94,8 @@
  	int done;
 +	int work = min(*budget, poll_dev->quota);
  
--	done = mlx4_en_process_rx_cq(dev, cq, budget);
-+	done = mlx4_en_process_rx_cq(dev, cq, work);
+-	done = cq->process_cq(dev, cq, budget);
++	done = cq->process_cq(dev, cq, work);
 +	dev->quota -= done;
 +	*budget -= done;
  
@@ -901,76 +118,11 @@
  }
  
  
-diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
-index 2297fec..59f8772 100644
---- a/drivers/net/mlx4/en_tx.c
-+++ b/drivers/net/mlx4/en_tx.c
-@@ -37,6 +37,7 @@
- #include <linux/skbuff.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/tcp.h>
- 
- #include "mlx4_en.h"
- 
 diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
-index 1011803..7c8c489 100644
+index ebaed67..456cd91 100644
 --- a/drivers/net/mlx4/mlx4_en.h
 +++ b/drivers/net/mlx4/mlx4_en.h
-@@ -38,7 +38,7 @@
- #include <linux/list.h>
- #include <linux/mutex.h>
- #include <linux/netdevice.h>
--#include <linux/inet_lro.h>
-+#include <net/checksum.h>
- 
- #include <linux/mlx4/device.h>
- #include <linux/mlx4/qp.h>
-@@ -268,11 +268,42 @@ struct mlx4_en_rx_desc {
- 	struct mlx4_wqe_data_seg data[0];
- };
- 
-+struct mlx4_en_lro {
-+	struct hlist_node node;
-+	struct hlist_node flush_node;
-+
-+	/* Id fields come first: */
-+	u32 saddr;
-+	u32 daddr;
-+	u32 sport_dport;
-+	u32 next_seq;
-+	u16 tot_len;
-+	u8 psh;
-+
-+	u32 tsval;
-+	u32 tsecr;
-+	u32 ack_seq;
-+	u16 window;
-+	__be16 vlan_prio;
-+	u16 has_vlan;
-+	u16 has_timestamp;
-+	u16 mss;
-+	__wsum  data_csum;
-+
-+	unsigned long expires;
-+	struct sk_buff *skb;
-+	struct sk_buff *skb_last;
-+};
-+
-+
- struct mlx4_en_rx_ring {
- 	struct mlx4_srq srq;
- 	struct mlx4_hwq_resources wqres;
- 	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
--	struct net_lro_mgr lro;
-+	struct mlx4_en_lro lro;
-+	struct hlist_head *lro_hash;
-+	struct hlist_head lro_free;
-+	struct hlist_head lro_flush;
- 	u32 size ;	/* number of Rx descs*/
- 	u32 actual_size;
- 	u32 size_mask;
-@@ -311,7 +342,7 @@ struct mlx4_en_cq {
+@@ -328,7 +328,7 @@ struct mlx4_en_cq {
  	int                     ring;
  	spinlock_t              lock;
  	struct net_device      *dev;
@@ -979,43 +131,12 @@
  	/* Per-core Tx cq processing support */
  	struct timer_list timer;
  	int size;
-@@ -541,7 +572,7 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- int mlx4_en_process_rx_cq(struct net_device *dev,
- 			  struct mlx4_en_cq *cq,
- 			  int budget);
+@@ -571,7 +571,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
+ int mlx4_en_process_rx_cq_skb(struct net_device *dev,
+ 			      struct mlx4_en_cq *cq,
+ 			      int budget);
 -int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
 +int mlx4_en_poll_rx_cq(struct net_device *poll_dev, int *budget);;
  void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
  			     int is_tx, int rss, int qpn, int cqn, int srqn,
  			     struct mlx4_qp_context *context);
-@@ -552,6 +583,27 @@ void mlx4_en_calc_rx_buf(struct net_device *dev);
- void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
- 				 struct mlx4_en_rss_map *rss_map,
- 				 int num_entries, int num_rings);
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all);
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+		   struct mlx4_en_rx_desc *rx_desc,
-+		   struct skb_frag_struct *skb_frags,
-+		   unsigned int length, struct mlx4_cqe *cqe);
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring);
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro);
-+
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv* priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length);
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv* priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length);
-+
- void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
- int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
- void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
--- 
-1.5.4
-

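The en_rx.c hunk above converts the napi_struct based poll routine to the older net_device polling interface used on 2.6.18, where the callback receives the polling device plus a remaining-budget pointer, must charge completed work against both dev->quota and *budget, and returns nonzero while work remains. A condensed sketch of that calling convention, with hypothetical names for everything except the core API:

	/* sketch only: old-style (pre-napi_struct) NAPI poll callback */
	#include <linux/netdevice.h>

	static int foo_poll(struct net_device *poll_dev, int *budget)
	{
		struct foo_priv *priv = netdev_priv(poll_dev);
		int work = min(*budget, poll_dev->quota);
		int done = foo_process_rx_cq(priv, work);	/* hypothetical helper */

		poll_dev->quota -= done;
		*budget -= done;

		if (done < work) {
			netif_rx_complete(poll_dev);	/* all completions drained */
			foo_arm_rx_irq(priv);		/* hypothetical: re-enable interrupts */
			return 0;			/* done polling */
		}
		return 1;				/* ask to be polled again */
	}

On these kernels the handler is wired up through dev->poll and dev->weight rather than netif_napi_add(), which is why the backport keeps a poll_dev pointer in the CQ structure.
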
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/mlx4_en_0110_no_set_flags.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/mlx4_en_0110_no_set_flags.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18/mlx4_en_0110_no_set_flags.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -7,9 +7,9 @@
 --- ofed_kernel.orig/drivers/net/mlx4/en_params.c
 +++ ofed_kernel/drivers/net/mlx4/en_params.c
 @@ -593,8 +593,6 @@ const struct ethtool_ops mlx4_en_ethtool
- 	.get_pauseparam = mlx4_en_get_pauseparam,
  	.set_pauseparam = mlx4_en_set_pauseparam,
  	.get_ringparam = mlx4_en_get_ringparam,
+ 	.set_ringparam = mlx4_en_set_ringparam,
 -	.get_flags = ethtool_op_get_flags,
 -	.set_flags = ethtool_op_set_flags,
  };

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/core_2_z010_sysfs_to_2.6.18.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/core_2_z010_sysfs_to_2.6.18.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/core_2_z010_sysfs_to_2.6.18.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,32 @@
+--- a/drivers/infiniband/core/device.c	2009-02-24 14:00:50.387541000 +0200
++++ b/drivers/infiniband/core/device.c	2009-02-24 14:00:54.891362000 +0200
+@@ -199,7 +199,7 @@ void ib_dealloc_device(struct ib_device 
+ 
+ 	BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);
+ 
+-	kobject_put(&device->dev.kobj);
++	ib_device_unregister_sysfs(device);
+ }
+ EXPORT_SYMBOL(ib_dealloc_device);
+ 
+@@ -356,8 +356,6 @@ void ib_unregister_device(struct ib_devi
+ 
+ 	mutex_unlock(&device_mutex);
+ 
+-	ib_device_unregister_sysfs(device);
+-
+ 	spin_lock_irqsave(&device->client_data_lock, flags);
+ 	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ 		kfree(context);
+--- a/drivers/infiniband/core/sysfs.c	2009-02-24 13:58:42.509754000 +0200
++++ b/drivers/infiniband/core/sysfs.c	2009-02-24 14:00:54.894368000 +0200
+@@ -863,9 +863,6 @@ void ib_device_unregister_sysfs(struct i
+ 	struct kobject *p, *t;
+ 	struct ib_port *port;
+ 
+-	/* Hold kobject until ib_dealloc_device() */
+-	kobject_get(&device->dev.kobj);
+-
+ 	list_for_each_entry_safe(p, t, &device->port_list, entry) {
+ 		list_del(&p->entry);
+ 		port = container_of(p, struct ib_port, kobj);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/core_z0010_sysfs_race.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/core_z0010_sysfs_race.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/core_z0010_sysfs_race.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,496 @@
+ib_core: avoid race condition between sysfs access and low-level module unload. (V2)
+
+In newer kernels, although a low-level module will not be unloaded (code)
+while its sysfs interface is being accessed, it is possible for the module to
+free all its resources (data) during such access.  This almost always causes
+a kernel Oops.
+
+To avoid this, we protect the device reg_state with a mutex, and perform
+all sysfs operations (show, store) atomically within this mutex.
+
+V2: fix thinko bug in sysfs_state_show changes (found by Ralph Campbell).
+
+Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
+Signed-off-by: Ralph Campbell <ralph.campbell at qlogic.com>
+
+Index: ofed_kernel/drivers/infiniband/core/device.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/device.c	2009-02-22 15:36:34.531430000 +0200
++++ ofed_kernel/drivers/infiniband/core/device.c	2009-02-22 15:38:28.650322000 +0200
+@@ -178,9 +178,14 @@ static int end_port(struct ib_device *de
+  */
+ struct ib_device *ib_alloc_device(size_t size)
+ {
++	struct ib_device *ibdev;
++
+ 	BUG_ON(size < sizeof (struct ib_device));
+ 
+-	return kzalloc(size, GFP_KERNEL);
++	ibdev = kzalloc(size, GFP_KERNEL);
++	if (ibdev)
++		mutex_init(&ibdev->sysfs_mutex);
++	return ibdev;
+ }
+ EXPORT_SYMBOL(ib_alloc_device);
+ 
+@@ -313,9 +318,10 @@ int ib_register_device(struct ib_device 
+ 		goto out;
+ 	}
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	list_add_tail(&device->core_list, &device_list);
+-
+ 	device->reg_state = IB_DEV_REGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ 
+ 	{
+ 		struct ib_client *client;
+@@ -361,7 +367,9 @@ void ib_unregister_device(struct ib_devi
+ 		kfree(context);
+ 	spin_unlock_irqrestore(&device->client_data_lock, flags);
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	device->reg_state = IB_DEV_UNREGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ }
+ EXPORT_SYMBOL(ib_unregister_device);
+ 
+Index: ofed_kernel/drivers/infiniband/core/sysfs.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/sysfs.c	2009-02-22 15:36:35.914354000 +0200
++++ ofed_kernel/drivers/infiniband/core/sysfs.c	2009-02-22 16:12:37.485629000 +0200
+@@ -94,7 +94,7 @@ static ssize_t state_show(struct ib_port
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+ 	static const char *state_name[] = {
+ 		[IB_PORT_NOP]		= "NOP",
+@@ -105,26 +105,33 @@ static ssize_t state_show(struct ib_port
+ 		[IB_PORT_ACTIVE_DEFER]	= "ACTIVE_DEFER"
+ 	};
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%d: %s\n", attr.state,
+-		       attr.state >= 0 && attr.state < ARRAY_SIZE(state_name) ?
+-		       state_name[attr.state] : "UNKNOWN");
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d: %s\n", attr.state,
++				      attr.state >= 0 &&
++				      attr.state < ARRAY_SIZE(state_name) ?
++				      state_name[attr.state] : "UNKNOWN");
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "0x%x\n", attr.lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_mask_count_show(struct ib_port *p,
+@@ -132,52 +139,64 @@ static ssize_t lid_mask_count_show(struc
+ 				   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.lmc);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.lmc);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%x\n", attr.sm_lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.sm_lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused,
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.sm_sl);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.sm_sl);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused,
+ 			     char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
+@@ -186,24 +205,33 @@ static ssize_t rate_show(struct ib_port 
+ 	struct ib_port_attr attr;
+ 	char *speed = "";
+ 	int rate;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	switch (attr.active_speed) {
+-	case 2: speed = " DDR"; break;
+-	case 4: speed = " QDR"; break;
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.active_speed) {
++			case 2: speed = " DDR"; break;
++			case 4: speed = " QDR"; break;
++			}
++
++			rate = 25 * ib_width_enum_to_int(attr.active_width) *
++				attr.active_speed;
++			if (rate < 0) {
++				ret = -EINVAL;
++				goto out;
++			}
++
++			ret = sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
++				      rate / 10, rate % 10 ? ".5" : "",
++				      ib_width_enum_to_int(attr.active_width),
++				      speed);
++		}
+ 	}
+-
+-	rate = 25 * ib_width_enum_to_int(attr.active_width) * attr.active_speed;
+-	if (rate < 0)
+-		return -EINVAL;
+-
+-	return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
+-		       rate / 10, rate % 10 ? ".5" : "",
+-		       ib_width_enum_to_int(attr.active_width), speed);
++out:
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
+@@ -211,22 +239,42 @@ static ssize_t phys_state_show(struct ib
+ {
+ 	struct ib_port_attr attr;
+ 
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	switch (attr.phys_state) {
+-	case 1:  return sprintf(buf, "1: Sleep\n");
+-	case 2:  return sprintf(buf, "2: Polling\n");
+-	case 3:  return sprintf(buf, "3: Disabled\n");
+-	case 4:  return sprintf(buf, "4: PortConfigurationTraining\n");
+-	case 5:  return sprintf(buf, "5: LinkUp\n");
+-	case 6:  return sprintf(buf, "6: LinkErrorRecovery\n");
+-	case 7:  return sprintf(buf, "7: Phy Test\n");
+-	default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.phys_state) {
++			case 1:
++				ret = sprintf(buf, "1: Sleep\n");
++				break;
++			case 2:
++				ret = sprintf(buf, "2: Polling\n");
++				break;
++			case 3:
++				ret = sprintf(buf, "3: Disabled\n");
++				break;
++			case 4:
++				ret = sprintf(buf, "4: PortConfigurationTraining\n");
++				break;
++			case 5:
++				ret = sprintf(buf, "5: LinkUp\n");
++				break;
++			case 6:
++				ret = sprintf(buf, "6: LinkErrorRecovery\n");
++				break;
++			case 7:
++				ret = sprintf(buf, "7: Phy Test\n");
++				break;
++			default:
++				ret = sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++				break;
++			}
++		}
+ 	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static PORT_ATTR_RO(state);
+@@ -256,21 +304,24 @@ static ssize_t show_port_gid(struct ib_p
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	union ib_gid gid;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) gid.raw)[0]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[1]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[2]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[3]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[4]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[5]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[6]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[7]));
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) gid.raw)[0]),
++				      be16_to_cpu(((__be16 *) gid.raw)[1]),
++				      be16_to_cpu(((__be16 *) gid.raw)[2]),
++				      be16_to_cpu(((__be16 *) gid.raw)[3]),
++				      be16_to_cpu(((__be16 *) gid.raw)[4]),
++				      be16_to_cpu(((__be16 *) gid.raw)[5]),
++				      be16_to_cpu(((__be16 *) gid.raw)[6]),
++				      be16_to_cpu(((__be16 *) gid.raw)[7]));
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
+@@ -279,13 +330,16 @@ static ssize_t show_port_pkey(struct ib_
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	u16 pkey;
+-	ssize_t ret;
+-
+-	ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%04x\n", pkey);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
++		if (!ret)
++			ret = sprintf(buf, "0x%04x\n", pkey);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ #define PORT_PMA_ATTR(_name, _counter, _width, _offset)			\
+@@ -308,6 +346,12 @@ static ssize_t show_pma_counter(struct i
+ 	if (!p->ibdev->process_mad)
+ 		return sprintf(buf, "N/A (no PMA)\n");
+ 
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (!ibdev_is_alive(p->ibdev)) {
++		ret = -ENODEV;
++		goto out;
++	}
++
+ 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
+ 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ 	if (!in_mad || !out_mad) {
+@@ -354,7 +414,7 @@ static ssize_t show_pma_counter(struct i
+ out:
+ 	kfree(in_mad);
+ 	kfree(out_mad);
+-
++	mutex_unlock(&p->ibdev->sysfs_mutex);
+ 	return ret;
+ }
+ 
+@@ -594,20 +654,20 @@ static ssize_t show_sys_image_guid(struc
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_attr attr;
+-	ssize_t ret;
+-
+-	if (!ibdev_is_alive(dev))
+-		return -ENODEV;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_device(dev, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_query_device(dev, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_node_guid(struct device *device,
+@@ -639,17 +699,20 @@ static ssize_t set_node_desc(struct devi
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_modify desc = {};
+-	int ret;
++	int ret = -ENODEV;
+ 
+ 	if (!dev->modify_device)
+ 		return -EIO;
+ 
+ 	memcpy(desc.node_desc, buf, min_t(int, count, 64));
+-	ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
+-	if (ret)
+-		return ret;
+-
+-	return count;
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
++		if (!ret)
++			ret = count;
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
+@@ -677,14 +740,18 @@ static ssize_t show_protocol_stat(const 
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	union rdma_protocol_stats stats;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = dev->get_protocol_stats(dev, &stats);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%llu\n",
+-		       (unsigned long long) ((u64 *) &stats)[offset]);
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = dev->get_protocol_stats(dev, &stats);
++		if (!ret)
++			ret = sprintf(buf, "%llu\n",
++				      (unsigned long long)
++				      ((u64 *) &stats)[offset]);
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ /* generate a read-only iwarp statistics attribute */
+Index: ofed_kernel/include/rdma/ib_verbs.h
+===================================================================
+--- ofed_kernel.orig/include/rdma/ib_verbs.h	2009-02-22 15:36:40.252210000 +0200
++++ ofed_kernel/include/rdma/ib_verbs.h	2009-02-22 15:38:28.678322000 +0200
+@@ -1205,6 +1205,7 @@ struct ib_device {
+ 		IB_DEV_REGISTERED,
+ 		IB_DEV_UNREGISTERED
+ 	}                            reg_state;
++	struct mutex		     sysfs_mutex;
+ 
+ 	u64			     uverbs_cmd_mask;
+ 	int			     uverbs_abi_ver;

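The pattern the sysfs race patch applies is uniform across device.c and sysfs.c above: initialise a per-device sysfs_mutex at allocation time, flip reg_state under that mutex on register/unregister, and have every show/store handler default to -ENODEV, take the mutex, and only query the hardware while the device is still alive. A condensed sketch of the three pieces (names other than those appearing in the patch are hypothetical):

	/* sketch only: serialising sysfs access against device teardown */
	#include <linux/mutex.h>
	#include <linux/slab.h>

	struct foo_ibdev {
		struct mutex sysfs_mutex;
		int registered;			/* stands in for reg_state */
	};

	static struct foo_ibdev *foo_alloc_device(void)
	{
		struct foo_ibdev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

		if (dev)
			mutex_init(&dev->sysfs_mutex);
		return dev;
	}

	static void foo_unregister_device(struct foo_ibdev *dev)
	{
		mutex_lock(&dev->sysfs_mutex);
		dev->registered = 0;		/* sysfs readers bail out from now on */
		mutex_unlock(&dev->sysfs_mutex);
	}

	static ssize_t foo_attr_show(struct foo_ibdev *dev, char *buf)
	{
		ssize_t ret = -ENODEV;		/* default when the device is gone */

		mutex_lock(&dev->sysfs_mutex);
		if (dev->registered)
			ret = sprintf(buf, "%d\n", foo_query_hw(dev));	/* hypothetical */
		mutex_unlock(&dev->sysfs_mutex);
		return ret;
	}
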
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/cxgb3_0002_undo_250.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/cxgb3_0002_undo_250.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/cxgb3_0002_undo_250.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,8 @@
-commit 204e2f98c2d13f869b8541f3c57c7314f75cab11
-Author: Divy Le Ray <divy at chelsio.com>
-Date:   Tue May 6 19:26:01 2008 -0700
-
-    cxgb3 - fix EEH
-    
-    Reset the chip when the PCI link goes down.
-    Preserve the napi structure when a sge qset's resources are freed.
-    Replay only HW initialization when the chip comes out of reset.
-    
-    Signed-off-by: Divy Le ray <divy at chelsio.com>
-    Signed-off-by: Jeff Garzik <jgarzik at redhat.com>
-
----
- drivers/net/cxgb3/common.h     |    1 -
- drivers/net/cxgb3/cxgb3_main.c |   10 ++++------
- drivers/net/cxgb3/regs.h       |    8 --------
- drivers/net/cxgb3/sge.c        |   31 ++-----------------------------
- drivers/net/cxgb3/t3_hw.c      |   28 ----------------------------
- 5 files changed, 6 insertions(+), 72 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/common.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/common.h
-+++ ofed_kernel/drivers/net/cxgb3/common.h
-@@ -700,7 +700,6 @@ void mac_prep(struct cmac *mac, struct a
+diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
+index d5e9bf7..dc2c79d 100644
+--- a/drivers/net/cxgb3/common.h
++++ b/drivers/net/cxgb3/common.h
+@@ -726,7 +726,6 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
  void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
  int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
  		    int reset);
@@ -31,11 +10,11 @@
  void t3_led_ready(struct adapter *adapter);
  void t3_fatal_err(struct adapter *adapter);
  void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -2449,6 +2449,9 @@ static pci_ers_result_t t3_io_error_dete
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 8de820e..48fbda6 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -2485,6 +2485,9 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
  	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
  		offload_close(&adapter->tdev);
  
@@ -45,7 +24,7 @@
  	adapter->flags &= ~FULL_INIT_DONE;
  
  	pci_disable_device(pdev);
-@@ -2473,12 +2476,8 @@ static pci_ers_result_t t3_io_slot_reset
+@@ -2509,12 +2512,8 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
  		goto err;
  	}
  	pci_set_master(pdev);
@@ -59,7 +38,7 @@
  		goto err;
  
  	return PCI_ERS_RESULT_RECOVERED;
-@@ -2630,7 +2629,6 @@ static int __devinit init_one(struct pci
+@@ -2666,7 +2665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
  	}
  
  	pci_set_master(pdev);
@@ -67,10 +46,10 @@
  
  	mmio_start = pci_resource_start(pdev, 0);
  	mmio_len = pci_resource_len(pdev, 0);
-Index: ofed_kernel/drivers/net/cxgb3/regs.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/regs.h
-+++ ofed_kernel/drivers/net/cxgb3/regs.h
+diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
+index a035d5c..086cd02 100644
+--- a/drivers/net/cxgb3/regs.h
++++ b/drivers/net/cxgb3/regs.h
 @@ -444,14 +444,6 @@
  
  #define A_PCIE_CFG 0x88
@@ -86,11 +65,11 @@
  #define S_PCIE_CLIDECEN    16
  #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
  #define F_PCIE_CLIDECEN    V_PCIE_CLIDECEN(1U)
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index 1b0861d..912f816 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
  }
  
  /**
@@ -124,7 +103,7 @@
   *	free_qset - free the resources of an SGE queue set
   *	@adapter: the adapter owning the queue set
   *	@q: the queue set
-@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter 
+@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -133,20 +112,11 @@
  }
  
  /**
-@@ -1429,7 +1402,7 @@ static void restart_ctrlq(unsigned long 
-  */
- int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
- {
--	int ret;
-+	int ret;
- 	local_bh_disable();
- 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
- 	local_bh_enable();
-Index: ofed_kernel/drivers/net/cxgb3/t3_hw.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/t3_hw.c
-+++ ofed_kernel/drivers/net/cxgb3/t3_hw.c
-@@ -3265,7 +3265,6 @@ static void config_pcie(struct adapter *
+diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
+index bf5c076..533fc74 100644
+--- a/drivers/net/cxgb3/t3_hw.c
++++ b/drivers/net/cxgb3/t3_hw.c
+@@ -3303,7 +3303,6 @@ static void config_pcie(struct adapter *adap)
  
  	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
  	t3_set_reg_field(adap, A_PCIE_CFG, 0,
@@ -154,15 +124,14 @@
  			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
  }
  
-@@ -3657,30 +3656,3 @@ void t3_led_ready(struct adapter *adapte
- 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+@@ -3706,31 +3705,3 @@ void t3_led_ready(struct adapter *adapter)
  			 F_GPIO0_OUT_VAL);
  }
--
+ 
 -int t3_replay_prep_adapter(struct adapter *adapter)
 -{
 -	const struct adapter_info *ai = adapter->params.info;
--	unsigned int i, j = 0;
+-	unsigned int i, j = -1;
 -	int ret;
 -
 -	early_hw_init(adapter, ai);
@@ -171,15 +140,17 @@
 -		return ret;
 -
 -	for_each_port(adapter, i) {
+-		const struct port_type_info *pti;
 -		struct port_info *p = adap2pinfo(adapter, i);
--		while (!adapter->params.vpd.port_type[j])
--			++j;
 -
--		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
--					ai->mdio_ops);
+-		while (!adapter->params.vpd.port_type[++j])
+-			;
 -
+-		pti = &port_types[adapter->params.vpd.port_type[j]];
+-		ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
+-		if (ret)
+-			return ret;
 -		p->phy.ops->power_down(&p->phy, 1);
--		++j;
 -	}
 -
 -return 0;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/cxgb3_0010_napi.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/cxgb3_0010_napi.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/cxgb3_0010_napi.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,21 @@
----
- drivers/net/cxgb3/adapter.h    |   22 +++--
- drivers/net/cxgb3/cxgb3_main.c |   94 ++++++++++++++++------
- drivers/net/cxgb3/sge.c        |  170 ++++++++++++++++++++++++-----------------
- 3 files changed, 185 insertions(+), 101 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/adapter.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/adapter.h
-+++ ofed_kernel/drivers/net/cxgb3/adapter.h
-@@ -47,13 +47,11 @@
- #include <asm/io.h>
+diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
+index 3b33ecb..21dad82 100644
+--- a/drivers/net/cxgb3/adapter.h
++++ b/drivers/net/cxgb3/adapter.h
+@@ -48,12 +48,10 @@
  
  struct vlan_group;
--struct adapter;
+ struct adapter;
 -struct sge_qset;
  
-+struct adapter;
  struct port_info {
  	struct adapter *adapter;
  	struct vlan_group *vlan_grp;
 -	struct sge_qset *qs;
- 	const struct port_type_info *port_type;
  	u8 port_id;
  	u8 rx_csum_offload;
-@@ -184,8 +182,6 @@ enum {				/* per port SGE statistics */
+ 	u8 nqsets;
+@@ -183,8 +181,6 @@ enum {				/* per port SGE statistics */
  #define T3_MAX_LRO_MAX_PKTS 64
  
  struct sge_qset {		/* an SGE queue set */
@@ -32,7 +24,7 @@
  	struct sge_rspq rspq;
  	struct sge_fl fl[SGE_RXQ_PER_SET];
  	struct sge_txq txq[SGE_TXQ_PER_SET];
-@@ -196,7 +192,7 @@ struct sge_qset {		/* an SGE queue set *
+@@ -195,7 +191,7 @@ struct sge_qset {		/* an SGE queue set */
  	int lro_enabled;
  	int lro_frag_len;
  	void *lro_va;
@@ -41,7 +33,7 @@
  	unsigned long txq_stopped;	/* which Tx queues are stopped */
  	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
  	unsigned long port_stats[SGE_PSTAT_MAX];
-@@ -241,6 +237,12 @@ struct adapter {
+@@ -240,6 +236,12 @@ struct adapter {
  	struct delayed_work adap_check_task;
  	struct work_struct ext_intr_handler_task;
  
@@ -54,7 +46,7 @@
  	struct dentry *debugfs_root;
  
  	struct mutex mdio_lock;
-@@ -267,6 +269,12 @@ static inline struct port_info *adap2pin
+@@ -266,6 +268,12 @@ static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
  	return netdev_priv(adap->port[idx]);
  }
  
@@ -67,7 +59,7 @@
  #define OFFLOAD_DEVMAP_BIT 15
  
  #define tdev2adap(d) container_of(d, struct adapter, tdev)
-@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, str
+@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -76,11 +68,11 @@
  int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
  		unsigned char *data);
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -410,17 +410,49 @@ static void setup_rss(struct adapter *ad
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 0f4c694..342d441 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -435,17 +435,49 @@ static void setup_rss(struct adapter *adap)
  		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
  }
  
@@ -98,9 +90,7 @@
 -	int i;
 +	int i, j, dummy_idx = 0;
 +	struct net_device *nd;
- 
--	for (i = 0; i < SGE_QSETS; i++) {
--		struct sge_qset *qs = &adap->sge.qs[i];
++
 +	for_each_port(adap, i) {
 +		struct net_device *dev = adap->port[i];
 +		const struct port_info *pi = netdev_priv(dev);
@@ -112,7 +102,9 @@
 +				nd = alloc_netdev(sizeof(*p), "", ether_setup);
 +				if (!nd)
 +					goto free_all;
-+
+ 
+-	for (i = 0; i < SGE_QSETS; i++) {
+-		struct sge_qset *qs = &adap->sge.qs[i];
 +				p = netdev_priv(nd);
 +				p->adapter = adap;
 +				nd->weight = 64;
@@ -137,7 +129,7 @@
  }
  
  /*
-@@ -431,18 +463,20 @@ static void init_napi(struct adapter *ad
+@@ -456,18 +488,20 @@ static void init_napi(struct adapter *adap)
  static void quiesce_rx(struct adapter *adap)
  {
  	int i;
@@ -168,7 +160,7 @@
  }
  
  /**
-@@ -455,7 +489,7 @@ static void enable_all_napi(struct adapt
+@@ -480,7 +514,7 @@ static void enable_all_napi(struct adapter *adap)
   */
  static int setup_sge_qsets(struct adapter *adap)
  {
@@ -177,7 +169,7 @@
  	unsigned int ntxq = SGE_TXQ_PER_SET;
  
  	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
-@@ -463,14 +497,15 @@ static int setup_sge_qsets(struct adapte
+@@ -488,14 +522,15 @@ static int setup_sge_qsets(struct adapter *adap)
  
  	for_each_port(adap, i) {
  		struct net_device *dev = adap->port[i];
@@ -196,7 +188,7 @@
  			if (err) {
  				t3_free_sge_resources(adap);
  				return err;
-@@ -884,6 +919,10 @@ static int cxgb_up(struct adapter *adap)
+@@ -909,6 +944,10 @@ static int cxgb_up(struct adapter *adap)
  				goto out;
  		}
  
@@ -207,7 +199,7 @@
  		err = t3_init_hw(adap, 0);
  		if (err)
  			goto out;
-@@ -896,7 +935,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -921,7 +960,6 @@ static int cxgb_up(struct adapter *adap)
  			goto out;
  
  		setup_rss(adap);
@@ -215,7 +207,7 @@
  		adap->flags |= FULL_INIT_DONE;
  	}
  
-@@ -924,7 +962,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -949,7 +987,6 @@ static int cxgb_up(struct adapter *adap)
  				      adap->name, adap)))
  		goto irq_err;
  
@@ -223,7 +215,7 @@
  	t3_sge_start(adap);
  	t3_intr_enable(adap);
  
-@@ -1061,10 +1098,8 @@ static int cxgb_open(struct net_device *
+@@ -1086,10 +1123,8 @@ static int cxgb_open(struct net_device *dev)
  	int other_ports = adapter->open_device_map & PORT_MASK;
  	int err;
  
@@ -235,7 +227,7 @@
  
  	set_bit(pi->port_id, &adapter->open_device_map);
  	if (is_offload(adapter) && !ofld_disable) {
-@@ -2700,6 +2735,7 @@ static int __devinit init_one(struct pci
+@@ -2736,6 +2771,7 @@ static int __devinit init_one(struct pci_dev *pdev,
  #ifdef CONFIG_NET_POLL_CONTROLLER
  		netdev->poll_controller = cxgb_netpoll;
  #endif
@@ -243,7 +235,7 @@
  
  		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
  	}
-@@ -2800,6 +2836,12 @@ static void __devexit remove_one(struct 
+@@ -2836,6 +2872,12 @@ static void __devexit remove_one(struct pci_dev *pdev)
  		t3_free_sge_resources(adapter);
  		cxgb_disable_msi(adapter);
  
@@ -256,11 +248,11 @@
  		for_each_port(adapter, i)
  			if (adapter->port[i])
  				free_netdev(adapter->port[i]);
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter 
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index f6bc6fe..3bbf626 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -270,7 +262,7 @@
  	memset(q, 0, sizeof(*q));
  }
  
-@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, str
+@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
  	unsigned int ndesc, pidx, credits, gen, compl;
  	const struct port_info *pi = netdev_priv(dev);
  	struct adapter *adap = pi->adapter;
@@ -279,7 +271,7 @@
  	struct sge_txq *q = &qs->txq[TXQ_ETH];
  
  	/*
-@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long 
+@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long data)
  	struct sk_buff *skb;
  	struct sge_qset *qs = (struct sge_qset *)data;
  	struct sge_txq *q = &qs->txq[TXQ_CTRL];
@@ -295,7 +287,7 @@
  
  		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
  
-@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long 
+@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long data)
  
  	spin_unlock(&q->lock);
  	wmb();
@@ -304,7 +296,7 @@
  		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
  }
  
-@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struc
+@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
  	else {
  		struct sge_qset *qs = rspq_to_qset(q);
  
@@ -314,7 +306,7 @@
  		q->rx_head = skb;
  	}
  	q->rx_tail = skb;
-@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundl
+@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev,
   *	receive handler.  Batches need to be of modest size as we do prefetches
   *	on the packets in each.
   */
@@ -357,7 +349,7 @@
  			prefetch(head->data);
  			skbs[ngathered] = head;
  			head = head->next;
-@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct 
+@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct *napi, int budget)
  		}
  		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
  	}
@@ -370,7 +362,7 @@
  }
  
  /**
-@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const
+@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const struct rsp_desc *r)
  
  /**
   *	napi_rx_handler - the NAPI handler for Rx processing
@@ -449,7 +441,7 @@
  }
  
  /**
-@@ -2448,7 +2462,8 @@ static inline int handle_responses(struc
+@@ -2448,7 +2462,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
  		return 0;
  	}
@@ -459,7 +451,7 @@
  	return 1;
  }
  
-@@ -2459,7 +2474,8 @@ static inline int handle_responses(struc
+@@ -2459,7 +2474,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -469,7 +461,7 @@
  	struct sge_rspq *q = &qs->rspq;
  
  	spin_lock(&q->lock);
-@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, vo
+@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -484,7 +476,7 @@
  		q->unhandled_irqs++;
  	spin_unlock(&q->lock);
  	return IRQ_HANDLED;
-@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, 
+@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, void *cookie)
  	return IRQ_HANDLED;
  }
  
@@ -502,7 +494,7 @@
  		return 1;
  	}
  	return 0;
-@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int 
+@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
  
  	spin_lock(&q->lock);
  
@@ -515,7 +507,7 @@
  	if (!new_packets && t3_slow_intr_handler(adap) == 0)
  		q->unhandled_irqs++;
  
-@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, voi
+@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, void *cookie)
  static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  {
  	u32 map;
@@ -527,7 +519,7 @@
  
  	t3_write_reg(adap, A_PL_CLI, 0);
  	map = t3_read_reg(adap, A_SG_DATA_INTR);
-@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq
+@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  	if (unlikely(map & F_ERRINTR))
  		t3_slow_intr_handler(adap);
  
@@ -550,7 +542,7 @@
  
  	spin_unlock(&q0->lock);
  	return IRQ_HANDLED;
-@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long d
+@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long data)
  {
  	spinlock_t *lock;
  	struct sge_qset *qs = (struct sge_qset *)data;
@@ -560,7 +552,7 @@
  
  	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
  		reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
-@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long d
+@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long data)
  		spin_unlock(&qs->txq[TXQ_OFLD].lock);
  	}
  	lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
@@ -572,7 +564,7 @@
  			u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
  
  			if (qs->fl[0].credits < qs->fl[0].size)
-@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long d
+@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long data)
   */
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
  {
@@ -586,7 +578,7 @@
  }
  
  /**
-@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_
+@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
   */
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -595,7 +587,7 @@
  {
  	int i, avail, ret = -ENOMEM;
  	struct sge_qset *q = &adapter->sge.qs[id];
-@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *ad
+@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  	}
  
  	spin_unlock_irq(&adapter->sge.reg_lock);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,23 +3,32 @@
  drivers/infiniband/hw/ipath/ipath_user_sdma.c |    6 +++---
  2 files changed, 4 insertions(+), 4 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:37:35.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:41:22.000000000 -0800
 @@ -698,7 +698,7 @@ retry:
  
  	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
  			      tx->map_len, DMA_TO_DEVICE);
--	if (dma_mapping_error(&dd->pcidev->dev, addr)) {
-+	if (dma_mapping_error(addr)) {
- 		ret = -EIO;
- 		goto unlock;
- 	}
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+-	if (dma_mapping_error(&dd->pcidev->dev, addr))
++	if (dma_mapping_error(addr))
+ 		goto ioerr;
+ 
+ 	dwoffset = tx->map_len >> 2;
+@@ -739,7 +739,7 @@ retry:
+ 		dw = (len + 3) >> 2;
+ 		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
+ 				      DMA_TO_DEVICE);
+-		if (dma_mapping_error(&dd->pcidev->dev, addr))
++		if (dma_mapping_error(addr))
+ 			goto unmap;
+ 		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
+ 		/* SDmaUseLargeBuf has to be set in every descriptor */
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:18.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:54.000000000 -0800
 @@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(cons
  
  	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/ipoib_0100_to_2.6.21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/ipoib_0100_to_2.6.21.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/ipoib_0100_to_2.6.21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -12,11 +12,11 @@
  drivers/infiniband/ulp/ipoib/ipoib_multicast.c |   10 ++--
  5 files changed, 61 insertions(+), 64 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:56:08.000000000 +0200
-@@ -279,8 +279,6 @@ struct ipoib_dev_priv {
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:46:20.000000000 +0300
+@@ -277,8 +277,6 @@ struct ipoib_dev_priv {
  
  	struct net_device *dev;
  
@@ -25,7 +25,7 @@
  	unsigned long flags;
  
  	struct mutex vlan_mutex;
-@@ -338,6 +336,8 @@ struct ipoib_dev_priv {
+@@ -336,6 +334,8 @@ struct ipoib_dev_priv {
  
  	struct ib_event_handler event_handler;
  
@@ -34,7 +34,7 @@
  	struct net_device *parent;
  	struct list_head child_intfs;
  	struct list_head list;
-@@ -425,7 +425,7 @@ extern struct workqueue_struct *ipoib_wo
+@@ -423,7 +423,7 @@ extern struct workqueue_struct *ipoib_wo
  
  /* functions */
  
@@ -43,10 +43,10 @@
  void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
  void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:46:20.000000000 +0300
 @@ -593,7 +593,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		ipoib_dbg(priv, "cm recv error "
  			   "(status=%d, wrid=%d vend_err %x)\n",
@@ -56,7 +56,7 @@
  		if (has_srq)
  			goto repost;
  		else {
-@@ -658,7 +658,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
+@@ -646,7 +646,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		 * this packet and reuse the old buffer.
  		 */
  		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
@@ -65,7 +65,7 @@
  		goto repost;
  	}
  
-@@ -676,8 +676,8 @@ copied:
+@@ -664,8 +664,8 @@ copied:
  	skb_pull(skb, IPOIB_ENCAP_LEN);
  
  	dev->last_rx = jiffies;
@@ -76,7 +76,7 @@
  
  	skb->dev = dev;
  	/* XXX get correct PACKET_ type here */
-@@ -726,8 +726,8 @@ void ipoib_cm_send(struct net_device *de
+@@ -714,8 +714,8 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(skb->len > tx->mtu)) {
  		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
  			   skb->len, tx->mtu);
@@ -87,7 +87,7 @@
  		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
  		return;
  	}
-@@ -746,7 +746,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -734,7 +734,7 @@ void ipoib_cm_send(struct net_device *de
  	tx_req->skb = skb;
  	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
  	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
@@ -96,7 +96,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -756,7 +756,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -744,7 +744,7 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
  			       addr, skb->len))) {
  		ipoib_warn(priv, "post_send failed\n");
@@ -105,7 +105,7 @@
  		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
  		dev_kfree_skb_any(skb);
  	} else {
-@@ -793,8 +793,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
+@@ -781,8 +781,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
  	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
  
  	/* FIXME: is this right? Shouldn't we only increment on success? */
@@ -116,10 +116,10 @@
  
  	dev_kfree_skb_any(tx_req->skb);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:43:16.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:49:04.000000000 +0300
 @@ -261,7 +261,7 @@ static void ipoib_ib_handle_rx_wc(struct
  	 * this packet and reuse the old buffer.
  	 */
@@ -263,60 +263,41 @@
  		--priv->tx_outstanding;
  		ipoib_dma_unmap_tx(priv->ca, tx_req);
  		dev_kfree_skb_any(skb);
-@@ -809,6 +812,7 @@ int ipoib_ib_dev_stop(struct net_device 
+@@ -708,8 +711,7 @@ int ipoib_ib_dev_open(struct net_device 
+ 	priv->poll_timer.function = ipoib_ib_tx_timer_func;
+ 	priv->poll_timer.data = (unsigned long)dev;
+ 
+-	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_enable(&priv->napi);
++	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+ 
+ 	return 0;
+ }
+@@ -828,8 +830,8 @@ int ipoib_ib_dev_stop(struct net_device 
+ 	struct ipoib_tx_buf *tx_req;
  	int i;
  
- 	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+-	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_disable(&priv->napi);
++	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 +	netif_poll_disable(dev);
  
  	ipoib_cm_dev_stop(dev);
  
-@@ -893,6 +897,7 @@ timeout:
- 		msleep(1);
- 	}
+@@ -903,6 +905,7 @@ timeout:
  
+ 	ipoib_ah_dev_cleanup(dev);
+ 
 +	netif_poll_enable(dev);
  	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
  
  	return 0;
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:55:35.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:58:18.000000000 +0200
-@@ -106,20 +106,16 @@ int ipoib_open(struct net_device *dev)
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:44:02.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:46:20.000000000 +0300
+@@ -585,7 +585,7 @@ static void neigh_add_path(struct sk_buf
  
- 	ipoib_dbg(priv, "bringing up interface\n");
- 
--	if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
--		napi_enable(&priv->napi);
-+	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- 
- 	if (ipoib_pkey_dev_delay_open(dev))
- 		return 0;
- 
--	if (ipoib_ib_dev_open(dev)) {
--		napi_disable(&priv->napi);
--		return -EINVAL;
--	}
-+	if (ipoib_ib_dev_open(dev))
-+  		return -EINVAL;
- 
- 	if (ipoib_ib_dev_up(dev)) {
- 		ipoib_ib_dev_stop(dev, 1);
--		napi_disable(&priv->napi);
- 		return -EINVAL;
- 	}
- 
-@@ -152,7 +148,6 @@ static int ipoib_stop(struct net_device 
- 	ipoib_dbg(priv, "stopping interface\n");
- 
- 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
--	napi_disable(&priv->napi);
- 
- 	netif_stop_queue(dev);
- 
-@@ -604,7 +599,7 @@ static void neigh_add_path(struct sk_buf
- 
  	neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
  	if (!neigh) {
 -		++dev->stats.tx_dropped;
@@ -324,7 +305,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -667,7 +662,7 @@ err_list:
+@@ -646,7 +646,7 @@ err_list:
  err_path:
  	ipoib_neigh_free(dev, neigh);
  err_drop:
@@ -333,7 +314,7 @@
  	dev_kfree_skb_any(skb);
  
  	spin_unlock_irqrestore(&priv->lock, flags);
-@@ -714,7 +709,7 @@ static void unicast_arp_send(struct sk_b
+@@ -697,7 +697,7 @@ static void unicast_arp_send(struct sk_b
  			} else
  				__path_add(dev, path);
  		} else {
@@ -342,7 +323,7 @@
  			dev_kfree_skb_any(skb);
  		}
  
-@@ -733,7 +728,7 @@ static void unicast_arp_send(struct sk_b
+@@ -716,7 +716,7 @@ static void unicast_arp_send(struct sk_b
  		skb_push(skb, sizeof *phdr);
  		__skb_queue_tail(&path->queue, skb);
  	} else {
@@ -351,7 +332,7 @@
  		dev_kfree_skb_any(skb);
  	}
  
-@@ -790,7 +785,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -773,7 +773,7 @@ static int ipoib_start_xmit(struct sk_bu
  			__skb_queue_tail(&neigh->queue, skb);
  			spin_unlock_irqrestore(&priv->lock, flags);
  		} else {
@@ -360,7 +341,7 @@
  			dev_kfree_skb_any(skb);
  		}
  	} else {
-@@ -816,7 +811,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -799,7 +799,7 @@ static int ipoib_start_xmit(struct sk_bu
  					   IPOIB_QPN(phdr->hwaddr),
  					   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
  				dev_kfree_skb_any(skb);
@@ -369,7 +350,7 @@
  				return NETDEV_TX_OK;
  			}
  
-@@ -842,7 +837,7 @@ static void ipoib_timeout(struct net_dev
+@@ -825,7 +825,7 @@ static void ipoib_timeout(struct net_dev
  static int ipoib_hard_header(struct sk_buff *skb,
  			     struct net_device *dev,
  			     unsigned short type,
@@ -378,7 +359,7 @@
  {
  	struct ipoib_header *header;
  
-@@ -931,9 +926,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
+@@ -914,9 +914,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
  void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
  {
  	struct sk_buff *skb;
@@ -390,7 +371,7 @@
  		dev_kfree_skb_any(skb);
  	}
  	if (ipoib_cm_get(neigh))
-@@ -1008,10 +1004,6 @@ void ipoib_dev_cleanup(struct net_device
+@@ -991,10 +992,6 @@ void ipoib_dev_cleanup(struct net_device
  	priv->tx_ring = NULL;
  }
  
@@ -401,7 +382,7 @@
  static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
  		       void **tcph, u64 *hdr_flags, void *priv)
  {
-@@ -1069,13 +1061,13 @@ static void ipoib_setup(struct net_devic
+@@ -1052,13 +1049,13 @@ static void ipoib_setup(struct net_devic
  	dev->change_mtu		 = ipoib_change_mtu;
  	dev->hard_start_xmit	 = ipoib_start_xmit;
  	dev->tx_timeout		 = ipoib_timeout;
@@ -418,10 +399,10 @@
  
  	dev->watchdog_timeo	 = HZ;
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:46:20.000000000 +0300
 @@ -100,7 +100,7 @@ static void ipoib_mcast_free(struct ipoi
  	}
  
@@ -448,7 +429,7 @@
  			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
  		}
  		netif_tx_unlock_bh(dev);
-@@ -667,7 +668,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -677,7 +678,7 @@ void ipoib_mcast_send(struct net_device 
  	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)		||
  	    !priv->broadcast					||
  	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
@@ -457,7 +438,7 @@
  		dev_kfree_skb_any(skb);
  		goto unlock;
  	}
-@@ -682,7 +683,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -692,7 +693,7 @@ void ipoib_mcast_send(struct net_device 
  		if (!mcast) {
  			ipoib_warn(priv, "unable to allocate memory for "
  				   "multicast structure\n");
@@ -466,7 +447,7 @@
  			dev_kfree_skb_any(skb);
  			goto out;
  		}
-@@ -697,7 +698,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -707,7 +708,7 @@ void ipoib_mcast_send(struct net_device 
  		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
  			skb_queue_tail(&mcast->pkt_queue, skb);
  		else {

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,26 @@
+---
+ drivers/infiniband/ulp/ipoib/ipoib_fs.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+===================================================================
+--- ofa_kernel-1.4.orig/drivers/infiniband/ulp/ipoib/ipoib_fs.c
++++ ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+@@ -134,7 +134,7 @@ static int ipoib_mcg_open(struct inode *
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_mcg_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_mcg_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -240,7 +240,7 @@ static int ipoib_path_open(struct inode 
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_path_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_path_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -5,13 +5,13 @@
 
 Signed-off-by: Doron Shoham <dorons at voltaire.com>
 ---
- drivers/scsi/scsi_transport_iscsi.c |   95 ++++++++++++++++++++----------------
- 1 file changed, 55 insertions(+), 40 deletions(-)
+ drivers/scsi/scsi_transport_iscsi.c |   97 +++++++++++++++++++++---------------
+ 1 file changed, 57 insertions(+), 40 deletions(-)
 
-Index: ofed_kernel/drivers/scsi/scsi_transport_iscsi.c
+Index: ofa_kernel-1.4/drivers/scsi/scsi_transport_iscsi.c
 ===================================================================
---- ofed_kernel.orig/drivers/scsi/scsi_transport_iscsi.c
-+++ ofed_kernel/drivers/scsi/scsi_transport_iscsi.c
+--- ofa_kernel-1.4.orig/drivers/scsi/scsi_transport_iscsi.c
++++ ofa_kernel-1.4/drivers/scsi/scsi_transport_iscsi.c
 @@ -20,6 +20,8 @@
   * along with this program; if not, write to the Free Software
   * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
@@ -21,7 +21,18 @@
  #include <linux/module.h>
  #include <linux/mutex.h>
  #include <net/tcp.h>
-@@ -397,10 +399,12 @@ static void __iscsi_unblock_session(stru
+@@ -378,8 +380,10 @@ static void __iscsi_unblock_session(stru
+ 	struct iscsi_cls_session *session =
+ 			container_of(work, struct iscsi_cls_session,
+ 				     unblock_work);
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
+ 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
+ 	struct iscsi_host *ihost = shost->shost_data;
++#endif
+ 	unsigned long flags;
+ 
+ 	/*
+@@ -397,10 +401,12 @@ static void __iscsi_unblock_session(stru
  	 * the async scanning code (drivers like iscsi_tcp do login and
  	 * scanning from userspace).
  	 */
@@ -38,7 +49,7 @@
  }
  
  /**
-@@ -1294,45 +1298,56 @@ iscsi_if_recv_msg(struct sk_buff *skb, s
+@@ -1294,45 +1300,56 @@ iscsi_if_recv_msg(struct sk_buff *skb, s
   * Malformed skbs with wrong lengths or invalid creds are not processed.
   */
  static void
@@ -129,7 +140,7 @@
  	}
  	mutex_unlock(&rx_queue_mutex);
  }
-@@ -1738,7 +1753,7 @@ static __init int iscsi_transport_init(v
+@@ -1738,7 +1755,7 @@ static __init int iscsi_transport_init(v
  	return 0;
  
  release_nls:
@@ -138,7 +149,7 @@
  unregister_session_class:
  	transport_class_unregister(&iscsi_session_class);
  unregister_conn_class:
-@@ -1753,7 +1768,7 @@ unregister_transport_class:
+@@ -1753,7 +1770,7 @@ unregister_transport_class:
  static void __exit iscsi_transport_exit(void)
  {
  	destroy_workqueue(iscsi_eh_timer_workq);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/iw_nes_050_to_2_6_24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/iw_nes_050_to_2_6_24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/iw_nes_050_to_2_6_24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:23:23.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -1118,19 +1118,19 @@ static ssize_t nes_show_wqm_quanta(struc
  static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
  					const char *buf, size_t count)

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/iw_nes_100_to_2_6_23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/iw_nes_100_to_2_6_23.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/iw_nes_100_to_2_6_23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/Kconfig nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig
---- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:38:45.000000000 -0500
 @@ -2,7 +2,6 @@ config INFINIBAND_NES
  	tristate "NetEffect RNIC Driver"
  	depends on PCI && INET && INFINIBAND
@@ -10,8 +10,8 @@
  	  This is a low-level driver for NetEffect RDMA enabled
  	  Network Interface Cards (RNIC).
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -360,10 +360,11 @@ struct ib_qp *nes_get_qp(struct ib_devic
   */
  static void nes_print_macaddr(struct net_device *netdev)
@@ -29,9 +29,9 @@
  
  /**
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_cm.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2008-11-07 15:26:56.000000000 -0600
-@@ -998,7 +998,6 @@ static int nes_addr_resolve_neigh(struct
+--- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1086,7 +1086,6 @@ static int nes_addr_resolve_neigh(struct
  	struct flowi fl;
  	struct neighbour *neigh;
  	int rc = -1;
@@ -39,7 +39,7 @@
  
  	memset(&fl, 0, sizeof fl);
  	fl.nl_u.ip4_u.daddr = htonl(dst_ip);
-@@ -1012,8 +1011,11 @@ static int nes_addr_resolve_neigh(struct
+@@ -1100,8 +1099,11 @@ static int nes_addr_resolve_neigh(struct
  	if (neigh) {
  		if (neigh->nud_state & NUD_VALID) {
  			nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -53,7 +53,7 @@
  			nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
  					     dst_ip, NES_ARP_ADD);
  			rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
-@@ -1042,7 +1044,6 @@ static struct nes_cm_node *make_cm_node(
+@@ -1130,7 +1132,6 @@ static struct nes_cm_node *make_cm_node(
  	int arpindex = 0;
  	struct nes_device *nesdev;
  	struct nes_adapter *nesadapter;
@@ -61,7 +61,7 @@
  
  	/* create an hte and cm_node for this instance */
  	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
-@@ -1111,8 +1112,11 @@ static struct nes_cm_node *make_cm_node(
+@@ -1197,8 +1198,11 @@ static struct nes_cm_node *make_cm_node(
  
  	/* copy the mac addr to node context */
  	memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
@@ -76,8 +76,8 @@
  	add_hte_node(cm_core, cm_node);
  	atomic_inc(&cm_nodes_created);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2008-11-07 15:38:18.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:38:45.000000000 -0500
 @@ -38,14 +38,9 @@
  #include <linux/ip.h>
  #include <linux/tcp.h>
@@ -90,10 +90,10 @@
 -module_param(nes_lro_max_aggr, uint, 0444);
 -MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
 -
- static u32 crit_err_count;
- u32 int_mod_timer_init;
- u32 int_mod_cq_depth_256;
-@@ -1540,25 +1535,6 @@ static void nes_rq_wqes_timeout(unsigned
+ static int wide_ppm_offset;
+ module_param(wide_ppm_offset, int, 0644);
+ MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm");
+@@ -1531,25 +1526,6 @@ static void nes_rq_wqes_timeout(unsigned
  }
  
  
@@ -119,7 +119,7 @@
  /**
   * nes_init_nic_qp
   */
-@@ -1783,14 +1759,6 @@ int nes_init_nic_qp(struct nes_device *n
+@@ -1773,14 +1749,6 @@ int nes_init_nic_qp(struct nes_device *n
  			jumbomode = 1;
  		nes_nic_init_timer_defaults(nesdev, jumbomode);
  	}
@@ -134,7 +134,7 @@
  	return 0;
  }
  
-@@ -2541,7 +2509,7 @@ static void nes_nic_napi_ce_handler(stru
+@@ -2556,7 +2524,7 @@ static void nes_nic_napi_ce_handler(stru
  {
  	struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
  
@@ -143,7 +143,7 @@
  }
  
  
-@@ -2576,13 +2544,10 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2591,13 +2559,10 @@ void nes_nic_ce_handler(struct nes_devic
  	u16 pkt_type;
  	u16 rqes_processed = 0;
  	u8 sq_cqes = 0;
@@ -157,7 +157,7 @@
  	do {
  		if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
  				NES_NIC_CQE_VALID) {
-@@ -2714,17 +2679,9 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2729,17 +2694,9 @@ void nes_nic_ce_handler(struct nes_devic
  							>> 16);
  					nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
  							nesvnic->netdev->name, vlan_tag);
@@ -178,7 +178,7 @@
  
  skip_rx_indicate0:
  				nesvnic->netdev->last_rx = jiffies;
-@@ -2755,8 +2712,6 @@ skip_rx_indicate0:
+@@ -2770,8 +2727,6 @@ skip_rx_indicate0:
  
  	} while (1);
  
@@ -188,18 +188,18 @@
  		barrier();
  		/* restart the queue if it had been stopped */
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.h nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:38:45.000000000 -0500
 @@ -33,8 +33,6 @@
  #ifndef __NES_HW_H
  #define __NES_HW_H
  
 -#include <linux/inet_lro.h>
 -
+ #define NES_PHY_TYPE_CX4       1
  #define NES_PHY_TYPE_1G        2
  #define NES_PHY_TYPE_IRIS      3
- #define NES_PHY_TYPE_ARGUS     4
-@@ -991,8 +989,6 @@ struct nes_hw_tune_timer {
+@@ -1005,8 +1003,6 @@ struct nes_hw_tune_timer {
  #define NES_TIMER_ENABLE_LIMIT      4
  #define NES_MAX_LINK_INTERRUPTS     128
  #define NES_MAX_LINK_CHECK          200
@@ -208,7 +208,7 @@
  
  struct nes_adapter {
  	u64              fw_ver;
-@@ -1168,7 +1164,6 @@ struct nes_vnic {
+@@ -1194,7 +1190,6 @@ struct nes_vnic {
  	u32               msg_enable;
  	/* u32 tx_avail; */
  	__be32            local_ipaddr;
@@ -216,7 +216,7 @@
  	spinlock_t           tx_lock;	/* could use netdev tx lock? */
  	struct timer_list    rq_wqes_timer;
  	u32                  nic_mem_size;
-@@ -1196,9 +1191,6 @@ struct nes_vnic {
+@@ -1222,9 +1217,6 @@ struct nes_vnic {
  	u8  of_device_registered;
  	u8  rdma_enabled;
  	u8  rx_checksum_disabled;
@@ -227,8 +227,8 @@
  
  struct nes_ib_device {
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
 @@ -96,35 +96,38 @@ static int nics_per_function = 1;
  /**
   * nes_netdev_poll
@@ -292,7 +292,7 @@
  	netif_stop_queue(netdev);
  	list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
  		first_nesvnic = container_of(list_pos, struct nes_vnic, list);
-@@ -797,14 +798,16 @@ static int nes_netdev_set_mac_address(st
+@@ -775,14 +776,16 @@ static int nes_netdev_set_mac_address(st
  	int i;
  	u32 macaddr_low;
  	u16 macaddr_high;
@@ -312,7 +312,7 @@
  	macaddr_high  = ((u16)netdev->dev_addr[0]) << 8;
  	macaddr_high += (u16)netdev->dev_addr[1];
  	macaddr_low   = ((u32)netdev->dev_addr[2]) << 24;
-@@ -909,11 +912,11 @@ static void nes_netdev_set_multicast_lis
+@@ -887,11 +890,11 @@ static void nes_netdev_set_multicast_lis
  			if (mc_index >= max_pft_entries_avaiable)
  				break;
  			if (multicast_addr) {
@@ -329,7 +329,7 @@
  				macaddr_high  = ((u16)multicast_addr->dmi_addr[0]) << 8;
  				macaddr_high += (u16)multicast_addr->dmi_addr[1];
  				macaddr_low   = ((u32)multicast_addr->dmi_addr[2]) << 24;
-@@ -1063,9 +1066,6 @@ static const char nes_ethtool_stringset[
+@@ -1040,9 +1043,6 @@ static const char nes_ethtool_stringset[
  	"CQ Depth 32",
  	"CQ Depth 128",
  	"CQ Depth 256",
@@ -339,17 +339,17 @@
  };
  
  #define NES_ETHTOOL_STAT_COUNT  ARRAY_SIZE(nes_ethtool_stringset)
-@@ -1263,9 +1263,6 @@ static void nes_netdev_get_ethtool_stats
- 	target_stat_values[52] = int_mod_cq_depth_32;
- 	target_stat_values[53] = int_mod_cq_depth_128;
- 	target_stat_values[54] = int_mod_cq_depth_256;
--	target_stat_values[55] = nesvnic->lro_mgr.stats.aggregated;
--	target_stat_values[56] = nesvnic->lro_mgr.stats.flushed;
--	target_stat_values[57] = nesvnic->lro_mgr.stats.no_desc;
+@@ -1240,9 +1240,6 @@ static void nes_netdev_get_ethtool_stats
+ 	target_stat_values[++index] = int_mod_cq_depth_32;
+ 	target_stat_values[++index] = int_mod_cq_depth_128;
+ 	target_stat_values[++index] = int_mod_cq_depth_256;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
  
  }
  
-@@ -1542,8 +1539,6 @@ static struct ethtool_ops nes_ethtool_op
+@@ -1525,8 +1522,6 @@ static struct ethtool_ops nes_ethtool_op
  	.set_sg = ethtool_op_set_sg,
  	.get_tso = ethtool_op_get_tso,
  	.set_tso = ethtool_op_set_tso,
@@ -358,7 +358,7 @@
  };
  
  
-@@ -1615,7 +1610,8 @@ struct net_device *nes_netdev_init(struc
+@@ -1598,7 +1593,8 @@ struct net_device *nes_netdev_init(struc
  	netdev->type = ARPHRD_ETHER;
  	netdev->features = NETIF_F_HIGHDMA;
  	netdev->ethtool_ops = &nes_ethtool_ops;
@@ -368,7 +368,7 @@
  	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
  	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
  	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
-@@ -1648,7 +1644,6 @@ struct net_device *nes_netdev_init(struc
+@@ -1630,7 +1626,6 @@ struct net_device *nes_netdev_init(struc
  
  	if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
  		netdev->features |= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/iw_nes_200_to_2_6_22.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/iw_nes_200_to_2_6_22.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/iw_nes_200_to_2_6_22.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:52:27.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -593,16 +593,22 @@ static int __devinit nes_probe(struct pc
  						nesdev->nesadapter->port_count;
  	}
@@ -35,9 +35,9 @@
  
  	tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.h nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h
---- linux-2.6/drivers/infiniband/hw/nes/nes.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2008-11-07 15:52:27.000000000 -0600
-@@ -276,14 +276,7 @@ static inline __le32 get_crc_value(struc
+--- linux-2.6/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:38:45.000000000 -0500
+@@ -274,14 +274,7 @@ static inline __le32 get_crc_value(struc
  	u32 crc_value;
  	crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad));
  

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/iw_nes_210_to_2_6_21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/iw_nes_210_to_2_6_21.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/iw_nes_210_to_2_6_21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,23 @@
+diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1549,6 +1549,11 @@ static void nes_netdev_vlan_rx_register(
+ 	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ }
+ 
++static void nes_netdev_vlan_rx_kill_vid(struct net_device *netdev, 
++					unsigned short vid)
++{
++}
++
+ 
+ /**
+  * nes_netdev_init - initialize network device
+@@ -1598,6 +1603,7 @@ struct net_device *nes_netdev_init(struc
+ 	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
+ 	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ 	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
++	netdev->vlan_rx_kill_vid = nes_netdev_vlan_rx_kill_vid;
+ 
+ 	/* Fill in the port structure */
+ 	nesvnic->netdev = netdev;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/mlx4_en_0099_no_multiqueue.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/mlx4_en_0099_no_multiqueue.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/mlx4_en_0099_no_multiqueue.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,73 @@
+diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
+index bce700a..a9ee0d7 100644
+--- a/drivers/net/mlx4/en_netdev.c
++++ b/drivers/net/mlx4/en_netdev.c
+@@ -963,7 +963,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	int i;
+ 	int err;
+ 
+-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
++	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+ 	if (dev == NULL) {
+ 		mlx4_err(mdev, "Net device allocation failed\n");
+ 		return -ENOMEM;
+@@ -1036,7 +1036,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	dev->open = &mlx4_en_open;
+ 	dev->stop = &mlx4_en_close;
+ 	dev->hard_start_xmit = &mlx4_en_xmit;
+-	dev->select_queue = &mlx4_en_select_queue;
+ 	dev->get_stats = &mlx4_en_get_stats;
+ 	dev->set_multicast_list = &mlx4_en_set_multicast;
+ 	dev->set_mac_address = &mlx4_en_set_mac;
+diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
+index 3d8246f..72e166f 100644
+--- a/drivers/net/mlx4/en_tx.c
++++ b/drivers/net/mlx4/en_tx.c
+@@ -392,7 +392,7 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+ 			 *   transmission on that ring would stop the queue.
+ 			 */
+ 			ring->blocked = 0;
+-			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
++			netif_wake_queue(dev);
+ 			priv->port_stats.wake_queue++;
+ 		}
+ 	}
+@@ -612,7 +612,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
+ 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ }
+ 
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
++static int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	u16 vlan_tag = 0;
+@@ -703,7 +703,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+-	tx_ind = skb->queue_mapping;
++	tx_ind = mlx4_en_select_queue(dev, skb);
+ 	ring = &priv->tx_ring[tx_ind];
+ 	if (priv->vlgrp && vlan_tx_tag_present(skb))
+ 		vlan_tag = vlan_tx_tag_get(skb);
+@@ -713,7 +713,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ 		/* every full Tx ring stops queue.
+ 		 * TODO: implement multi-queue support (per-queue stop) */
+-		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
++		netif_stop_queue(dev);
+ 		ring->blocked = 1;
+ 		priv->port_stats.queue_stopped++;
+ 
+diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
+index adef17c..995e318 100644
+--- a/drivers/net/mlx4/mlx4_en.h
++++ b/drivers/net/mlx4/mlx4_en.h
+@@ -521,7 +521,6 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq);
+ void mlx4_en_poll_tx_cq(unsigned long data);
+ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+ 
+ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+ 			   u32 size, u16 stride);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/mlx4_en_0100_to_2.6.24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/mlx4_en_0100_to_2.6.24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/mlx4_en_0100_to_2.6.24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,33 +1,5 @@
-From 6d0a6cc6981404e95297924c9a8f169b3447a98e Mon Sep 17 00:00:00 2001
-From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
-Date: Thu, 2 Oct 2008 11:26:30 +0300
-Subject: [PATCH] mlx4_en: LRO backport patch
-
-Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
----
- drivers/net/mlx4/Makefile    |    2 +-
- drivers/net/mlx4/en_cq.c     |   23 ++-
- drivers/net/mlx4/en_lro.c    |  529 ++++++++++++++++++++++++++++++++++++++++++
- drivers/net/mlx4/en_netdev.c |    5 +-
- drivers/net/mlx4/en_params.c |   19 +--
- drivers/net/mlx4/en_rx.c     |  121 +++-------
- drivers/net/mlx4/en_tx.c     |    1 +
- drivers/net/mlx4/mlx4_en.h   |   60 +++++-
- 8 files changed, 646 insertions(+), 114 deletions(-)
- create mode 100644 drivers/net/mlx4/en_lro.c
-
-diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
-index acb1b91..e6bfdec 100644
---- a/drivers/net/mlx4/Makefile
-+++ b/drivers/net/mlx4/Makefile
-@@ -5,4 +5,4 @@ mlx4_core-y :=  alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-
- obj-$(CONFIG_MLX4_EN)                  += mlx4_en.o
-
--mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o
-+mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o en_lro.o
 diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
-index cf9c5df..2348bfc 100644
+index a4d6e46..440633a 100644
 --- a/drivers/net/mlx4/en_cq.c
 +++ b/drivers/net/mlx4/en_cq.c
 @@ -34,6 +34,7 @@
@@ -38,7 +10,7 @@
  
  #include "mlx4_en.h"
  
-@@ -100,8 +101,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -106,8 +107,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  		cq->timer.function = mlx4_en_poll_tx_cq;
  		cq->timer.data = (unsigned long) cq;
  	} else {
@@ -58,7 +30,7 @@
  	}
  
  	return 0;
-@@ -123,8 +133,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -129,8 +139,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  
  	if (cq->is_tx)
  		del_timer(&cq->timer);
@@ -74,548 +46,8 @@
  
  	mlx4_cq_free(mdev->dev, &cq->mcq);
  }
-diff --git a/drivers/net/mlx4/en_lro.c b/drivers/net/mlx4/en_lro.c
-new file mode 100644
-index 0000000..9d17376
---- /dev/null
-+++ b/drivers/net/mlx4/en_lro.c
-@@ -0,0 +1,534 @@
-+/*
-+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses.  You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ *     Redistribution and use in source and binary forms, with or
-+ *     without modification, are permitted provided that the following
-+ *     conditions are met:
-+ *
-+ *      - Redistributions of source code must retain the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer.
-+ *
-+ *      - Redistributions in binary form must reproduce the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer in the documentation and/or other materials
-+ *        provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ */
-+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <net/tcp.h>
-+#include <linux/if_vlan.h>
-+#include <linux/delay.h>
-+
-+#include "mlx4_en.h"
-+
-+/* LRO hash function - using sum of source and destination port LSBs is
-+ * good enough */
-+#define LRO_INDEX(th, size) \
-+	((*((u8*) &th->source + 1) + *((u8*) &th->dest + 1)) & (size - 1))
-+
-+/* #define CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+static void mlx4_en_lro_validate(struct mlx4_en_priv* priv, struct mlx4_en_lro *lro)
-+{
-+	int i;
-+	int size, size2;
-+	struct sk_buff *skb = lro->skb;
-+	skb_frag_t *frags;
-+	int len, len2;
-+	int cur_skb = 0;
-+
-+	/* Sum fragment sizes of first skb */
-+	len = skb->len;
-+	size = skb_headlen(skb);
-+	frags = skb_shinfo(skb)->frags;
-+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+		size += frags[i].size;
-+	}
-+
-+	/* Add in fragments of linked skb's */
-+	skb = skb_shinfo(skb)->frag_list;
-+	while (skb) {
-+		cur_skb++;
-+		len2 = skb->len;
-+		if (skb_headlen(skb)) {
-+			mlx4_err(priv->mdev, "Bad LRO format: non-zero headlen "
-+				  "in fraglist (skb:%d)\n", cur_skb);
-+			return;
-+		}
-+
-+		size2 = 0;
-+		frags = skb_shinfo(skb)->frags;
-+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+			size2 += frags[i].size;
-+		}
-+
-+		if (size2 != len2) {
-+			mlx4_err(priv->mdev, "Bad skb size:%d in LRO fraglist. "
-+			          "Expected:%d (skb:%d)\n", size2, len2, cur_skb);
-+			return;
-+		}
-+		size += size2;
-+		skb = skb->next;
-+	}
-+
-+	if (size != len)
-+		mlx4_err(priv->mdev, "Bad LRO size:%d expected:%d\n", size, len);
-+}
-+#endif /* MLX4_EN_DEBUG_LRO */
-+
-+static void mlx4_en_lro_flush_single(struct mlx4_en_priv* priv,
-+		   struct mlx4_en_rx_ring* ring, struct mlx4_en_lro *lro)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct sk_buff *skb = lro->skb;
-+	struct iphdr *iph = (struct iphdr *) skb->data;
-+	struct tcphdr *th = (struct tcphdr *)(iph + 1);
-+	unsigned int headlen = skb_headlen(skb);
-+	__wsum tcp_hdr_csum;
-+	u32 *ts;
-+
-+	/* Update IP length and checksum */
-+	iph->tot_len = htons(lro->tot_len);
-+	iph->check = 0;
-+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-+
-+	/* Update latest TCP ack, window, psh, and timestamp */
-+	th->ack_seq = lro->ack_seq;
-+	th->window = lro->window;
-+	th->psh = !!lro->psh;
-+	if (lro->has_timestamp) {
-+		ts = (u32 *) (th + 1);
-+		ts[1] = htonl(lro->tsval);
-+		ts[2] = lro->tsecr;
-+	}
-+	th->check = 0;
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, 0);
-+	lro->data_csum = csum_add(lro->data_csum, tcp_hdr_csum);
-+	th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
-+				      lro->tot_len - (iph->ihl << 2),
-+				      IPPROTO_TCP, lro->data_csum);
-+
-+	/* Update skb */
-+	skb->len = lro->tot_len;
-+	skb->data_len = lro->tot_len - headlen;
-+	skb->truesize = skb->len + sizeof(struct sk_buff);
-+	skb_shinfo(skb)->gso_size = lro->mss;
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+	mlx4_en_lro_validate(priv, lro);
-+#endif /* CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+	/* Push it up the stack */
-+	if (priv->vlgrp && lro->has_vlan)
-+		vlan_hwaccel_receive_skb(skb, priv->vlgrp,
-+					be16_to_cpu(lro->vlan_prio));
-+	else
-+		netif_receive_skb(skb);
-+	priv->dev->last_rx = jiffies;
-+
-+	/* Increment stats */
-+	priv->port_stats.lro_flushed++;
-+
-+	/* Move session back to the free list */
-+	hlist_del(&lro->node);
-+	hlist_del(&lro->flush_node);
-+	hlist_add_head(&lro->node, &ring->lro_free);
-+}
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_flush, flush_node) {
-+		if (all || time_after(jiffies, lro->expires))
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+	}
-+}
-+
-+static inline int mlx4_en_lro_append(struct mlx4_en_priv *priv,
-+				   struct mlx4_en_lro *lro,
-+				   struct mlx4_en_rx_desc *rx_desc,
-+				   struct skb_frag_struct *skb_frags,
-+				   struct mlx4_en_rx_alloc *page_alloc,
-+				   unsigned int data_len,
-+				   int hlen)
-+{
-+	struct sk_buff *skb = lro->skb_last;
-+	struct skb_shared_info *info;
-+	struct skb_frag_struct *frags_copy;
-+	int nr_frags;
-+
-+	if (skb_shinfo(skb)->nr_frags + priv->num_frags > MAX_SKB_FRAGS)
-+		return -ENOMEM;
-+
-+	info = skb_shinfo(skb);
-+
-+	/* Copy fragments from descriptor ring to skb */
-+	frags_copy = info->frags + info->nr_frags;
-+	nr_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
-+						frags_copy,
-+						page_alloc,
-+						data_len + hlen);
-+	if (!nr_frags) {
-+		mlx4_dbg(DRV, priv, "Failed completing rx desc during LRO append\n");
-+		return -ENOMEM;
-+	}
-+
-+	/* Skip over headers */
-+	frags_copy[0].page_offset += hlen;
-+
-+	if (nr_frags == 1)
-+		frags_copy[0].size = data_len;
-+	else {
-+		/* Adjust size of last fragment to match packet length.
-+		 * Note: if this fragment is also the first one, the
-+		 *       operation is completed in the next line */
-+		frags_copy[nr_frags - 1].size = hlen + data_len -
-+				priv->frag_info[nr_frags - 1].frag_prefix_size;
-+
-+		/* Adjust size of first fragment */
-+		frags_copy[0].size -= hlen;
-+	}
-+
-+	/* Update skb bookkeeping */
-+	skb->len += data_len;
-+	skb->data_len += data_len;
-+	info->nr_frags += nr_frags;
-+	return 0;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_find_session(struct mlx4_en_dev *mdev,
-+						       struct mlx4_en_rx_ring *ring,
-+						       struct iphdr *iph,
-+						       struct tcphdr *th)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node;
-+	int index = LRO_INDEX(th, mdev->profile.num_lro);
-+	struct hlist_head *list = &ring->lro_hash[index];
-+
-+	hlist_for_each_entry(lro, node, list, node) {
-+		if (lro->sport_dport == *((u32*) &th->source) &&
-+		    lro->saddr == iph->saddr &&
-+		    lro->daddr == iph->daddr)
-+			return lro;
-+	}
-+	return NULL;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_alloc_session(struct mlx4_en_priv *priv,
-+							struct mlx4_en_rx_ring *ring)
-+{
-+	return hlist_empty(&ring->lro_free) ? NULL :
-+		hlist_entry(ring->lro_free.first, struct mlx4_en_lro, node);
-+}
-+
-+static __wsum mlx4_en_lro_tcp_data_csum(struct iphdr *iph,
-+					struct tcphdr *th, int len)
-+{
-+	__wsum tcp_csum;
-+	__wsum tcp_hdr_csum;
-+	__wsum tcp_ps_hdr_csum;
-+
-+	tcp_csum = ~csum_unfold(th->check);
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, tcp_csum);
-+
-+	tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-+					     len + (th->doff << 2),
-+					     IPPROTO_TCP, 0);
-+
-+	return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
-+			tcp_ps_hdr_csum);
-+}
-+
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+					  struct mlx4_en_rx_desc *rx_desc,
-+					  struct skb_frag_struct *skb_frags,
-+					  unsigned int length,
-+					  struct mlx4_cqe *cqe)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct mlx4_en_lro *lro;
-+	struct sk_buff *skb;
-+	struct iphdr *iph;
-+	struct tcphdr *th;
-+	dma_addr_t dma;
-+	int tcp_hlen;
-+	int tcp_data_len;
-+	int hlen;
-+	u16 ip_len;
-+	void *va;
-+	u32 *ts;
-+	u32 seq;
-+	u32 tsval = (u32) ~0UL;
-+	u32 tsecr = 0;
-+	u32 ack_seq;
-+	u16 window;
-+
-+	/* This packet is eligible for LRO if it is:
-+	 * - DIX Ethernet (type interpretation)
-+	 * - TCP/IP (v4)
-+	 * - without IP options
-+	 * - not an IP fragment */
-+	if (!mlx4_en_can_lro(cqe->status))
-+			return -1;
-+
-+	/* Get pointer to TCP header. We already know that the packet is DIX Ethernet/IPv4/TCP
-+	 * with no VLAN (HW stripped it) and no IP options */
-+	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
-+	iph = va + ETH_HLEN;
-+	th = (struct tcphdr *)(iph + 1);
-+
-+	/* Synchronsize headers for processing */
-+	dma = be64_to_cpu(rx_desc->data[0].addr);
-+#define MAX_LRO_HEADER		(ETH_HLEN + \
-+				 sizeof(*iph) + \
-+				 sizeof(*th) + \
-+				 TCPOLEN_TSTAMP_ALIGNED)
-+	dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
-+				      MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+
-+	/* We only handle aligned timestamp options */
-+	tcp_hlen = (th->doff << 2);
-+	if (tcp_hlen == sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) {
-+		ts = (u32*) (th + 1);
-+		if (unlikely(*ts != htonl((TCPOPT_NOP << 24) |
-+					  (TCPOPT_NOP << 16) |
-+					  (TCPOPT_TIMESTAMP << 8) |
-+					  TCPOLEN_TIMESTAMP)))
-+			goto sync_device;
-+		tsval = ntohl(ts[1]);
-+		tsecr = ts[2];
-+	} else if (tcp_hlen != sizeof(*th))
-+		goto sync_device;
-+	
-+
-+	/* At this point we know we have a TCP packet that is likely to be
-+	 * eligible for LRO. Therefore, see now if we have an oustanding
-+	 * session that corresponds to this packet so we could flush it if
-+	 * something still prevents LRO */
-+	lro = mlx4_en_lro_find_session(mdev, ring, iph, th);
-+
-+	/* ensure no bits set besides ack or psh */
-+	if (th->fin || th->syn || th->rst || th->urg || th->ece ||
-+	    th->cwr || !th->ack) {
-+		if (lro) {
-+			/* First flush session to keep packets in-order */
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		}
-+		goto sync_device;
-+	}
-+
-+	/* Get ip length and verify that the frame is big enough */
-+	ip_len = ntohs(iph->tot_len);
-+	if (unlikely(length < ETH_HLEN + ip_len)) {
-+		mlx4_warn(mdev, "Cannot LRO - ip payload exceeds frame!\n");
-+		goto sync_device;
-+	}
-+
-+	/* Get TCP payload length */
-+	tcp_data_len = ip_len - tcp_hlen - sizeof(struct iphdr);
-+	seq = ntohl(th->seq);
-+	if (!tcp_data_len)
-+		goto flush_session;
-+
-+	if (lro) {
-+		/* Check VLAN tag */
-+		if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+			if (cqe->sl_vid != lro->vlan_prio || !lro->has_vlan) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else if (lro->has_vlan) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* Check sequence number */
-+		if (unlikely(seq != lro->next_seq)) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* If the cummulative IP length is over 64K, flush and start
-+		 * a new session */
-+		if (lro->tot_len + tcp_data_len > 0xffff) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto new_session;
-+		}
-+
-+		/* Check timestamps */
-+		if (tcp_hlen != sizeof(*th)) {
-+			if (unlikely(lro->tsval > tsval || !tsecr))
-+				goto sync_device;
-+		}
-+
-+		window = th->window;
-+		ack_seq = th->ack_seq;
-+		if (likely(tcp_data_len)) {
-+			/* Append the data! */
-+			hlen = ETH_HLEN + sizeof(struct iphdr) + tcp_hlen;
-+			if (mlx4_en_lro_append(priv, lro, rx_desc, skb_frags,
-+							ring->page_alloc,
-+							tcp_data_len, hlen)) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else {
-+			/* No data */
-+			dma_sync_single_range_for_device(&mdev->dev->pdev->dev, dma,
-+							 0, MAX_LRO_HEADER,
-+							 DMA_FROM_DEVICE);
-+		}
-+
-+		/* Update session */
-+		lro->psh |= th->psh;
-+		lro->next_seq += tcp_data_len;
-+		lro->data_csum = csum_block_add(lro->data_csum,
-+					mlx4_en_lro_tcp_data_csum(iph, th,
-+								  tcp_data_len),
-+					lro->tot_len);
-+		lro->tot_len += tcp_data_len;
-+		lro->tsval = tsval;
-+		lro->tsecr = tsecr;
-+		lro->ack_seq = ack_seq;
-+		lro->window = window;
-+		if (tcp_data_len > lro->mss)
-+			lro->mss = tcp_data_len;
-+		priv->port_stats.lro_aggregated++;
-+		if (th->psh)
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		return 0;
-+	}
-+
-+new_session:
-+	if (th->psh)
-+		goto sync_device;
-+	lro = mlx4_en_lro_alloc_session(priv, ring);
-+	if (lro) {
-+		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags, ring->page_alloc,
-+							     ETH_HLEN + ip_len);
-+		if (skb) {
-+			int index;
-+
-+			/* Add in the skb */
-+			lro->skb = skb;
-+			lro->skb_last = skb;
-+			skb->protocol = eth_type_trans(skb, priv->dev);
-+			skb->ip_summed = CHECKSUM_UNNECESSARY;
-+
-+			/* Initialize session */
-+			lro->saddr = iph->saddr;
-+			lro->daddr = iph->daddr;
-+			lro->sport_dport = *((u32*) &th->source);
-+
-+			lro->next_seq = seq + tcp_data_len;
-+			lro->tot_len = ip_len;
-+			lro->psh = th->psh;
-+			lro->ack_seq = th->ack_seq;
-+			lro->window = th->window;
-+			lro->mss = tcp_data_len;
-+			lro->data_csum = mlx4_en_lro_tcp_data_csum(iph, th,
-+						tcp_data_len);
-+
-+			/* Handle vlans */
-+			if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+				lro->vlan_prio = cqe->sl_vid;
-+				lro->has_vlan = 1;
-+			} else
-+				lro->has_vlan = 0;
-+
-+			/* Handle timestamps */
-+			if (tcp_hlen != sizeof(*th)) {
-+				lro->tsval = tsval;
-+				lro->tsecr = tsecr;
-+				lro->has_timestamp = 1;
-+			} else {
-+				lro->tsval = (u32) ~0UL;
-+				lro->has_timestamp = 0;
-+			}
-+
-+			/* Activate this session */
-+			lro->expires = jiffies + HZ / 25;
-+			hlist_del(&lro->node);
-+			index = LRO_INDEX(th, mdev->profile.num_lro);
-+
-+			hlist_add_head(&lro->node, &ring->lro_hash[index]);
-+			hlist_add_head(&lro->flush_node, &ring->lro_flush);
-+			priv->port_stats.lro_aggregated++;
-+			return 0;
-+		}
-+	} else {
-+		priv->port_stats.lro_no_desc++;
-+	}
-+
-+flush_session:
-+	if (lro)
-+		mlx4_en_lro_flush_single(priv, ring, lro);
-+sync_device:
-+	dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
-+					 MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+	return -1;
-+}
-+
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_free, node) {
-+		hlist_del(&lro->node);
-+		kfree(lro);
-+	}
-+	kfree(ring->lro_hash);
-+}
-+
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro)
-+{
-+	struct mlx4_en_lro *lro;
-+	int i;
-+
-+	INIT_HLIST_HEAD(&ring->lro_free);
-+	INIT_HLIST_HEAD(&ring->lro_flush);
-+	ring->lro_hash = kmalloc(sizeof(struct hlist_head) * num_lro,
-+				 GFP_KERNEL);
-+	if (!ring->lro_hash)
-+		return -ENOMEM;
-+
-+	for (i = 0; i < num_lro; i++) {
-+		INIT_HLIST_HEAD(&ring->lro_hash[i]);
-+		lro = kzalloc(sizeof(struct mlx4_en_lro), GFP_KERNEL);
-+		if (!lro) {
-+			mlx4_en_lro_destroy(ring);
-+			return -ENOMEM;
-+		}
-+		INIT_HLIST_NODE(&lro->node);
-+		INIT_HLIST_NODE(&lro->flush_node);
-+		hlist_add_head(&lro->node, &ring->lro_free);
-+	}
-+	return 0;
-+}
-+
-+
 diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
-index 6199f4c..92f91d2 100644
+index 2d57ae6..3b394d7 100644
 --- a/drivers/net/mlx4/en_netdev.c
 +++ b/drivers/net/mlx4/en_netdev.c
 @@ -333,7 +333,8 @@ void mlx4_en_netpoll(struct net_device *dev)
@@ -625,10 +57,10 @@
 -		napi_synchronize(&cq->napi);
 +		while (test_bit(__LINK_STATE_RX_SCHED, &cq->poll_dev->state))
 +			msleep(1);
- 		mlx4_en_process_rx_cq(dev, cq, 0);
- 		spin_unlock_irqrestore(&cq->lock, flags);
- 	}
-@@ -757,8 +758,6 @@ void mlx4_en_stop_port(struct net_device *dev)
+ 		if (priv->rx_ring[i].use_frags)
+ 			mlx4_en_process_rx_cq(dev, cq, 0);
+ 		else
+@@ -755,8 +756,6 @@ void mlx4_en_stop_port(struct net_device *dev)
  	/* Free RX Rings */
  	for (i = 0; i < priv->rx_ring_num; i++) {
  		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -637,226 +69,11 @@
  		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
  	}
  }
-diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
-index 6e96d0a..2b0ca84 100644
---- a/drivers/net/mlx4/en_params.c
-+++ b/drivers/net/mlx4/en_params.c
-@@ -142,21 +142,6 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
-  * Ethtool support
-  */
- 
--static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
--{
--	int i;
--
--	priv->port_stats.lro_aggregated = 0;
--	priv->port_stats.lro_flushed = 0;
--	priv->port_stats.lro_no_desc = 0;
--
--	for (i = 0; i < priv->rx_ring_num; i++) {
--		priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
--		priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
--		priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
--	}
--}
--
- static void
- mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
- {
-@@ -266,8 +251,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
- 
- 	spin_lock_bh(&priv->stats_lock);
- 
--	mlx4_en_update_lro_stats(priv);
--
- 	for (i = 0; i < NUM_MAIN_STATS; i++)
- 		data[index++] = ((unsigned long *) &priv->stats)[i];
- 	for (i = 0; i < NUM_PORT_STATS; i++)
-@@ -457,7 +440,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
- 	.get_rx_csum = mlx4_en_get_rx_csum,
- 	.set_rx_csum = mlx4_en_set_rx_csum,
- 	.get_tx_csum = ethtool_op_get_tx_csum,
--	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
-+	.set_tx_csum = ethtool_op_set_tx_csum,
- 	.get_strings = mlx4_en_get_strings,
- 	.get_sset_count = mlx4_en_get_sset_count,
- 	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
 diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
-index 5a95d74..16f07bc 100644
+index 9d144cd..14b5faf 100644
 --- a/drivers/net/mlx4/en_rx.c
 +++ b/drivers/net/mlx4/en_rx.c
-@@ -37,6 +37,7 @@
- #include <linux/if_ether.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/etherdevice.h>
- 
- #include "mlx4_en.h"
- 
-@@ -51,18 +52,6 @@ static void mlx4_en_srq_event(struct mlx
- 	return;
- }
- 
--static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
--				   void **ip_hdr, void **tcpudp_hdr,
--				   u64 *hdr_flags, void *priv)
--{
--	*mac_hdr = page_address(frags->page) + frags->page_offset;
--	*ip_hdr = *mac_hdr + ETH_HLEN;
--	*tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
--	*hdr_flags = LRO_IPV4 | LRO_TCP;
--
--	return 0;
--}
--
- static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
- 			      struct mlx4_en_rx_desc *rx_desc,
- 			      struct skb_frag_struct *skb_frags,
-@@ -372,23 +361,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- 	}
- 	ring->buf = ring->wqres.buf.direct.buf;
- 
--	/* Configure lro mngr */
--	memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
--	ring->lro.dev = priv->dev;
--	ring->lro.features = LRO_F_NAPI;
--	ring->lro.frag_align_pad = NET_IP_ALIGN;
--	ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
--	ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
--	ring->lro.max_desc = mdev->profile.num_lro;
--	ring->lro.max_aggr = MAX_SKB_FRAGS;
--	ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
--				    sizeof(struct net_lro_desc),
--				    GFP_KERNEL);
--	if (!ring->lro.lro_arr) {
--		mlx4_err(mdev, "Failed to allocate lro array\n");
-+	/* Allocate LRO sessions */
-+	if (mlx4_en_lro_init(ring, mdev->profile.num_lro)) {
-+		mlx4_err(mdev, "Failed allocating lro sessions\n");
- 		goto err_map;
- 	}
--	ring->lro.get_frag_header = mlx4_en_get_frag_header;
- 
- 	return 0;
- 
-@@ -501,7 +478,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 
--	kfree(ring->lro.lro_arr);
-+	mlx4_en_lro_destroy(ring);
- 	mlx4_en_unmap_buffer(&ring->wqres.buf);
- 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
- 	vfree(ring->rx_info);
-@@ -522,12 +499,12 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- 
- 
- /* Unmap a completed descriptor and free unused pages */
--static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
--				    struct mlx4_en_rx_desc *rx_desc,
--				    struct skb_frag_struct *skb_frags,
--				    struct skb_frag_struct *skb_frags_rx,
--				    struct mlx4_en_rx_alloc *page_alloc,
--				    int length)
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct mlx4_en_frag_info *frag_info;
-@@ -570,11 +547,11 @@ fail:
- }
- 
- 
--static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
--				      struct mlx4_en_rx_desc *rx_desc,
--				      struct skb_frag_struct *skb_frags,
--				      struct mlx4_en_rx_alloc *page_alloc,
--				      unsigned int length)
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct sk_buff *skb;
-@@ -663,11 +640,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 	struct mlx4_cqe *cqe;
- 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
- 	struct skb_frag_struct *skb_frags;
--	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
- 	struct mlx4_en_rx_desc *rx_desc;
- 	struct sk_buff *skb;
- 	int index;
--	int nr;
- 	unsigned int length;
- 	int polled = 0;
- 	int ip_summed;
-@@ -718,38 +693,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 			if ((cqe->status & MLX4_CQE_STATUS_IPOK) &&
- 			    (cqe->checksum == 0xffff)) {
- 				priv->port_stats.rx_chksum_good++;
--				/* This packet is eligible for LRO if it is:
--				 * - DIX Ethernet (type interpretation)
--				 * - TCP/IP (v4)
--				 * - without IP options
--				 * - not an IP fragment */
--				if (mlx4_en_can_lro(cqe->status) &&
--				    dev->features & NETIF_F_LRO) {
--
--					nr = mlx4_en_complete_rx_desc(
--						priv, rx_desc,
--						skb_frags, lro_frags,
--						ring->page_alloc, length);
--					if (!nr)
--						goto next;
--
--					if (priv->vlgrp && (cqe->vlan_my_qpn &
--						MLX4_CQE_VLAN_PRESENT_MASK)) {
--						lro_vlan_hwaccel_receive_frags(
--						       &ring->lro, lro_frags,
--						       length, length,
--						       priv->vlgrp,
--						       be16_to_cpu(cqe->sl_vid),
--						       NULL, 0);
--					} else
--						lro_receive_frags(&ring->lro,
--								  lro_frags,
--								  length,
--								  length,
--								  NULL, 0);
--
-+				if (mdev->profile.num_lro &&
-+				    !mlx4_en_lro_rx(priv, ring, rx_desc,
-+						    skb_frags, length, cqe))
- 					goto next;
--				}
- 
- 				/* LRO not possible, complete processing here */
- 				ip_summed = CHECKSUM_UNNECESSARY;
-@@ -790,13 +737,15 @@ next:
- 		if (++polled == budget) {
- 			/* We are here because we reached the NAPI budget -
- 			 * flush only pending LRO sessions */
--			lro_flush_all(&ring->lro);
-+			if (mdev->profile.num_lro)
-+				mlx4_en_lro_flush(priv, ring, 0);
- 			goto out;
- 		}
- 	}
- 
- 	/* If CQ is empty flush all LRO sessions unconditionally */
--	lro_flush_all(&ring->lro);
-+	if (mdev->profile.num_lro)
-+		mlx4_en_lro_flush(priv, ring, 1);
- 
- out:
- 	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
-@@ -820,30 +769,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
+@@ -1041,30 +1041,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
  	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
  
  	if (priv->port_up)
@@ -877,8 +94,8 @@
  	int done;
 +	int work = min(*budget, poll_dev->quota);
  
--	done = mlx4_en_process_rx_cq(dev, cq, budget);
-+	done = mlx4_en_process_rx_cq(dev, cq, work);
+-	done = cq->process_cq(dev, cq, budget);
++	done = cq->process_cq(dev, cq, work);
 +	dev->quota -= done;
 +	*budget -= done;
  
@@ -901,76 +118,11 @@
  }
  
  
-diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
-index 2297fec..59f8772 100644
---- a/drivers/net/mlx4/en_tx.c
-+++ b/drivers/net/mlx4/en_tx.c
-@@ -37,6 +37,7 @@
- #include <linux/skbuff.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/tcp.h>
- 
- #include "mlx4_en.h"
- 
 diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
-index 1011803..7c8c489 100644
+index ebaed67..456cd91 100644
 --- a/drivers/net/mlx4/mlx4_en.h
 +++ b/drivers/net/mlx4/mlx4_en.h
-@@ -38,7 +38,7 @@
- #include <linux/list.h>
- #include <linux/mutex.h>
- #include <linux/netdevice.h>
--#include <linux/inet_lro.h>
-+#include <net/checksum.h>
- 
- #include <linux/mlx4/device.h>
- #include <linux/mlx4/qp.h>
-@@ -268,11 +268,42 @@ struct mlx4_en_rx_desc {
- 	struct mlx4_wqe_data_seg data[0];
- };
- 
-+struct mlx4_en_lro {
-+	struct hlist_node node;
-+	struct hlist_node flush_node;
-+
-+	/* Id fields come first: */
-+	u32 saddr;
-+	u32 daddr;
-+	u32 sport_dport;
-+	u32 next_seq;
-+	u16 tot_len;
-+	u8 psh;
-+
-+	u32 tsval;
-+	u32 tsecr;
-+	u32 ack_seq;
-+	u16 window;
-+	__be16 vlan_prio;
-+	u16 has_vlan;
-+	u16 has_timestamp;
-+	u16 mss;
-+	__wsum  data_csum;
-+
-+	unsigned long expires;
-+	struct sk_buff *skb;
-+	struct sk_buff *skb_last;
-+};
-+
-+
- struct mlx4_en_rx_ring {
- 	struct mlx4_srq srq;
- 	struct mlx4_hwq_resources wqres;
- 	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
--	struct net_lro_mgr lro;
-+	struct mlx4_en_lro lro;
-+	struct hlist_head *lro_hash;
-+	struct hlist_head lro_free;
-+	struct hlist_head lro_flush;
- 	u32 size ;	/* number of Rx descs*/
- 	u32 actual_size;
- 	u32 size_mask;
-@@ -311,7 +342,7 @@ struct mlx4_en_cq {
+@@ -328,7 +328,7 @@ struct mlx4_en_cq {
  	int                     ring;
  	spinlock_t              lock;
  	struct net_device      *dev;
@@ -979,43 +131,12 @@
  	/* Per-core Tx cq processing support */
  	struct timer_list timer;
  	int size;
-@@ -541,7 +572,7 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- int mlx4_en_process_rx_cq(struct net_device *dev,
- 			  struct mlx4_en_cq *cq,
- 			  int budget);
+@@ -571,7 +571,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
+ int mlx4_en_process_rx_cq_skb(struct net_device *dev,
+ 			      struct mlx4_en_cq *cq,
+ 			      int budget);
 -int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
 +int mlx4_en_poll_rx_cq(struct net_device *poll_dev, int *budget);;
  void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
  			     int is_tx, int rss, int qpn, int cqn, int srqn,
  			     struct mlx4_qp_context *context);
-@@ -552,6 +583,27 @@ void mlx4_en_calc_rx_buf(struct net_device *dev);
- void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
- 				 struct mlx4_en_rss_map *rss_map,
- 				 int num_entries, int num_rings);
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all);
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+		   struct mlx4_en_rx_desc *rx_desc,
-+		   struct skb_frag_struct *skb_frags,
-+		   unsigned int length, struct mlx4_cqe *cqe);
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring);
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro);
-+
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv* priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length);
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv* priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length);
-+
- void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
- int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
- void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
--- 
-1.5.4
-
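
The en_lro.c code carried in the patch above keys its receive-offload sessions
with a small per-ring hash: the bucket index is built from the low byte of each
TCP port, a candidate session is then matched on the full source/destination
address and port pair, and sessions are flushed once a deadline passes or the
CQ drains.  The standalone C sketch below only mirrors that lookup/flush flow
in userspace, under simplifying assumptions: one session per bucket instead of
the driver's per-bucket hlists, and invented names (lro_session, lro_table,
lro_find, ...) that do not appear in the patch.

/*
 * Illustrative userspace sketch (not part of the patch): a direct-hash
 * session lookup and timeout flush, with one session per bucket rather
 * than the driver's per-bucket hlists.  All names are invented.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

#define LRO_TABLE_SIZE 32	/* must stay a power of two */

struct lro_session {
	uint32_t saddr, daddr;	/* IPv4 addresses of the flow */
	uint16_t sport, dport;	/* TCP ports (host order in this sketch) */
	uint32_t next_seq;	/* next expected TCP sequence number */
	time_t expires;		/* flush deadline */
	int in_use;
};

struct lro_table {
	struct lro_session slot[LRO_TABLE_SIZE];
};

/* Same idea as LRO_INDEX() above: add the low bytes of the two ports and
 * mask by the (power of two) table size. */
static unsigned int lro_index(uint16_t sport, uint16_t dport)
{
	return ((sport & 0xff) + (dport & 0xff)) & (LRO_TABLE_SIZE - 1);
}

static struct lro_session *lro_find(struct lro_table *t, uint32_t saddr,
				    uint32_t daddr, uint16_t sport,
				    uint16_t dport)
{
	struct lro_session *s = &t->slot[lro_index(sport, dport)];

	if (s->in_use && s->saddr == saddr && s->daddr == daddr &&
	    s->sport == sport && s->dport == dport)
		return s;
	return NULL;		/* slot free or owned by another flow */
}

static void lro_flush_expired(struct lro_table *t, time_t now)
{
	int i;

	for (i = 0; i < LRO_TABLE_SIZE; i++)
		if (t->slot[i].in_use && now >= t->slot[i].expires)
			t->slot[i].in_use = 0;	/* the driver would hand the
						 * merged skb up the stack here */
}

int main(void)
{
	struct lro_table t;
	unsigned int idx;

	memset(&t, 0, sizeof(t));

	/* Open a session for a made-up flow, then look it up again. */
	idx = lro_index(0x1234, 80);
	t.slot[idx] = (struct lro_session){
		.saddr = 0x0a000001, .daddr = 0x0a000002,
		.sport = 0x1234, .dport = 80,
		.next_seq = 1000, .expires = time(NULL) + 1, .in_use = 1,
	};
	printf("lookup #1: %p\n",
	       (void *)lro_find(&t, 0x0a000001, 0x0a000002, 0x1234, 80));

	/* Two seconds "later" the flush pass retires the session. */
	lro_flush_expired(&t, time(NULL) + 2);
	printf("lookup #2: %p\n",
	       (void *)lro_find(&t, 0x0a000001, 0x0a000002, 0x1234, 80));
	return 0;
}

Compiled with "gcc -std=c99", the second lookup fails because the flush pass
retired the session, which is the behaviour the driver's timeout path in
mlx4_en_lro_flush() produces.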

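Most of the remaining hunks in that patch convert mlx4_en_poll_rx_cq() from the
napi_struct interface to the older net_device->poll() convention, where the
callback receives a *budget pointer, limits itself to the device quota, charges
the work it did against both, and returns 1 to stay on the poll list.  The
sketch below is only a userspace illustration of that accounting; fake_netdev,
fake_process_cq and the numbers are invented and are not kernel APIs.

#include <stdio.h>

struct fake_netdev {
	int quota;		/* per-device quota, refilled by the stack */
	int pending;		/* completions still waiting in the "CQ" */
};

/* Stand-in for the CQ processing routine: consume up to 'limit' entries. */
static int fake_process_cq(struct fake_netdev *dev, int limit)
{
	int done = dev->pending < limit ? dev->pending : limit;

	dev->pending -= done;
	return done;
}

/* Old-style poll: return 1 to stay on the poll list, 0 when the ring is
 * drained (the real driver would then call netif_rx_complete() and re-arm
 * the completion interrupt). */
static int fake_poll(struct fake_netdev *dev, int *budget)
{
	int work = *budget < dev->quota ? *budget : dev->quota;
	int done = fake_process_cq(dev, work);

	dev->quota -= done;
	*budget -= done;

	return done == work;	/* quota/budget exhausted: poll again */
}

int main(void)
{
	struct fake_netdev dev = { .quota = 16, .pending = 40 };
	int budget = 64;

	while (fake_poll(&dev, &budget)) {
		dev.quota = 16;	/* the network stack refills the quota */
		printf("still polling, budget left %d\n", budget);
	}
	printf("done, budget left %d, pending %d\n", budget, dev.pending);
	return 0;
}

With quota 16, budget 64 and 40 pending completions, fake_poll() runs three
times and stops once the ring drains, mirroring the "done == work" test the
patched poll routine relies on.
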
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/mlx4_en_0110_no_set_flags.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/mlx4_en_0110_no_set_flags.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/mlx4_en_0110_no_set_flags.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -7,9 +7,9 @@
 --- ofed_kernel.orig/drivers/net/mlx4/en_params.c
 +++ ofed_kernel/drivers/net/mlx4/en_params.c
 @@ -593,8 +593,6 @@ const struct ethtool_ops mlx4_en_ethtool
- 	.get_pauseparam = mlx4_en_get_pauseparam,
  	.set_pauseparam = mlx4_en_set_pauseparam,
  	.get_ringparam = mlx4_en_get_ringparam,
+ 	.set_ringparam = mlx4_en_set_ringparam,
 -	.get_flags = ethtool_op_get_flags,
 -	.set_flags = ethtool_op_set_flags,
  };

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/rnfs_fs.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/rnfs_fs.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/rnfs_fs.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,5 +1,39 @@
+diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
+index cc91227..262397b 100644
+--- a/fs/exportfs/expfs.c
++++ b/fs/exportfs/expfs.c
+@@ -361,11 +361,14 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
+ 	const struct export_operations *nop = mnt->mnt_sb->s_export_op;
+ 	struct dentry *result, *alias;
+ 	int err;
++	__u32 objp[2];
+ 
++	objp[0] = fid->i32.ino;
++	objp[1] = fid->i32.gen;
+ 	/*
+ 	 * Try to get any dentry for the given file handle from the filesystem.
+ 	 */
+-	result = nop->fh_to_dentry(mnt->mnt_sb, fid, fh_len, fileid_type);
++	result = nop->get_dentry(mnt->mnt_sb, &objp);
+ 	if (!result)
+ 		result = ERR_PTR(-ESTALE);
+ 	if (IS_ERR(result))
+@@ -417,11 +420,10 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
+ 		 * file handle.  If this fails we'll have to give up.
+ 		 */
+ 		err = -ESTALE;
+-		if (!nop->fh_to_parent)
++		if (!nop->get_parent)
+ 			goto err_result;
+ 
+-		target_dir = nop->fh_to_parent(mnt->mnt_sb, fid,
+-				fh_len, fileid_type);
++		target_dir = nop->get_parent(result);
+ 		if (!target_dir)
+ 			goto err_result;
+ 		err = PTR_ERR(target_dir);
 diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
-index 0b45fd3..04761ed 100644
+index 0b45fd3..2c45814 100644
 --- a/fs/lockd/clntlock.c
 +++ b/fs/lockd/clntlock.c
 @@ -168,7 +168,7 @@ __be32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *lock
@@ -7,7 +41,7 @@
  		if (!nlm_cmp_addr(&block->b_host->h_addr, addr))
  			continue;
 -		if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_path.dentry->d_inode) ,fh) != 0)
-+		if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_dentry->d_inode) ,fh) != 0)
++		if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_dentry->d_inode), fh) != 0)
  			continue;
  		/* Alright, we found a lock. Set the return status
  		 * and wake up the caller
@@ -31,21 +65,21 @@
  	lock->svid = fl->fl_u.nfs_fl.owner->pid;
  	lock->fl.fl_start = fl->fl_start;
  	lock->fl.fl_end = fl->fl_end;
-diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
-index 5bd9bf0..0dcdb49 100644
---- a/fs/lockd/svc.c
-+++ b/fs/lockd/svc.c
-@@ -506,7 +506,7 @@ module_param(nsm_use_hostnames, bool, 0644);
- static int __init init_nlm(void)
+diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
+index e4d5635..771edc1 100644
+--- a/fs/lockd/mon.c
++++ b/fs/lockd/mon.c
+@@ -194,7 +194,7 @@ static __be32 *xdr_encode_mon_name(__be32 *p, struct nsm_args *argp)
+  */
+ static __be32 *xdr_encode_my_id(__be32 *p, struct nsm_args *argp)
  {
- #ifdef CONFIG_SYSCTL
--	nlm_sysctl_table = register_sysctl_table(nlm_sysctl_root);
-+	nlm_sysctl_table = register_sysctl_table(nlm_sysctl_root, 0);
- 	return nlm_sysctl_table ? 0 : -ENOMEM;
- #else
- 	return 0;
+-	p = xdr_encode_nsm_string(p, utsname()->nodename);
++	p = xdr_encode_nsm_string(p, system_utsname.nodename);
+ 	if (!p)
+ 		return ERR_PTR(-EIO);
+ 
 diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
-index cf0d5c2..40d690a 100644
+index cf0d5c2..2e6ec1b 100644
 --- a/fs/lockd/svclock.c
 +++ b/fs/lockd/svclock.c
 @@ -304,7 +304,7 @@ static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
@@ -68,41 +102,38 @@
  				lock->fl.fl_type, lock->fl.fl_pid,
  				(long long)lock->fl.fl_start,
  				(long long)lock->fl.fl_end,
-@@ -408,18 +408,18 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
- 
- 	if (!wait)
- 		lock->fl.fl_flags &= ~FL_SLEEP;
--	error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
-+	error = posix_lock_file(file->f_file, &lock->fl);
- 	lock->fl.fl_flags &= ~FL_SLEEP;
- 
--	dprintk("lockd: vfs_lock_file returned %d\n", error);
-+	dprintk("lockd: posix_lock_file returned %d\n", error);
- 	switch (error) {
- 		case 0:
+@@ -417,11 +417,18 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
  			ret = nlm_granted;
  			goto out;
  		case -EAGAIN:
- 			ret = nlm_lck_denied;
+-			ret = nlm_lck_denied;
 -			goto out;
--		case FILE_LOCK_DEFERRED:
-+			break;
-+		case -EINPROGRESS:
- 			if (wait)
++			if (wait) {
++				ret = nlm_lck_blocked;
++				break;
++			} else {
++				ret = nlm_lck_denied;
++				goto out;
++			}
+ 		case FILE_LOCK_DEFERRED:
+-			if (wait)
++			if (wait) {
++				ret = nlm_lck_blocked;
  				break;
++			}
  			/* Filesystem lock operation is in progress
-@@ -434,6 +434,10 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
+ 			   Add it to the queue waiting for callback */
+ 			ret = nlmsvc_defer_lock_rqst(rqstp, block);
+@@ -434,8 +441,6 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
  			goto out;
  	}
  
-+	ret = nlm_lck_denied;
-+	if (!wait)
-+		goto out;
-+
- 	ret = nlm_lck_blocked;
- 
+-	ret = nlm_lck_blocked;
+-
  	/* Append to list of blocked */
-@@ -458,8 +462,8 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
+ 	nlmsvc_insert_block(block, NLM_NEVER);
+ out:
+@@ -458,8 +463,8 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
  	__be32			ret;
  
  	dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
@@ -113,18 +144,7 @@
  				lock->fl.fl_type,
  				(long long)lock->fl.fl_start,
  				(long long)lock->fl.fl_end);
-@@ -502,8 +506,8 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
- 		goto out;
- 	}
- 
--	error = vfs_test_lock(file->f_file, &lock->fl);
--	if (error == FILE_LOCK_DEFERRED) {
-+	error = posix_test_lock(file->f_file, &lock->fl, NULL);
-+	if (error == -EINPROGRESS) {
- 		ret = nlmsvc_defer_lock_rqst(rqstp, block);
- 		goto out;
- 	}
-@@ -547,8 +551,8 @@ nlmsvc_unlock(struct nlm_file *file, struct nlm_lock *lock)
+@@ -547,8 +552,8 @@ nlmsvc_unlock(struct nlm_file *file, struct nlm_lock *lock)
  	int	error;
  
  	dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
@@ -135,16 +155,7 @@
  				lock->fl.fl_pid,
  				(long long)lock->fl.fl_start,
  				(long long)lock->fl.fl_end);
-@@ -557,7 +561,7 @@ nlmsvc_unlock(struct nlm_file *file, struct nlm_lock *lock)
- 	nlmsvc_cancel_blocked(file, lock);
- 
- 	lock->fl.fl_type = F_UNLCK;
--	error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
-+	error = posix_lock_file(file->f_file, &lock->fl);
- 
- 	return (error < 0)? nlm_lck_denied_nolocks : nlm_granted;
- }
-@@ -576,8 +580,8 @@ nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
+@@ -576,8 +581,8 @@ nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
  	int status = 0;
  
  	dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
@@ -155,43 +166,80 @@
  				lock->fl.fl_pid,
  				(long long)lock->fl.fl_start,
  				(long long)lock->fl.fl_end);
-@@ -586,8 +590,6 @@ nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
- 	block = nlmsvc_lookup_block(file, lock);
- 	mutex_unlock(&file->f_mutex);
- 	if (block != NULL) {
--		vfs_cancel_lock(block->b_file->f_file,
--				&block->b_call->a_args.lock.fl);
- 		status = nlmsvc_unlink_block(block);
- 		nlmsvc_release_block(block);
- 	}
-@@ -615,7 +617,7 @@ nlmsvc_update_deferred_block(struct nlm_block *block, struct file_lock *conf,
- 		block->b_flags |= B_TIMED_OUT;
- 	if (conf) {
- 		if (block->b_fl)
--			__locks_copy_lock(block->b_fl, conf);
-+			locks_copy_lock(block->b_fl, conf);
- 	}
+@@ -595,63 +600,6 @@ nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
  }
  
-@@ -721,13 +723,14 @@ nlmsvc_grant_blocked(struct nlm_block *block)
+ /*
+- * This is a callback from the filesystem for VFS file lock requests.
+- * It will be used if fl_grant is defined and the filesystem can not
+- * respond to the request immediately.
+- * For GETLK request it will copy the reply to the nlm_block.
+- * For SETLK or SETLKW request it will get the local posix lock.
+- * In all cases it will move the block to the head of nlm_blocked q where
+- * nlmsvc_retry_blocked() can send back a reply for SETLKW or revisit the
+- * deferred rpc for GETLK and SETLK.
+- */
+-static void
+-nlmsvc_update_deferred_block(struct nlm_block *block, struct file_lock *conf,
+-			     int result)
+-{
+-	block->b_flags |= B_GOT_CALLBACK;
+-	if (result == 0)
+-		block->b_granted = 1;
+-	else
+-		block->b_flags |= B_TIMED_OUT;
+-	if (conf) {
+-		if (block->b_fl)
+-			__locks_copy_lock(block->b_fl, conf);
+-	}
+-}
+-
+-static int nlmsvc_grant_deferred(struct file_lock *fl, struct file_lock *conf,
+-					int result)
+-{
+-	struct nlm_block *block;
+-	int rc = -ENOENT;
+-
+-	lock_kernel();
+-	list_for_each_entry(block, &nlm_blocked, b_list) {
+-		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
+-			dprintk("lockd: nlmsvc_notify_blocked block %p flags %d\n",
+-							block, block->b_flags);
+-			if (block->b_flags & B_QUEUED) {
+-				if (block->b_flags & B_TIMED_OUT) {
+-					rc = -ENOLCK;
+-					break;
+-				}
+-				nlmsvc_update_deferred_block(block, conf, result);
+-			} else if (result == 0)
+-				block->b_granted = 1;
+-
+-			nlmsvc_insert_block(block, 0);
+-			svc_wake_up(block->b_daemon);
+-			rc = 0;
+-			break;
+-		}
+-	}
+-	unlock_kernel();
+-	if (rc == -ENOENT)
+-		printk(KERN_WARNING "lockd: grant for unknown block\n");
+-	return rc;
+-}
+-
+-/*
+  * Unblock a blocked lock request. This is a callback invoked from the
+  * VFS layer when a lock on which we blocked is removed.
+  *
+@@ -683,7 +631,6 @@ static int nlmsvc_same_owner(struct file_lock *fl1, struct file_lock *fl2)
+ struct lock_manager_operations nlmsvc_lock_operations = {
+ 	.fl_compare_owner = nlmsvc_same_owner,
+ 	.fl_notify = nlmsvc_notify_blocked,
+-	.fl_grant = nlmsvc_grant_deferred,
+ };
  
- 	/* Try the lock operation again */
- 	lock->fl.fl_flags |= FL_SLEEP;
--	error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
-+	error = posix_lock_file(file->f_file, &lock->fl);
- 	lock->fl.fl_flags &= ~FL_SLEEP;
- 
- 	switch (error) {
- 	case 0:
- 		break;
--	case FILE_LOCK_DEFERRED:
-+	case -EAGAIN:
-+	case -EINPROGRESS:
- 		dprintk("lockd: lock still blocked error %d\n", error);
- 		nlmsvc_insert_block(block, NLM_NEVER);
- 		nlmsvc_release_block(block);
+ /*
 diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
-index 198b4e5..6ef227e 100644
+index 198b4e5..2109091 100644
 --- a/fs/lockd/svcsubs.c
 +++ b/fs/lockd/svcsubs.c
 @@ -45,7 +45,7 @@ static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f)
@@ -203,15 +251,6 @@
  
  	dprintk("lockd: %s %s/%ld\n",
  		msg, inode->i_sb->s_id, inode->i_ino);
-@@ -184,7 +184,7 @@ again:
- 			lock.fl_type  = F_UNLCK;
- 			lock.fl_start = 0;
- 			lock.fl_end   = OFFSET_MAX;
--			if (vfs_lock_file(file->f_file, F_SETLK, &lock, NULL) < 0) {
-+			if (posix_lock_file(file->f_file, &lock) < 0) {
- 				printk("lockd: unlock failure in %s:%d\n",
- 						__FILE__, __LINE__);
- 				return 1;
 @@ -396,7 +396,7 @@ nlmsvc_match_sb(void *datap, struct nlm_file *file)
  {
  	struct super_block *sb = datap;
@@ -221,210 +260,77 @@
  }
  
  /**
-diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
-index e4d5635..771edc1 100644
---- a/fs/lockd/mon.c
-+++ b/fs/lockd/mon.c
-@@ -194,7 +194,7 @@ static __be32 *xdr_encode_mon_name(__be32 *p, struct nsm_args *argp)
-  */
- static __be32 *xdr_encode_my_id(__be32 *p, struct nsm_args *argp)
- {
--	p = xdr_encode_nsm_string(p, utsname()->nodename);
-+	p = xdr_encode_nsm_string(p, system_utsname.nodename);
- 	if (!p)
- 		return ERR_PTR(-EIO);
+diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
+index ac6170c..78eca38 100644
+--- a/fs/nfs/Makefile
++++ b/fs/nfs/Makefile
+@@ -6,7 +6,8 @@ obj-$(CONFIG_NFS_FS) += nfs.o
  
+ nfs-y 			:= client.o dir.o file.o getroot.o inode.o super.o nfs2xdr.o \
+ 			   direct.o pagelist.o proc.o read.o symlink.o unlink.o \
+-			   write.o namespace.o mount_clnt.o
++			   write.o namespace.o mount_clnt.o backport-namespace.o \
++			   backport-writeback.o
+ nfs-$(CONFIG_ROOT_NFS)	+= nfsroot.o
+ nfs-$(CONFIG_NFS_V3)	+= nfs3proc.o nfs3xdr.o
+ nfs-$(CONFIG_NFS_V3_ACL)	+= nfs3acl.o
+diff --git a/fs/nfs/backport-namespace.c b/fs/nfs/backport-namespace.c
+new file mode 100644
+index 0000000..de57f8b
+--- /dev/null
++++ b/fs/nfs/backport-namespace.c
+@@ -0,0 +1 @@
++#include "src/namespace.c"
+diff --git a/fs/nfs/backport-writeback.c b/fs/nfs/backport-writeback.c
+new file mode 100644
+index 0000000..b838ead
+--- /dev/null
++++ b/fs/nfs/backport-writeback.c
+@@ -0,0 +1 @@
++#include "src/writeback.c"
 diff --git a/fs/nfs/client.c b/fs/nfs/client.c
-index 5ee23e7..01ed2a8 100644
+index 5ee23e7..afbb834 100644
 --- a/fs/nfs/client.c
 +++ b/fs/nfs/client.c
-@@ -395,7 +395,7 @@ found_client:
- 	if (new)
- 		nfs_free_client(new);
- 
--	error = wait_event_killable(nfs_client_active_wq,
-+	error = wait_event_interruptible(nfs_client_active_wq,
- 				clp->cl_cons_state != NFS_CS_INITING);
- 	if (error < 0) {
- 		nfs_put_client(clp);
-@@ -601,6 +601,10 @@ static int nfs_init_server_rpcclient(struct nfs_server *server,
- 	if (server->flags & NFS_MOUNT_SOFT)
- 		server->client->cl_softrtry = 1;
- 
-+	server->client->cl_intr = 0;
-+	if (server->flags & NFS4_MOUNT_INTR)
-+		server->client->cl_intr = 1;
-+
- 	return 0;
- }
- 
-@@ -796,10 +800,6 @@ static int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *mntfh, str
- 		goto out_error;
- 
- 	nfs_server_set_fsinfo(server, &fsinfo);
--	error = bdi_init(&server->backing_dev_info);
--	if (error)
--		goto out_error;
--
- 
- 	/* Get some general file system info */
- 	if (server->namelen == 0) {
-@@ -885,7 +885,6 @@ void nfs_free_server(struct nfs_server *server)
- 	nfs_put_client(server->nfs_client);
- 
- 	nfs_free_iostats(server->io_stats);
--	bdi_destroy(&server->backing_dev_info);
- 	kfree(server);
- 	nfs_release_automount_timer();
- 	dprintk("<-- nfs_free_server()\n");
-@@ -1372,9 +1371,23 @@ static int nfs_server_list_open(struct inode *inode, struct file *file)
-  */
- static void *nfs_server_list_start(struct seq_file *m, loff_t *_pos)
- {
-+	struct list_head *_p;
-+	loff_t pos = *_pos;
-+
- 	/* lock the list against modification */
- 	spin_lock(&nfs_client_lock);
--	return seq_list_start_head(&nfs_client_list, *_pos);
-+
-+	/* allow for the header line */
-+	if (!pos)
-+		return SEQ_START_TOKEN;
-+	pos--;
-+
-+	/* find the n'th element in the list */
-+	list_for_each(_p, &nfs_client_list)
-+		if (!pos--)
-+			break;
-+
-+	return _p != &nfs_client_list ? _p : NULL;
- }
- 
- /*
-@@ -1382,7 +1395,14 @@ static void *nfs_server_list_start(struct seq_file *m, loff_t *_pos)
-  */
- static void *nfs_server_list_next(struct seq_file *p, void *v, loff_t *pos)
- {
--	return seq_list_next(v, &nfs_client_list, pos);
-+	struct list_head *_p;
-+
-+	(*pos)++;
-+
-+	_p = v;
-+	_p = (v == SEQ_START_TOKEN) ? nfs_client_list.next : _p->next;
-+
-+	return _p != &nfs_client_list ? _p : NULL;
- }
- 
- /*
-@@ -1401,7 +1421,7 @@ static int nfs_server_list_show(struct seq_file *m, void *v)
- 	struct nfs_client *clp;
- 
- 	/* display header on line 1 */
--	if (v == &nfs_client_list) {
-+	if (v == SEQ_START_TOKEN) {
- 		seq_puts(m, "NV SERVER   PORT USE HOSTNAME\n");
- 		return 0;
+@@ -248,6 +248,7 @@ static int nfs_sockaddr_match_ipaddr(const struct sockaddr *sa1,
+ 				(const struct sockaddr_in6 *)sa2);
  	}
-@@ -1442,9 +1462,23 @@ static int nfs_volume_list_open(struct inode *inode, struct file *file)
-  */
- static void *nfs_volume_list_start(struct seq_file *m, loff_t *_pos)
- {
-+	struct list_head *_p;
-+	loff_t pos = *_pos;
-+
- 	/* lock the list against modification */
- 	spin_lock(&nfs_client_lock);
--	return seq_list_start_head(&nfs_volume_list, *_pos);
-+
-+	/* allow for the header line */
-+	if (!pos)
-+		return SEQ_START_TOKEN;
-+	pos--;
-+
-+	/* find the n'th element in the list */
-+	list_for_each(_p, &nfs_volume_list)
-+		if (!pos--)
-+			break;
-+
-+	return _p != &nfs_volume_list ? _p : NULL;
+ 	BUG();
++	return -EINVAL;
  }
  
  /*
-@@ -1452,7 +1486,14 @@ static void *nfs_volume_list_start(struct seq_file *m, loff_t *_pos)
-  */
- static void *nfs_volume_list_next(struct seq_file *p, void *v, loff_t *pos)
- {
--	return seq_list_next(v, &nfs_volume_list, pos);
-+	struct list_head *_p;
-+
-+	(*pos)++;
-+
-+	_p = v;
-+	_p = (v == SEQ_START_TOKEN) ? nfs_volume_list.next : _p->next;
-+
-+	return _p != &nfs_volume_list ? _p : NULL;
- }
- 
- /*
-@@ -1473,7 +1514,7 @@ static int nfs_volume_list_show(struct seq_file *m, void *v)
- 	char dev[8], fsid[17];
- 
- 	/* display header on line 1 */
--	if (v == &nfs_volume_list) {
-+	if (v == SEQ_START_TOKEN) {
- 		seq_puts(m, "NV SERVER   PORT DEV     FSID\n");
- 		return 0;
- 	}
-@@ -1512,16 +1553,18 @@ int __init nfs_fs_proc_init(void)
- 	proc_fs_nfs->owner = THIS_MODULE;
- 
- 	/* a file of servers with which we're dealing */
--	p = proc_create("servers", S_IFREG|S_IRUGO,
--			proc_fs_nfs, &nfs_server_list_fops);
-+	p = create_proc_entry("servers", S_IFREG|S_IRUGO, proc_fs_nfs);
- 	if (!p)
- 		goto error_1;
-+	p->proc_fops = &nfs_server_list_fops; 
-+	p->owner = THIS_MODULE;
- 
- 	/* a file of volumes that we have mounted */
--	p = proc_create("volumes", S_IFREG|S_IRUGO,
--			proc_fs_nfs, &nfs_volume_list_fops);
-+	p = create_proc_entry("volumes", S_IFREG|S_IRUGO, proc_fs_nfs);
- 	if (!p)
- 		goto error_2;
-+	p->proc_fops =  &nfs_volume_list_fops;
-+	p->owner = THIS_MODULE;
- 	return 0;
- 
- error_2:
-diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
-index 24241fc..61ea6de 100644
---- a/fs/nfs/internal.h
-+++ b/fs/nfs/internal.h
-@@ -3,7 +3,6 @@
-  */
- 
- #include <linux/mount.h>
--#include <linux/security.h>
- 
- struct nfs_string;
- 
-@@ -59,8 +58,6 @@ struct nfs_parsed_mount_data {
- 		unsigned short		port;
- 		unsigned short		protocol;
- 	} nfs_server;
--
--	struct security_mnt_opts lsm_opts;
- };
- 
- /* client.c */
 diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
-index 74f92b7..4dc892e 100644
+index 74f92b7..90d0a97 100644
 --- a/fs/nfs/dir.c
 +++ b/fs/nfs/dir.c
+@@ -66,7 +66,7 @@ const struct file_operations nfs_dir_operations = {
+ 	.fsync		= nfs_fsync_dir,
+ };
+ 
+-const struct inode_operations nfs_dir_inode_operations = {
++struct inode_operations nfs_dir_inode_operations = {
+ 	.create		= nfs_create,
+ 	.lookup		= nfs_lookup,
+ 	.link		= nfs_link,
+@@ -82,7 +82,7 @@ const struct inode_operations nfs_dir_inode_operations = {
+ };
+ 
+ #ifdef CONFIG_NFS_V3
+-const struct inode_operations nfs3_dir_inode_operations = {
++struct inode_operations nfs3_dir_inode_operations = {
+ 	.create		= nfs_create,
+ 	.lookup		= nfs_lookup,
+ 	.link		= nfs_link,
+@@ -105,7 +105,7 @@ const struct inode_operations nfs3_dir_inode_operations = {
+ #ifdef CONFIG_NFS_V4
+ 
+ static struct dentry *nfs_atomic_lookup(struct inode *, struct dentry *, struct nameidata *);
+-const struct inode_operations nfs4_dir_inode_operations = {
++struct inode_operations nfs4_dir_inode_operations = {
+ 	.create		= nfs_create,
+ 	.lookup		= nfs_atomic_lookup,
+ 	.link		= nfs_link,
 @@ -134,8 +134,8 @@ nfs_opendir(struct inode *inode, struct file *filp)
  	int res;
  
@@ -504,7 +410,7 @@
  		return 0;
  	/* Are we trying to write to a read only partition? */
 -	if (__mnt_is_readonly(nd->path.mnt) &&
-+	if (IS_RDONLY(dir) &&
++	if (__mnt_is_readonly(nd->mnt) &&
  	    (nd->intent.open.flags & (O_CREAT|O_TRUNC|FMODE_WRITE)))
  		return 0;
  	return 1;
@@ -517,7 +423,7 @@
  	struct inode *dir = parent->d_inode;
  	struct nfs_entry *entry = desc->entry;
  	struct dentry *dentry, *alias;
-@@ -1907,17 +1907,17 @@ int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags)
+@@ -1907,7 +1907,7 @@ int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags)
  	return nfs_do_access(inode, cred, nfs_open_permission_mask(openflags));
  }
  
@@ -526,11 +432,8 @@
  {
  	struct rpc_cred *cred;
  	int res = 0;
- 
- 	nfs_inc_stats(inode, NFSIOS_VFSACCESS);
- 
--	if ((mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
-+	if (mask == 0)
+@@ -1917,7 +1917,7 @@ int nfs_permission(struct inode *inode, int mask)
+ 	if ((mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
  		goto out;
  	/* Is this sys_access() ? */
 -	if (mask & MAY_ACCESS)
@@ -548,625 +451,8 @@
  				goto out;
  			break;
  		case S_IFDIR:
-diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
-index fae9719..9abe180 100644
---- a/fs/nfs/getroot.c
-+++ b/fs/nfs/getroot.c
-@@ -30,7 +30,7 @@
- #include <linux/nfs_idmap.h>
- #include <linux/vfs.h>
- #include <linux/namei.h>
--#include <linux/mnt_namespace.h>
-+#include <linux/namespace.h>
- #include <linux/security.h>
- 
- #include <asm/system.h>
-@@ -96,7 +96,7 @@ struct dentry *nfs_get_root(struct super_block *sb, struct nfs_fh *mntfh)
- 	inode = nfs_fhget(sb, mntfh, fsinfo.fattr);
- 	if (IS_ERR(inode)) {
- 		dprintk("nfs_get_root: get root inode failed\n");
--		return ERR_CAST(inode);
-+		return ERR_PTR(PTR_ERR(inode));
- 	}
- 
- 	error = nfs_superblock_set_dummy_root(sb, inode);
-@@ -266,7 +266,7 @@ struct dentry *nfs4_get_root(struct super_block *sb, struct nfs_fh *mntfh)
- 	inode = nfs_fhget(sb, mntfh, &fattr);
- 	if (IS_ERR(inode)) {
- 		dprintk("nfs_get_root: get root inode failed\n");
--		return ERR_CAST(inode);
-+		return ERR_PTR(PTR_ERR(inode));
- 	}
- 
- 	error = nfs_superblock_set_dummy_root(sb, inode);
-diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
-index 52daefa..13cc0e0 100644
---- a/fs/nfs/inode.c
-+++ b/fs/nfs/inode.c
-@@ -279,28 +279,36 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
- 		/* Why so? Because we want revalidate for devices/FIFOs, and
- 		 * that's precisely what we have in nfs_file_inode_operations.
- 		 */
--		inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->file_inode_ops;
-+		inode->i_op = (struct inode_operations *)
-+			NFS_SB(sb)->nfs_client->rpc_ops->file_inode_ops;
- 		if (S_ISREG(inode->i_mode)) {
--			inode->i_fop = &nfs_file_operations;
--			inode->i_data.a_ops = &nfs_file_aops;
-+			inode->i_fop = (struct file_operations  *)
-+				&nfs_file_operations;
-+			inode->i_data.a_ops = (struct address_space_operations *)
-+				&nfs_file_aops;
- 			inode->i_data.backing_dev_info = &NFS_SB(sb)->backing_dev_info;
- 		} else if (S_ISDIR(inode->i_mode)) {
--			inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->dir_inode_ops;
--			inode->i_fop = &nfs_dir_operations;
-+			inode->i_op = (struct inode_operations *)
-+				NFS_SB(sb)->nfs_client->rpc_ops->dir_inode_ops;
-+			inode->i_fop = (struct file_operations  *)
-+				&nfs_dir_operations;
- 			if (nfs_server_capable(inode, NFS_CAP_READDIRPLUS)
- 			    && fattr->size <= NFS_LIMIT_READDIRPLUS)
- 				set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
- 			/* Deal with crossing mountpoints */
- 			if (!nfs_fsid_equal(&NFS_SB(sb)->fsid, &fattr->fsid)) {
- 				if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
--					inode->i_op = &nfs_referral_inode_operations;
-+					inode->i_op = (struct inode_operations *)
-+						&nfs_referral_inode_operations;
- 				else
--					inode->i_op = &nfs_mountpoint_inode_operations;
-+					inode->i_op = (struct inode_operations *)
-+						&nfs_mountpoint_inode_operations;
- 				inode->i_fop = NULL;
- 				set_bit(NFS_INO_MOUNTPOINT, &nfsi->flags);
- 			}
- 		} else if (S_ISLNK(inode->i_mode))
--			inode->i_op = &nfs_symlink_inode_operations;
-+			inode->i_op = (struct inode_operations *)
-+				&nfs_symlink_inode_operations;
- 		else
- 			init_special_inode(inode, inode->i_mode, fattr->rdev);
- 
-@@ -485,11 +493,15 @@ static int nfs_wait_schedule(void *word)
-  */
- static int nfs_wait_on_inode(struct inode *inode)
- {
-+	struct rpc_clnt	*clnt = NFS_CLIENT(inode);
- 	struct nfs_inode *nfsi = NFS_I(inode);
-+	sigset_t oldmask;
- 	int error;
- 
-+	rpc_clnt_sigmask(clnt, &oldmask);
- 	error = wait_on_bit_lock(&nfsi->flags, NFS_INO_REVALIDATING,
--					nfs_wait_schedule, TASK_KILLABLE);
-+					nfs_wait_schedule, TASK_INTERRUPTIBLE);
-+	rpc_clnt_sigunmask(clnt, &oldmask);
- 
- 	return error;
- }
-@@ -592,7 +604,8 @@ static void __put_nfs_open_context(struct nfs_open_context *ctx, int wait)
- 	}
- 	if (ctx->cred != NULL)
- 		put_rpccred(ctx->cred);
--	path_put(&ctx->path);
-+	dput(ctx->path.dentry);
-+	mntput(ctx->path.mnt);
- 	kfree(ctx);
- }
- 
-@@ -612,7 +625,7 @@ static void put_nfs_open_context_sync(struct nfs_open_context *ctx)
-  */
- static void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx)
- {
--	struct inode *inode = filp->f_path.dentry->d_inode;
-+	struct inode *inode = filp->f_dentry->d_inode;
- 	struct nfs_inode *nfsi = NFS_I(inode);
- 
- 	filp->private_data = get_nfs_open_context(ctx);
-@@ -644,7 +657,7 @@ struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_c
- 
- static void nfs_file_clear_open_context(struct file *filp)
- {
--	struct inode *inode = filp->f_path.dentry->d_inode;
-+	struct inode *inode = filp->f_dentry->d_inode;
- 	struct nfs_open_context *ctx = nfs_file_open_context(filp);
- 
- 	if (ctx) {
-@@ -667,7 +680,7 @@ int nfs_open(struct inode *inode, struct file *filp)
- 	cred = rpc_lookup_cred();
- 	if (IS_ERR(cred))
- 		return PTR_ERR(cred);
--	ctx = alloc_nfs_open_context(filp->f_path.mnt, filp->f_path.dentry, cred);
-+	ctx = alloc_nfs_open_context(filp->f_vfsmnt, filp->f_dentry, cred);
- 	put_rpccred(cred);
- 	if (ctx == NULL)
- 		return -ENOMEM;
-diff --git a/fs/nfs/super.c b/fs/nfs/super.c
-index 9abcd2b..4379cf3 100644
---- a/fs/nfs/super.c
-+++ b/fs/nfs/super.c
-@@ -49,7 +49,6 @@
- #include <net/ipv6.h>
- #include <linux/netdevice.h>
- #include <linux/nfs_xdr.h>
--#include <linux/magic.h>
- #include <linux/parser.h>
- 
- #include <asm/system.h>
-@@ -66,6 +65,7 @@
- enum {
- 	/* Mount options that take no arguments */
- 	Opt_soft, Opt_hard,
-+	Opt_intr, Opt_nointr,
- 	Opt_posix, Opt_noposix,
- 	Opt_cto, Opt_nocto,
- 	Opt_ac, Opt_noac,
-@@ -107,8 +107,8 @@ static match_table_t nfs_mount_option_tokens = {
- 
- 	{ Opt_soft, "soft" },
- 	{ Opt_hard, "hard" },
--	{ Opt_deprecated, "intr" },
--	{ Opt_deprecated, "nointr" },
-+	{ Opt_intr, "intr" },
-+	{ Opt_nointr, "nointr" },
- 	{ Opt_posix, "posix" },
- 	{ Opt_noposix, "noposix" },
- 	{ Opt_cto, "cto" },
-@@ -287,10 +287,7 @@ static const struct super_operations nfs4_sops = {
- };
- #endif
- 
--static struct shrinker acl_shrinker = {
--	.shrink		= nfs_access_cache_shrinker,
--	.seeks		= DEFAULT_SEEKS,
--};
-+static struct shrinker *acl_shrinker;
- 
- /*
-  * Register the NFS filesystems
-@@ -311,7 +308,7 @@ int __init register_nfs_fs(void)
- 	if (ret < 0)
- 		goto error_2;
- #endif
--	register_shrinker(&acl_shrinker);
-+	acl_shrinker = set_shrinker(DEFAULT_SEEKS, nfs_access_cache_shrinker);
- 	return 0;
- 
- #ifdef CONFIG_NFS_V4
-@@ -329,7 +326,8 @@ error_0:
-  */
- void __exit unregister_nfs_fs(void)
- {
--	unregister_shrinker(&acl_shrinker);
-+	if (acl_shrinker != NULL)
-+		remove_shrinker(acl_shrinker);
- #ifdef CONFIG_NFS_V4
- 	unregister_filesystem(&nfs4_fs_type);
- #endif
-@@ -708,7 +706,6 @@ static void nfs_parse_ipv4_address(char *string, size_t str_len,
- 				   struct sockaddr *sap, size_t *addr_len)
- {
- 	struct sockaddr_in *sin = (struct sockaddr_in *)sap;
--	u8 *addr = (u8 *)&sin->sin_addr.s_addr;
- 
- 	if (str_len <= INET_ADDRSTRLEN) {
- 		dfprintk(MOUNT, "NFS: parsing IPv4 address %*s\n",
-@@ -716,80 +713,21 @@ static void nfs_parse_ipv4_address(char *string, size_t str_len,
- 
- 		sin->sin_family = AF_INET;
- 		*addr_len = sizeof(*sin);
--		if (in4_pton(string, str_len, addr, '\0', NULL))
--			return;
-+		sin->sin_addr.s_addr = in_aton(string);
- 	}
- 
- 	sap->sa_family = AF_UNSPEC;
- 	*addr_len = 0;
- }
- 
--#define IPV6_SCOPE_DELIMITER	'%'
--
--#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
--static void nfs_parse_ipv6_scope_id(const char *string, const size_t str_len,
--				    const char *delim,
--				    struct sockaddr_in6 *sin6)
--{
--	char *p;
--	size_t len;
--
--	if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
--		return ;
--	if (*delim != IPV6_SCOPE_DELIMITER)
--		return;
--
--	len = (string + str_len) - delim - 1;
--	p = kstrndup(delim + 1, len, GFP_KERNEL);
--	if (p) {
--		unsigned long scope_id = 0;
--		struct net_device *dev;
--
--		dev = dev_get_by_name(&init_net, p);
--		if (dev != NULL) {
--			scope_id = dev->ifindex;
--			dev_put(dev);
--		} else {
--			/* scope_id is set to zero on error */
--			strict_strtoul(p, 10, &scope_id);
--		}
--
--		kfree(p);
--		sin6->sin6_scope_id = scope_id;
--		dfprintk(MOUNT, "NFS: IPv6 scope ID = %lu\n", scope_id);
--	}
--}
-+/* No IPV6 for now - Jeff Becker */
- 
- static void nfs_parse_ipv6_address(char *string, size_t str_len,
- 				   struct sockaddr *sap, size_t *addr_len)
- {
--	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
--	u8 *addr = (u8 *)&sin6->sin6_addr.in6_u;
--	const char *delim;
--
--	if (str_len <= INET6_ADDRSTRLEN) {
--		dfprintk(MOUNT, "NFS: parsing IPv6 address %*s\n",
--				(int)str_len, string);
--
--		sin6->sin6_family = AF_INET6;
--		*addr_len = sizeof(*sin6);
--		if (in6_pton(string, str_len, addr, IPV6_SCOPE_DELIMITER, &delim)) {
--			nfs_parse_ipv6_scope_id(string, str_len, delim, sin6);
--			return;
--		}
--	}
--
--	sap->sa_family = AF_UNSPEC;
--	*addr_len = 0;
--}
--#else
--static void nfs_parse_ipv6_address(char *string, size_t str_len,
--				   struct sockaddr *sap, size_t *addr_len)
--{
- 	sap->sa_family = AF_UNSPEC;
- 	*addr_len = 0;
- }
--#endif
- 
- /*
-  * Construct a sockaddr based on the contents of a string that contains
-@@ -929,7 +867,7 @@ static void nfs_parse_invalid_value(const char *option)
- static int nfs_parse_mount_options(char *raw,
- 				   struct nfs_parsed_mount_data *mnt)
- {
--	char *p, *string, *secdata;
-+	char *p, *string;
- 	int rc, sloppy = 0, errors = 0;
- 
- 	if (!raw) {
-@@ -938,20 +876,6 @@ static int nfs_parse_mount_options(char *raw,
- 	}
- 	dfprintk(MOUNT, "NFS: nfs mount opts='%s'\n", raw);
- 
--	secdata = alloc_secdata();
--	if (!secdata)
--		goto out_nomem;
--
--	rc = security_sb_copy_data(raw, secdata);
--	if (rc)
--		goto out_security_failure;
--
--	rc = security_sb_parse_opts_str(secdata, &mnt->lsm_opts);
--	if (rc)
--		goto out_security_failure;
--
--	free_secdata(secdata);
--
- 	while ((p = strsep(&raw, ",")) != NULL) {
- 		substring_t args[MAX_OPT_ARGS];
- 		int option, token;
-@@ -976,6 +900,12 @@ static int nfs_parse_mount_options(char *raw,
- 		case Opt_posix:
- 			mnt->flags |= NFS_MOUNT_POSIX;
- 			break;
-+		case Opt_intr:
-+			mnt->flags |= NFS_MOUNT_INTR;
-+			break;
-+		case Opt_nointr:
-+			mnt->flags &= ~NFS_MOUNT_INTR;
-+			break;
- 		case Opt_noposix:
- 			mnt->flags &= ~NFS_MOUNT_POSIX;
- 			break;
-@@ -1284,10 +1214,6 @@ static int nfs_parse_mount_options(char *raw,
- out_nomem:
- 	printk(KERN_INFO "NFS: not enough memory to parse option\n");
- 	return 0;
--out_security_failure:
--	free_secdata(secdata);
--	printk(KERN_INFO "NFS: security options invalid: %d\n", rc);
--	return 0;
- }
- 
- /*
-@@ -1361,10 +1287,11 @@ static int nfs_parse_simple_hostname(const char *dev_name,
- 	if (len > maxnamlen)
- 		goto out_hostname;
- 
-+	*hostname = kzalloc(len, GFP_KERNEL);
- 	/* N.B. caller will free nfs_server.hostname in all cases */
--	*hostname = kstrndup(dev_name, len, GFP_KERNEL);
- 	if (!*hostname)
- 		goto out_nomem;
-+	strncpy(*hostname, dev_name, len - 1);
- 
- 	/* kill possible hostname list: not supported */
- 	comma = strchr(*hostname, ',');
-@@ -1378,9 +1305,10 @@ static int nfs_parse_simple_hostname(const char *dev_name,
- 	len = strlen(colon);
- 	if (len > maxpathlen)
- 		goto out_path;
--	*export_path = kstrndup(colon, len, GFP_KERNEL);
-+	*export_path = kzalloc(len + 1, GFP_KERNEL);
- 	if (!*export_path)
- 		goto out_nomem;
-+	strncpy(*export_path, colon, len);
- 
- 	dfprintk(MOUNT, "NFS: MNTPATH: '%s'\n", *export_path);
- 	return 0;
-@@ -1427,17 +1355,19 @@ static int nfs_parse_protected_hostname(const char *dev_name,
- 		goto out_hostname;
- 
- 	/* N.B. caller will free nfs_server.hostname in all cases */
--	*hostname = kstrndup(start, len, GFP_KERNEL);
-+	*hostname = kzalloc(len, GFP_KERNEL);
- 	if (*hostname == NULL)
- 		goto out_nomem;
-+	strncpy(*hostname, start, len - 1);
- 
- 	end += 2;
- 	len = strlen(end);
- 	if (len > maxpathlen)
- 		goto out_path;
--	*export_path = kstrndup(end, len, GFP_KERNEL);
-+	*export_path = kzalloc(len, GFP_KERNEL);
- 	if (!*export_path)
- 		goto out_nomem;
-+	strncpy(*export_path, end, len - 1);
- 
- 	return 0;
- 
-@@ -1580,33 +1510,6 @@ static int nfs_validate_mount_data(void *options,
- 			args->auth_flavors[0] = data->pseudoflavor;
- 		if (!args->nfs_server.hostname)
- 			goto out_nomem;
--
--		/*
--		 * The legacy version 6 binary mount data from userspace has a
--		 * field used only to transport selinux information into the
--		 * the kernel.  To continue to support that functionality we
--		 * have a touch of selinux knowledge here in the NFS code. The
--		 * userspace code converted context=blah to just blah so we are
--		 * converting back to the full string selinux understands.
--		 */
--		if (data->context[0]){
--#ifdef CONFIG_SECURITY_SELINUX
--			int rc;
--			char *opts_str = kmalloc(sizeof(data->context) + 8, GFP_KERNEL);
--			if (!opts_str)
--				return -ENOMEM;
--			strcpy(opts_str, "context=");
--			data->context[NFS_MAX_CONTEXT_LEN] = '\0';
--			strcat(opts_str, &data->context[0]);
--			rc = security_sb_parse_opts_str(opts_str, &args->lsm_opts);
--			kfree(opts_str);
--			if (rc)
--				return rc;
--#else
--			return -EINVAL;
--#endif
--		}
--
- 		break;
- 	default: {
- 		int status;
-@@ -1798,7 +1701,7 @@ static void nfs_fill_super(struct super_block *sb,
- 		sb->s_time_gran = 1;
- 	}
- 
--	sb->s_op = &nfs_sops;
-+	sb->s_op = (struct super_operations *) &nfs_sops;
-  	nfs_initialise_sb(sb);
- }
- 
-@@ -1931,11 +1834,6 @@ static int nfs_compare_super(struct super_block *sb, void *data)
- 	return nfs_compare_mount_options(sb, server, mntflags);
- }
- 
--static int nfs_bdi_register(struct nfs_server *server)
--{
--	return bdi_register_dev(&server->backing_dev_info, server->s_dev);
--}
--
- static int nfs_get_sb(struct file_system_type *fs_type,
- 	int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt)
- {
-@@ -1955,8 +1853,6 @@ static int nfs_get_sb(struct file_system_type *fs_type,
- 	if (data == NULL || mntfh == NULL)
- 		goto out_free_fh;
- 
--	security_init_mnt_opts(&data->lsm_opts);
--
- 	/* Validate the mount data */
- 	error = nfs_validate_mount_data(raw_data, data, mntfh, dev_name);
- 	if (error < 0)
-@@ -1983,10 +1879,6 @@ static int nfs_get_sb(struct file_system_type *fs_type,
- 	if (s->s_fs_info != server) {
- 		nfs_free_server(server);
- 		server = NULL;
--	} else {
--		error = nfs_bdi_register(server);
--		if (error)
--			goto error_splat_super;
- 	}
- 
- 	if (!s->s_root) {
-@@ -2000,10 +1892,6 @@ static int nfs_get_sb(struct file_system_type *fs_type,
- 		goto error_splat_super;
- 	}
- 
--	error = security_sb_set_mnt_opts(s, &data->lsm_opts);
--	if (error)
--		goto error_splat_root;
--
- 	s->s_flags |= MS_ACTIVE;
- 	mnt->mnt_sb = s;
- 	mnt->mnt_root = mntroot;
-@@ -2012,7 +1900,6 @@ static int nfs_get_sb(struct file_system_type *fs_type,
- out:
- 	kfree(data->nfs_server.hostname);
- 	kfree(data->mount_server.hostname);
--	security_free_mnt_opts(&data->lsm_opts);
- out_free_fh:
- 	kfree(mntfh);
- 	kfree(data);
-@@ -2022,8 +1909,6 @@ out_err_nosb:
- 	nfs_free_server(server);
- 	goto out;
- 
--error_splat_root:
--	dput(mntroot);
- error_splat_super:
- 	up_write(&s->s_umount);
- 	deactivate_super(s);
-@@ -2037,7 +1922,6 @@ static void nfs_kill_super(struct super_block *s)
- {
- 	struct nfs_server *server = NFS_SB(s);
- 
--	bdi_unregister(&server->backing_dev_info);
- 	kill_anon_super(s);
- 	nfs_free_server(server);
- }
-@@ -2082,10 +1966,6 @@ static int nfs_xdev_get_sb(struct file_system_type *fs_type, int flags,
- 	if (s->s_fs_info != server) {
- 		nfs_free_server(server);
- 		server = NULL;
--	} else {
--		error = nfs_bdi_register(server);
--		if (error)
--			goto error_splat_super;
- 	}
- 
- 	if (!s->s_root) {
-@@ -2108,9 +1988,6 @@ static int nfs_xdev_get_sb(struct file_system_type *fs_type, int flags,
- 	mnt->mnt_sb = s;
- 	mnt->mnt_root = mntroot;
- 
--	/* clone any lsm security options from the parent to the new sb */
--	security_sb_clone_mnt_opts(data->sb, s);
--
- 	dprintk("<-- nfs_xdev_get_sb() = 0\n");
- 	return 0;
- 
-@@ -2149,7 +2026,7 @@ static void nfs4_clone_super(struct super_block *sb,
- static void nfs4_fill_super(struct super_block *sb)
- {
- 	sb->s_time_gran = 1;
--	sb->s_op = &nfs4_sops;
-+	sb->s_op = (struct super_operations *) &nfs4_sops;
- 	nfs_initialise_sb(sb);
- }
- 
-@@ -2309,8 +2186,6 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
- 	if (data == NULL || mntfh == NULL)
- 		goto out_free_fh;
- 
--	security_init_mnt_opts(&data->lsm_opts);
--
- 	/* Validate the mount data */
- 	error = nfs4_validate_mount_data(raw_data, data, dev_name);
- 	if (error < 0)
-@@ -2337,10 +2212,6 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
- 	if (s->s_fs_info != server) {
- 		nfs_free_server(server);
- 		server = NULL;
--	} else {
--		error = nfs_bdi_register(server);
--		if (error)
--			goto error_splat_super;
- 	}
- 
- 	if (!s->s_root) {
-@@ -2354,10 +2225,6 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
- 		goto error_splat_super;
- 	}
- 
--	error = security_sb_set_mnt_opts(s, &data->lsm_opts);
--	if (error)
--		goto error_splat_root;
--
- 	s->s_flags |= MS_ACTIVE;
- 	mnt->mnt_sb = s;
- 	mnt->mnt_root = mntroot;
-@@ -2367,7 +2234,6 @@ out:
- 	kfree(data->client_address);
- 	kfree(data->nfs_server.export_path);
- 	kfree(data->nfs_server.hostname);
--	security_free_mnt_opts(&data->lsm_opts);
- out_free_fh:
- 	kfree(mntfh);
- 	kfree(data);
-@@ -2377,8 +2243,6 @@ out_free:
- 	nfs_free_server(server);
- 	goto out;
- 
--error_splat_root:
--	dput(mntroot);
- error_splat_super:
- 	up_write(&s->s_umount);
- 	deactivate_super(s);
-@@ -2436,10 +2300,6 @@ static int nfs4_xdev_get_sb(struct file_system_type *fs_type, int flags,
- 	if (s->s_fs_info != server) {
- 		nfs_free_server(server);
- 		server = NULL;
--	} else {
--		error = nfs_bdi_register(server);
--		if (error)
--			goto error_splat_super;
- 	}
- 
- 	if (!s->s_root) {
-@@ -2462,8 +2322,6 @@ static int nfs4_xdev_get_sb(struct file_system_type *fs_type, int flags,
- 	mnt->mnt_sb = s;
- 	mnt->mnt_root = mntroot;
- 
--	security_sb_clone_mnt_opts(data->sb, s);
--
- 	dprintk("<-- nfs4_xdev_get_sb() = 0\n");
- 	return 0;
- 
-@@ -2521,10 +2379,6 @@ static int nfs4_referral_get_sb(struct file_system_type *fs_type, int flags,
- 	if (s->s_fs_info != server) {
- 		nfs_free_server(server);
- 		server = NULL;
--	} else {
--		error = nfs_bdi_register(server);
--		if (error)
--			goto error_splat_super;
- 	}
- 
- 	if (!s->s_root) {
-@@ -2547,8 +2401,6 @@ static int nfs4_referral_get_sb(struct file_system_type *fs_type, int flags,
- 	mnt->mnt_sb = s;
- 	mnt->mnt_root = mntroot;
- 
--	security_sb_clone_mnt_opts(data->sb, s);
--
- 	dprintk("<-- nfs4_referral_get_sb() = 0\n");
- 	return 0;
- 
 diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
-index 08f6b04..1f7a35a 100644
+index 08f6b04..91f5069 100644
 --- a/fs/nfs/direct.c
 +++ b/fs/nfs/direct.c
 @@ -116,7 +116,7 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
@@ -1178,60 +464,7 @@
  			(long long) pos, nr_segs);
  
  	return -EINVAL;
-@@ -193,7 +193,7 @@ static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
- 	if (dreq->iocb)
- 		goto out;
- 
--	result = wait_for_completion_killable(&dreq->completion);
-+	result = wait_for_completion_interruptible(&dreq->completion);
- 
- 	if (!result)
- 		result = dreq->error;
-@@ -412,7 +412,9 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
- 			       unsigned long nr_segs, loff_t pos)
- {
- 	ssize_t result = 0;
-+	sigset_t oldset;
- 	struct inode *inode = iocb->ki_filp->f_mapping->host;
-+	struct rpc_clnt *clnt = NFS_CLIENT(inode);
- 	struct nfs_direct_req *dreq;
- 
- 	dreq = nfs_direct_req_alloc();
-@@ -424,9 +426,11 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
- 	if (!is_sync_kiocb(iocb))
- 		dreq->iocb = iocb;
- 
-+	rpc_clnt_sigmask(clnt, &oldset);
- 	result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
- 	if (!result)
- 		result = nfs_direct_wait(dreq);
-+	rpc_clnt_sigunmask(clnt, &oldset);
- 	nfs_direct_req_release(dreq);
- 
- 	return result;
-@@ -832,7 +836,9 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
- 				size_t count)
- {
- 	ssize_t result = 0;
-+	sigset_t oldset;
- 	struct inode *inode = iocb->ki_filp->f_mapping->host;
-+	struct rpc_clnt *clnt = NFS_CLIENT(inode);
- 	struct nfs_direct_req *dreq;
- 	size_t wsize = NFS_SERVER(inode)->wsize;
- 	int sync = NFS_UNSTABLE;
-@@ -850,9 +856,11 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
- 	if (!is_sync_kiocb(iocb))
- 		dreq->iocb = iocb;
- 
-+	rpc_clnt_sigmask(clnt, &oldset);
- 	result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, sync);
- 	if (!result)
- 		result = nfs_direct_wait(dreq);
-+	rpc_clnt_sigunmask(clnt, &oldset);
- 	nfs_direct_req_release(dreq);
- 
- 	return result;
-@@ -891,8 +899,8 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
+@@ -891,8 +891,8 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
  	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
  
  	dfprintk(FILE, "NFS: direct read(%s/%s, %zd@%Ld)\n",
@@ -1242,7 +475,7 @@
  		count, (long long) pos);
  
  	retval = 0;
-@@ -948,8 +956,8 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
+@@ -948,8 +948,8 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
  	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);
  
  	dfprintk(FILE, "NFS: direct write(%s/%s, %zd@%Ld)\n",
@@ -1253,132 +486,11 @@
  		count, (long long) pos);
  
  	retval = generic_write_checks(file, &pos, &count, 0);
-diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
-index 7f07920..3b3dbb9 100644
---- a/fs/nfs/pagelist.c
-+++ b/fs/nfs/pagelist.c
-@@ -58,6 +58,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
- 		   struct page *page,
- 		   unsigned int offset, unsigned int count)
- {
-+	struct nfs_server *server = NFS_SERVER(inode);
- 	struct nfs_page		*req;
- 
- 	for (;;) {
-@@ -66,7 +67,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
- 		if (req != NULL)
- 			break;
- 
--		if (fatal_signal_pending(current))
-+		if (signalled() && (server->flags & NFS_MOUNT_INTR))
- 			return ERR_PTR(-ERESTARTSYS);
- 		yield();
- 	}
-@@ -176,11 +177,11 @@ void nfs_release_request(struct nfs_page *req)
- 	kref_put(&req->wb_kref, nfs_free_request);
- }
- 
--static int nfs_wait_bit_killable(void *word)
-+static int nfs_wait_bit_interruptible(void *word)
- {
- 	int ret = 0;
- 
--	if (fatal_signal_pending(current))
-+	if (signal_pending(current))
- 		ret = -ERESTARTSYS;
- 	else
- 		schedule();
-@@ -191,18 +192,26 @@ static int nfs_wait_bit_killable(void *word)
-  * nfs_wait_on_request - Wait for a request to complete.
-  * @req: request to wait upon.
-  *
-- * Interruptible by fatal signals only.
-+ * Interruptible by signals only if mounted with intr flag.
-  * The user is responsible for holding a count on the request.
-  */
- int
- nfs_wait_on_request(struct nfs_page *req)
- {
-+	struct rpc_clnt *clnt = NFS_CLIENT(req->wb_context->path.dentry->d_inode);
-+	sigset_t oldmask;
- 	int ret = 0;
- 
- 	if (!test_bit(PG_BUSY, &req->wb_flags))
- 		goto out;
-+	/*
-+	 * Note: the call to rpc_clnt_sigmask() suffices to ensure that we
-+	 *	 are not interrupted if intr flag is not set
-+	 */
-+	rpc_clnt_sigmask(clnt, &oldmask);
- 	ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
--			nfs_wait_bit_killable, TASK_KILLABLE);
-+			nfs_wait_bit_interruptible, TASK_INTERRUPTIBLE);
-+	rpc_clnt_sigunmask(clnt, &oldmask);
- out:
- 	return ret;
- }
-diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
-index 4dbb84d..c351a41 100644
---- a/fs/nfs/proc.c
-+++ b/fs/nfs/proc.c
-@@ -595,7 +595,7 @@ nfs_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
- static int
- nfs_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
- {
--	struct inode *inode = filp->f_path.dentry->d_inode;
-+	struct inode *inode = filp->f_dentry->d_inode;
- 
- 	return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl);
- }
-diff --git a/fs/nfs/read.c b/fs/nfs/read.c
-index 40d1798..32c5380 100644
---- a/fs/nfs/read.c
-+++ b/fs/nfs/read.c
-@@ -76,7 +76,7 @@ void nfs_readdata_release(void *data)
- static
- int nfs_return_empty_page(struct page *page)
- {
--	zero_user(page, 0, PAGE_CACHE_SIZE);
-+	memclear_highpage_flush(page, 0, PAGE_CACHE_SIZE);
- 	SetPageUptodate(page);
- 	unlock_page(page);
- 	return 0;
-@@ -100,10 +100,10 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
- 	pglen = PAGE_CACHE_SIZE - base;
- 	for (;;) {
- 		if (remainder <= pglen) {
--			zero_user(*pages, base, remainder);
-+			memclear_highpage_flush(*pages, base, remainder);
- 			break;
- 		}
--		zero_user(*pages, base, pglen);
-+		memclear_highpage_flush(*pages, base, pglen);
- 		pages++;
- 		remainder -= pglen;
- 		pglen = PAGE_CACHE_SIZE;
-@@ -127,7 +127,7 @@ static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
- 		return PTR_ERR(new);
- 	}
- 	if (len < PAGE_CACHE_SIZE)
--		zero_user_segment(page, len, PAGE_CACHE_SIZE);
-+		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
- 
- 	nfs_list_add_request(new, &one_request);
- 	if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
-@@ -548,7 +548,7 @@ readpage_async_filler(void *data, struct page *page)
- 		goto out_error;
- 
- 	if (len < PAGE_CACHE_SIZE)
--		zero_user_segment(page, len, PAGE_CACHE_SIZE);
-+		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
- 	if (!nfs_pageio_add_request(desc->pgio, new)) {
- 		error = desc->pgio->pg_error;
- 		goto out_unlock;
 diff --git a/fs/nfs/file.c b/fs/nfs/file.c
-index 7846065..b2cc9e1 100644
+index 7846065..7962293 100644
 --- a/fs/nfs/file.c
 +++ b/fs/nfs/file.c
-@@ -45,18 +45,13 @@ static int  nfs_file_mmap(struct file *, struct vm_area_struct *);
+@@ -45,16 +45,13 @@ static int  nfs_file_mmap(struct file *, struct vm_area_struct *);
  static ssize_t nfs_file_splice_read(struct file *filp, loff_t *ppos,
  					struct pipe_inode_info *pipe,
  					size_t count, unsigned int flags);
@@ -1386,28 +498,38 @@
 -				unsigned long nr_segs, loff_t pos);
 -static ssize_t nfs_file_write(struct kiocb *, const struct iovec *iov,
 -				unsigned long nr_segs, loff_t pos);
-+static ssize_t nfs_file_read(struct kiocb *, char __user *, size_t, loff_t);
-+static ssize_t nfs_file_write(struct kiocb *, const char __user *, size_t, loff_t);
++static ssize_t nfs_file_read(struct kiocb *iocb, char __user * buf, size_t count, loff_t pos);
++static ssize_t nfs_file_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos);
  static int  nfs_file_flush(struct file *, fl_owner_t id);
  static int  nfs_file_fsync(struct file *, struct dentry *dentry, int datasync);
  static int nfs_check_flags(int flags);
  static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl);
  static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl);
 -static int nfs_setlease(struct file *file, long arg, struct file_lock **fl);
--
--static struct vm_operations_struct nfs_file_vm_ops;
  
- const struct file_operations nfs_file_operations = {
- 	.llseek		= nfs_file_llseek,
-@@ -77,7 +72,6 @@ const struct file_operations nfs_file_operations = {
+ static struct vm_operations_struct nfs_file_vm_ops;
+ 
+@@ -77,17 +74,16 @@ const struct file_operations nfs_file_operations = {
  	.flock		= nfs_flock,
  	.splice_read	= nfs_file_splice_read,
  	.check_flags	= nfs_check_flags,
 -	.setlease	= nfs_setlease,
  };
  
- const struct inode_operations nfs_file_inode_operations = {
-@@ -120,8 +114,8 @@ nfs_file_open(struct inode *inode, struct file *filp)
+-const struct inode_operations nfs_file_inode_operations = {
++struct inode_operations nfs_file_inode_operations = {
+ 	.permission	= nfs_permission,
+ 	.getattr	= nfs_getattr,
+ 	.setattr	= nfs_setattr,
+ };
+ 
+ #ifdef CONFIG_NFS_V3
+-const struct inode_operations nfs3_file_inode_operations = {
++struct inode_operations nfs3_file_inode_operations = {
+ 	.permission	= nfs_permission,
+ 	.getattr	= nfs_getattr,
+ 	.setattr	= nfs_setattr,
+@@ -120,8 +116,8 @@ nfs_file_open(struct inode *inode, struct file *filp)
  	int res;
  
  	dprintk("NFS: open file(%s/%s)\n",
@@ -1418,7 +540,7 @@
  
  	res = nfs_check_flags(filp->f_flags);
  	if (res)
-@@ -135,7 +129,7 @@ nfs_file_open(struct inode *inode, struct file *filp)
+@@ -135,7 +131,7 @@ nfs_file_open(struct inode *inode, struct file *filp)
  static int
  nfs_file_release(struct inode *inode, struct file *filp)
  {
@@ -1427,7 +549,7 @@
  
  	dprintk("NFS: release(%s/%s)\n",
  			dentry->d_parent->d_name.name,
-@@ -178,11 +172,9 @@ force_reval:
+@@ -178,11 +174,9 @@ force_reval:
  
  static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
  {
@@ -1441,7 +563,7 @@
  			offset, origin);
  
  	/* origin == SEEK_END => we must revalidate the cached file length */
-@@ -192,10 +184,7 @@ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
+@@ -192,10 +186,7 @@ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
  		if (retval < 0)
  			return (loff_t)retval;
  	}
@@ -1453,7 +575,7 @@
  }
  
  /*
-@@ -230,7 +219,7 @@ static int
+@@ -230,7 +221,7 @@ static int
  nfs_file_flush(struct file *file, fl_owner_t id)
  {
  	struct nfs_open_context *ctx = nfs_file_open_context(file);
@@ -1462,7 +584,7 @@
  	struct inode	*inode = dentry->d_inode;
  	int		status;
  
-@@ -250,16 +239,14 @@ nfs_file_flush(struct file *file, fl_owner_t id)
+@@ -250,16 +241,15 @@ nfs_file_flush(struct file *file, fl_owner_t id)
  }
  
  static ssize_t
@@ -1475,14 +597,15 @@
  	struct inode * inode = dentry->d_inode;
  	ssize_t result;
 -	size_t count = iov_length(iov, nr_segs);
++	struct iovec local_iov = { .iov_base = buf, .iov_len = count };
  
  	if (iocb->ki_filp->f_flags & O_DIRECT)
 -		return nfs_file_direct_read(iocb, iov, nr_segs, pos);
-+		return nfs_file_direct_read(iocb, buf, count, pos);
++		return nfs_file_direct_read(iocb, &local_iov, 1, pos);
  
  	dprintk("NFS: read(%s/%s, %lu@%lu)\n",
  		dentry->d_parent->d_name.name, dentry->d_name.name,
-@@ -268,7 +255,7 @@ nfs_file_read(struct kiocb *iocb, const struct iovec *iov,
+@@ -268,7 +258,7 @@ nfs_file_read(struct kiocb *iocb, const struct iovec *iov,
  	result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
  	nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, count);
  	if (!result)
@@ -1491,7 +614,7 @@
  	return result;
  }
  
-@@ -277,7 +264,7 @@ nfs_file_splice_read(struct file *filp, loff_t *ppos,
+@@ -277,7 +267,7 @@ nfs_file_splice_read(struct file *filp, loff_t *ppos,
  		     struct pipe_inode_info *pipe, size_t count,
  		     unsigned int flags)
  {
@@ -1500,7 +623,7 @@
  	struct inode *inode = dentry->d_inode;
  	ssize_t res;
  
-@@ -294,7 +281,7 @@ nfs_file_splice_read(struct file *filp, loff_t *ppos,
+@@ -294,7 +284,7 @@ nfs_file_splice_read(struct file *filp, loff_t *ppos,
  static int
  nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
  {
@@ -1509,32 +632,7 @@
  	struct inode *inode = dentry->d_inode;
  	int	status;
  
-@@ -302,11 +289,8 @@ nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
- 		dentry->d_parent->d_name.name, dentry->d_name.name);
- 
- 	status = nfs_revalidate_mapping(inode, file->f_mapping);
--	if (!status) {
--		vma->vm_ops = &nfs_file_vm_ops;
--		vma->vm_flags |= VM_CAN_NONLINEAR;
--		file_accessed(file);
--	}
-+	if (!status) 
-+		status = generic_file_mmap(file, vma);
- 	return status;
- }
- 
-@@ -330,80 +314,27 @@ nfs_file_fsync(struct file *file, struct dentry *dentry, int datasync)
- }
- 
- /*
-- * This does the "real" work of the write. We must allocate and lock the
-- * page to be sent back to the generic routine, which then copies the
-- * data from user space.
-+ * This does the "real" work of the write. The generic routine has
-+ * allocated the page, locked it, done all the page alignment stuff
-+ * calculations etc. Now we should just copy the data from user
-+ * space and write it back to the real medium..
-  *
+@@ -337,44 +327,15 @@ nfs_file_fsync(struct file *file, struct dentry *dentry, int datasync)
   * If the writer ends up delaying the write, the writer needs to
   * increment the page use counts until he is done with the page.
   */
@@ -1573,65 +671,48 @@
 +static int nfs_commit_write(struct file *file, struct page *page, unsigned offset, unsigned to)
  {
 -	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
--	int status;
+ 	int status;
 -
 -	dfprintk(PAGECACHE, "NFS: write_end(%s/%s(%ld), %u@%lld)\n",
 -		file->f_path.dentry->d_parent->d_name.name,
 -		file->f_path.dentry->d_name.name,
 -		mapping->host->i_ino, len, (long long) pos);
-+	long status;
++	unsigned copied = to - offset;
  
--	/*
--	 * Zero any uninitialised parts of the page, and then mark the page
--	 * as up to date if it turns out that we're extending the file.
--	 */
--	if (!PageUptodate(page)) {
--		unsigned pglen = nfs_page_length(page);
+ 	/*
+ 	 * Zero any uninitialised parts of the page, and then mark the page
+@@ -382,14 +343,13 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
+ 	 */
+ 	if (!PageUptodate(page)) {
+ 		unsigned pglen = nfs_page_length(page);
 -		unsigned end = offset + len;
--
--		if (pglen == 0) {
--			zero_user_segments(page, 0, offset,
+ 
+ 		if (pglen == 0) {
+ 			zero_user_segments(page, 0, offset,
 -					end, PAGE_CACHE_SIZE);
--			SetPageUptodate(page);
++					to, PAGE_CACHE_SIZE);
+ 			SetPageUptodate(page);
 -		} else if (end >= pglen) {
 -			zero_user_segment(page, end, PAGE_CACHE_SIZE);
--			if (offset == 0)
--				SetPageUptodate(page);
--		} else
--			zero_user_segment(page, pglen, PAGE_CACHE_SIZE);
--	}
--
--	status = nfs_updatepage(file, page, offset, copied);
--
++		} else if (to >= pglen) {
++			zero_user_segment(page, to, PAGE_CACHE_SIZE);
+ 			if (offset == 0)
+ 				SetPageUptodate(page);
+ 		} else
+@@ -398,9 +358,6 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
+ 
+ 	status = nfs_updatepage(file, page, offset, copied);
+ 
 -	unlock_page(page);
 -	page_cache_release(page);
 -
--	if (status < 0)
--		return status;
--	return copied;
-+	lock_kernel();
-+	status = nfs_updatepage(file, page, offset, to-offset);
-+	unlock_kernel();
-+	return status;
+ 	if (status < 0)
+ 		return status;
+ 	return copied;
+@@ -424,34 +381,23 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
+ 	return 0;
  }
  
- static void nfs_invalidate_page(struct page *page, unsigned long offset)
-@@ -413,25 +344,21 @@ static void nfs_invalidate_page(struct page *page, unsigned long offset)
- 	if (offset != 0)
- 		return;
- 	/* Cancel any unstarted writes on this page */
--	nfs_wb_page_cancel(page->mapping->host, page);
-+	nfs_wb_page_priority(page->mapping->host, page, FLUSH_INVALIDATE);
- }
- 
- static int nfs_release_page(struct page *page, gfp_t gfp)
- {
- 	dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
- 
--	/* If PagePrivate() is set, then the page is not freeable */
--	return 0;
--}
--
 -static int nfs_launder_page(struct page *page)
 -{
 -	struct inode *inode = page->mapping->host;
@@ -1640,18 +721,11 @@
 -		inode->i_ino, (long long)page_offset(page));
 -
 -	return nfs_wb_page(inode, page);
-+	/*
-+	 * Avoid deadlock on nfs_wait_on_request().
-+	 */
-+	if (!(gfp & __GFP_FS))
-+		return 0;
-+	/* Hack... Force nfs_wb_page() to write out the page */
-+	SetPageDirty(page);
-+	return !nfs_wb_page(page->mapping->host, page);
- }
- 
+-}
+-
  const struct address_space_operations nfs_file_aops = {
-@@ -440,52 +367,11 @@ const struct address_space_operations nfs_file_aops = {
+ 	.readpage = nfs_readpage,
+ 	.readpages = nfs_readpages,
  	.set_page_dirty = __set_page_dirty_nobuffers,
  	.writepage = nfs_writepage,
  	.writepages = nfs_writepages,
@@ -1663,76 +737,49 @@
  	.releasepage = nfs_release_page,
  	.direct_IO = nfs_direct_IO,
 -	.launder_page = nfs_launder_page,
--};
--
--static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct page *page)
--{
--	struct file *filp = vma->vm_file;
+ };
+ 
+ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct page *page)
+ {
+ 	struct file *filp = vma->vm_file;
 -	struct dentry *dentry = filp->f_path.dentry;
--	unsigned pagelen;
--	int ret = -EINVAL;
--	struct address_space *mapping;
--
--	dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%s/%s(%ld), offset %lld)\n",
--		dentry->d_parent->d_name.name, dentry->d_name.name,
--		filp->f_mapping->host->i_ino,
--		(long long)page_offset(page));
--
--	lock_page(page);
--	mapping = page->mapping;
--	if (mapping != dentry->d_inode->i_mapping)
--		goto out_unlock;
--
--	ret = 0;
--	pagelen = nfs_page_length(page);
--	if (pagelen == 0)
--		goto out_unlock;
--
--	ret = nfs_flush_incompatible(filp, page);
--	if (ret != 0)
--		goto out_unlock;
--
--	ret = nfs_updatepage(filp, page, 0, pagelen);
--	if (ret == 0)
--		ret = pagelen;
--out_unlock:
--	unlock_page(page);
--	return ret;
--}
--
--static struct vm_operations_struct nfs_file_vm_ops = {
++	struct dentry *dentry = filp->f_dentry;
+ 	unsigned pagelen;
+ 	int ret = -EINVAL;
+ 	struct address_space *mapping;
+@@ -484,7 +430,8 @@ out_unlock:
+ }
+ 
+ static struct vm_operations_struct nfs_file_vm_ops = {
 -	.fault = filemap_fault,
--	.page_mkwrite = nfs_vm_page_mkwrite,
++	.nopage		= filemap_nopage,
++	.populate	= filemap_populate,
+ 	.page_mkwrite = nfs_vm_page_mkwrite,
  };
  
- static int nfs_need_sync_write(struct file *filp, struct inode *inode)
-@@ -500,20 +386,18 @@ static int nfs_need_sync_write(struct file *filp, struct inode *inode)
+@@ -500,16 +447,16 @@ static int nfs_need_sync_write(struct file *filp, struct inode *inode)
  	return 0;
  }
  
 -static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
 -				unsigned long nr_segs, loff_t pos)
-+static ssize_t nfs_file_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
++static ssize_t
++nfs_file_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
  {
 -	struct dentry * dentry = iocb->ki_filp->f_path.dentry;
 +	struct dentry * dentry = iocb->ki_filp->f_dentry;
  	struct inode * inode = dentry->d_inode;
  	ssize_t result;
 -	size_t count = iov_length(iov, nr_segs);
++	struct iovec local_iov = { .iov_base = (void __user *)buf, .iov_len = count };
  
  	if (iocb->ki_filp->f_flags & O_DIRECT)
 -		return nfs_file_direct_write(iocb, iov, nr_segs, pos);
-+		return nfs_file_direct_write(iocb, buf, count, pos);
++		return nfs_file_direct_write(iocb, &local_iov, 1, pos);
  
--	dprintk("NFS: write(%s/%s, %lu@%Ld)\n",
-+	dprintk("NFS: write(%s/%s, %lu@%lu)\n",
+ 	dprintk("NFS: write(%s/%s, %lu@%Ld)\n",
  		dentry->d_parent->d_name.name, dentry->d_name.name,
--		(unsigned long) count, (long long) pos);
-+		(unsigned long) count, (unsigned long) pos);
- 
- 	result = -EBUSY;
- 	if (IS_SWAPFILE(inode))
-@@ -532,7 +416,7 @@ static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
+@@ -532,7 +479,7 @@ static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
  		goto out;
  
  	nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count);
@@ -1741,7 +788,7 @@
  	/* Return error values for O_SYNC and IS_SYNC() */
  	if (result >= 0 && nfs_need_sync_write(iocb->ki_filp, inode)) {
  		int err = nfs_do_fsync(nfs_file_open_context(iocb->ki_filp), inode);
-@@ -549,14 +433,17 @@ out_swapfile:
+@@ -549,14 +496,19 @@ out_swapfile:
  
  static int do_getlk(struct file *filp, int cmd, struct file_lock *fl)
  {
@@ -1753,16 +800,17 @@
  	/* Try local locking first */
 -	posix_test_lock(filp, fl);
 -	if (fl->fl_type != F_UNLCK) {
--		/* found a conflict */
 +	if (posix_test_lock(filp, fl, &cfl)) {
+ 		/* found a conflict */
 +		fl->fl_start = cfl.fl_start;
 +		fl->fl_end = cfl.fl_end;
 +		fl->fl_type = cfl.fl_type;
 +		fl->fl_pid = cfl.fl_pid;
++
  		goto out;
  	}
  
-@@ -662,15 +549,16 @@ static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
+@@ -662,8 +614,8 @@ static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
  	int ret = -ENOLCK;
  
  	dprintk("NFS: lock(%s/%s, t=%x, fl=%x, r=%lld:%lld)\n",
@@ -1773,16 +821,7 @@
  			fl->fl_type, fl->fl_flags,
  			(long long)fl->fl_start, (long long)fl->fl_end);
  
- 	nfs_inc_stats(inode, NFSIOS_VFSLOCK);
- 
- 	/* No mandatory locks over NFS */
--	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
-+	if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID && 
-+			fl->fl_type != F_UNLCK)
- 		goto out_err;
- 
- 	if (NFS_PROTO(inode)->lock_check_bounds != NULL) {
-@@ -695,8 +583,8 @@ out_err:
+@@ -695,8 +647,8 @@ out_err:
  static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
  {
  	dprintk("NFS: flock(%s/%s, t=%x, fl=%x)\n",
@@ -1793,7 +832,7 @@
  			fl->fl_type, fl->fl_flags);
  
  	/*
-@@ -718,16 +606,3 @@ static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
+@@ -718,16 +670,3 @@ static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
  		return do_unlk(filp, cmd, fl);
  	return do_setlk(filp, cmd, fl);
  }
@@ -1810,241 +849,84 @@
 -
 -	return -EINVAL;
 -}
-diff --git a/fs/nfs/write.c b/fs/nfs/write.c
-index 3229e21..9e9325a 100644
---- a/fs/nfs/write.c
-+++ b/fs/nfs/write.c
-@@ -20,6 +20,8 @@
- #include <linux/nfs_page.h>
- #include <linux/backing-dev.h>
+diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
+index fae9719..5bf9b3c 100644
+--- a/fs/nfs/getroot.c
++++ b/fs/nfs/getroot.c
+@@ -30,7 +30,6 @@
+ #include <linux/nfs_idmap.h>
+ #include <linux/vfs.h>
+ #include <linux/namei.h>
+-#include <linux/mnt_namespace.h>
+ #include <linux/security.h>
  
-+#include <linux/mpage.h>
-+
- #include <asm/uaccess.h>
- 
- #include "delegation.h"
-@@ -181,41 +183,6 @@ static int wb_priority(struct writeback_control *wbc)
- }
- 
- /*
-- * NFS congestion control
-- */
--
--int nfs_congestion_kb;
--
--#define NFS_CONGESTION_ON_THRESH 	(nfs_congestion_kb >> (PAGE_SHIFT-10))
--#define NFS_CONGESTION_OFF_THRESH	\
--	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
--
--static int nfs_set_page_writeback(struct page *page)
--{
--	int ret = test_set_page_writeback(page);
--
--	if (!ret) {
--		struct inode *inode = page->mapping->host;
--		struct nfs_server *nfss = NFS_SERVER(inode);
--
--		if (atomic_long_inc_return(&nfss->writeback) >
--				NFS_CONGESTION_ON_THRESH)
--			set_bdi_congested(&nfss->backing_dev_info, WRITE);
--	}
--	return ret;
--}
--
--static void nfs_end_page_writeback(struct page *page)
--{
--	struct inode *inode = page->mapping->host;
--	struct nfs_server *nfss = NFS_SERVER(inode);
--
--	end_page_writeback(page);
--	if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
--		clear_bdi_congested(&nfss->backing_dev_info, WRITE);
--}
--
--/*
-  * Find an associated nfs write request, and prepare to flush it out
-  * May return an error if the user signalled nfs_wait_on_request().
+ #include <asm/system.h>
+diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
+index 86147b0..148aebe 100644
+--- a/fs/nfs/idmap.c
++++ b/fs/nfs/idmap.c
+@@ -376,7 +376,7 @@ idmap_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
+ static ssize_t
+ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
+ {
+-	struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
++	struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode);
+ 	struct idmap *idmap = (struct idmap *)rpci->private;
+ 	struct idmap_msg im_in, *im = &idmap->idmap_im;
+ 	struct idmap_hashtable *h;
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 52daefa..8e2b88a 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -612,7 +612,7 @@ static void put_nfs_open_context_sync(struct nfs_open_context *ctx)
   */
-@@ -251,7 +218,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
- 		spin_unlock(&inode->i_lock);
- 		BUG();
- 	}
--	if (nfs_set_page_writeback(page) != 0) {
-+	if (test_set_page_writeback(page) != 0) {
- 		spin_unlock(&inode->i_lock);
- 		BUG();
- 	}
-@@ -319,11 +286,11 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
- 	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
+ static void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx)
+ {
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 	struct nfs_inode *nfsi = NFS_I(inode);
  
- 	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
--	err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
-+	err = generic_writepages(mapping, wbc);
- 	nfs_pageio_complete(&pgio);
--	if (err < 0)
-+	if (err)
- 		return err;
--	if (pgio.pg_error < 0)
-+	if (pgio.pg_error)
- 		return pgio.pg_error;
- 	return 0;
- }
-@@ -411,7 +378,6 @@ nfs_mark_request_commit(struct nfs_page *req)
- 			NFS_PAGE_TAG_COMMIT);
- 	spin_unlock(&inode->i_lock);
- 	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
--	inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
- 	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
- }
+ 	filp->private_data = get_nfs_open_context(ctx);
+@@ -644,7 +644,7 @@ struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_c
  
-@@ -422,7 +388,6 @@ nfs_clear_request_commit(struct nfs_page *req)
- 
- 	if (test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) {
- 		dec_zone_page_state(page, NR_UNSTABLE_NFS);
--		dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE);
- 		return 1;
- 	}
- 	return 0;
-@@ -726,8 +691,8 @@ int nfs_updatepage(struct file *file, struct page *page,
- 	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
- 
- 	dprintk("NFS:       nfs_updatepage(%s/%s %d@%lld)\n",
--		file->f_path.dentry->d_parent->d_name.name,
--		file->f_path.dentry->d_name.name, count,
-+		file->f_dentry->d_parent->d_name.name,
-+		file->f_dentry->d_name.name, count,
- 		(long long)(page_offset(page) + offset));
- 
- 	/* If we're not using byte range locks, and we know the page
-@@ -757,10 +722,10 @@ static void nfs_writepage_release(struct nfs_page *req)
+ static void nfs_file_clear_open_context(struct file *filp)
  {
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 	struct nfs_open_context *ctx = nfs_file_open_context(filp);
  
- 	if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) {
--		nfs_end_page_writeback(req->wb_page);
-+		end_page_writeback(req->wb_page);
- 		nfs_inode_remove_request(req);
- 	} else
--		nfs_end_page_writeback(req->wb_page);
-+		end_page_writeback(req->wb_page);
- 	nfs_clear_page_tag_locked(req);
+ 	if (ctx) {
+@@ -667,7 +667,7 @@ int nfs_open(struct inode *inode, struct file *filp)
+ 	cred = rpc_lookup_cred();
+ 	if (IS_ERR(cred))
+ 		return PTR_ERR(cred);
+-	ctx = alloc_nfs_open_context(filp->f_path.mnt, filp->f_path.dentry, cred);
++	ctx = alloc_nfs_open_context(filp->f_vfsmnt, filp->f_dentry, cred);
+ 	put_rpccred(cred);
+ 	if (ctx == NULL)
+ 		return -ENOMEM;
+@@ -1242,7 +1242,7 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi)
+ #endif
  }
  
-@@ -854,7 +819,7 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
- static void nfs_redirty_request(struct nfs_page *req)
+-static void init_once(void *foo)
++static void init_once(void *foo, struct kmem_cache *cachep, unsigned long temp)
  {
- 	nfs_mark_request_dirty(req);
--	nfs_end_page_writeback(req->wb_page);
-+	end_page_writeback(req->wb_page);
- 	nfs_clear_page_tag_locked(req);
- }
+ 	struct nfs_inode *nfsi = (struct nfs_inode *) foo;
  
-@@ -1074,13 +1039,13 @@ static void nfs_writeback_release_full(void *calldata)
- 		if (nfs_write_need_commit(data)) {
- 			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
- 			nfs_mark_request_commit(req);
--			nfs_end_page_writeback(page);
-+			end_page_writeback(page);
- 			dprintk(" marked for commit\n");
- 			goto next;
- 		}
- 		dprintk(" OK\n");
- remove_request:
--		nfs_end_page_writeback(page);
-+		end_page_writeback(page);
- 		nfs_inode_remove_request(req);
- 	next:
- 		nfs_clear_page_tag_locked(req);
-@@ -1264,8 +1229,6 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
- 		nfs_list_remove_request(req);
- 		nfs_mark_request_commit(req);
- 		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
--		dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
--				BDI_RECLAIMABLE);
- 		nfs_clear_page_tag_locked(req);
- 	}
- 	return -ENOMEM;
-@@ -1452,51 +1415,7 @@ int nfs_wb_nocommit(struct inode *inode)
- 	return nfs_write_mapping(inode->i_mapping, FLUSH_NOCOMMIT);
- }
- 
--int nfs_wb_page_cancel(struct inode *inode, struct page *page)
--{
--	struct nfs_page *req;
--	loff_t range_start = page_offset(page);
--	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
--	struct writeback_control wbc = {
--		.bdi = page->mapping->backing_dev_info,
--		.sync_mode = WB_SYNC_ALL,
--		.nr_to_write = LONG_MAX,
--		.range_start = range_start,
--		.range_end = range_end,
--	};
--	int ret = 0;
--
--	BUG_ON(!PageLocked(page));
--	for (;;) {
--		req = nfs_page_find_request(page);
--		if (req == NULL)
--			goto out;
--		if (test_bit(PG_CLEAN, &req->wb_flags)) {
--			nfs_release_request(req);
--			break;
--		}
--		if (nfs_lock_request_dontget(req)) {
--			nfs_inode_remove_request(req);
--			/*
--			 * In case nfs_inode_remove_request has marked the
--			 * page as being dirty
--			 */
--			cancel_dirty_page(page, PAGE_CACHE_SIZE);
--			nfs_unlock_request(req);
--			break;
--		}
--		ret = nfs_wait_on_request(req);
--		if (ret < 0)
--			goto out;
--	}
--	if (!PagePrivate(page))
--		return 0;
--	ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
--out:
--	return ret;
--}
--
--static int nfs_wb_page_priority(struct inode *inode, struct page *page,
-+int nfs_wb_page_priority(struct inode *inode, struct page *page,
- 				int how)
+@@ -1314,6 +1314,10 @@ static int __init init_nfs_fs(void)
  {
- 	loff_t range_start = page_offset(page);
-@@ -1554,26 +1473,6 @@ int __init nfs_init_writepagecache(void)
- 	if (nfs_commit_mempool == NULL)
- 		return -ENOMEM;
+ 	int err;
  
--	/*
--	 * NFS congestion size, scale with available memory.
--	 *
--	 *  64MB:    8192k
--	 * 128MB:   11585k
--	 * 256MB:   16384k
--	 * 512MB:   23170k
--	 *   1GB:   32768k
--	 *   2GB:   46340k
--	 *   4GB:   65536k
--	 *   8GB:   92681k
--	 *  16GB:  131072k
--	 *
--	 * This allows larger machines to have larger/more transfers.
--	 * Limit the default to 256M
--	 */
--	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
--	if (nfs_congestion_kb > 256*1024)
--		nfs_congestion_kb = 256*1024;
--
- 	return 0;
- }
- 
++	err = init_mnt_writers();
++	if (err)
++		goto out6;
++
+ 	err = nfsiod_start();
+ 	if (err)
+ 		goto out6;
 diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
-index 66df08d..44c0445 100644
+index 66df08d..1e11b1d 100644
 --- a/fs/nfs/namespace.c
 +++ b/fs/nfs/namespace.c
 @@ -107,29 +107,29 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
@@ -2084,27 +966,25 @@
  			   &nfs_automount_list);
  	if (err < 0) {
  		mntput(mnt);
-@@ -137,9 +137,10 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
+@@ -137,9 +137,9 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
  			goto out_follow;
  		goto out_err;
  	}
 -	path_put(&nd->path);
 -	nd->path.mnt = mnt;
 -	nd->path.dentry = dget(mnt->mnt_root);
-+	dput(nd->dentry);
-+	mntput(nd->mnt);
++	backport_path_put(nd);
 +	nd->mnt = mnt;
 +	nd->dentry = dget(mnt->mnt_root);
  	schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout);
  out:
  	dprintk("%s: done, returned %d\n", __func__, err);
-@@ -147,11 +148,12 @@ out:
+@@ -147,22 +147,22 @@ out:
  	dprintk("<-- nfs_follow_mountpoint() = %d\n", err);
  	return ERR_PTR(err);
  out_err:
 -	path_put(&nd->path);
-+	dput(nd->dentry);
-+	mntput(nd->mnt);
++	backport_path_put(nd);
  	goto out;
  out_follow:
 -	while (d_mountpoint(nd->path.dentry) &&
@@ -2114,32 +994,25 @@
  		;
  	err = 0;
  	goto out;
+ }
+ 
+-const struct inode_operations nfs_mountpoint_inode_operations = {
++struct inode_operations nfs_mountpoint_inode_operations = {
+ 	.follow_link	= nfs_follow_mountpoint,
+ 	.getattr	= nfs_getattr,
+ };
+ 
+-const struct inode_operations nfs_referral_inode_operations = {
++struct inode_operations nfs_referral_inode_operations = {
+ 	.follow_link	= nfs_follow_mountpoint,
+ };
+ 
 diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
-index 1e750e4..a078225 100644
+index 1e750e4..bdeef69 100644
 --- a/fs/nfs/nfs3proc.c
 +++ b/fs/nfs/nfs3proc.c
-@@ -27,14 +27,17 @@
+@@ -779,7 +779,7 @@ static void nfs3_proc_commit_setup(struct nfs_write_data *data, struct rpc_messa
  static int
- nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
- {
-+	sigset_t oldset;
- 	int res;
-+	rpc_clnt_sigmask(clnt, &oldset);
- 	do {
- 		res = rpc_call_sync(clnt, msg, flags);
- 		if (res != -EJUKEBOX)
- 			break;
--		schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
-+		schedule_timeout_interruptible(NFS_JUKEBOX_RETRY_TIME);
- 		res = -ERESTARTSYS;
--	} while (!fatal_signal_pending(current));
-+	} while (!signalled());
-+	rpc_clnt_sigunmask(clnt, &oldset);
- 	return res;
- }
- 
-@@ -779,7 +782,7 @@ static void nfs3_proc_commit_setup(struct nfs_write_data *data, struct rpc_messa
- static int
  nfs3_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
  {
 -	struct inode *inode = filp->f_path.dentry->d_inode;
@@ -2147,44 +1020,24 @@
  
  	return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl);
  }
+diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
+index ea79064..7a8e6fa 100644
+--- a/fs/nfs/nfs4_fs.h
++++ b/fs/nfs/nfs4_fs.h
+@@ -165,7 +165,7 @@ struct nfs4_state_recovery_ops {
+ };
+ 
+ extern struct dentry_operations nfs4_dentry_operations;
+-extern const struct inode_operations nfs4_dir_inode_operations;
++extern struct inode_operations nfs4_dir_inode_operations;
+ 
+ /* inode.c */
+ extern ssize_t nfs4_getxattr(struct dentry *, const char *, void *, size_t);
 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
-index c910413..d89ea38 100644
+index c910413..02f1156 100644
 --- a/fs/nfs/nfs4proc.c
 +++ b/fs/nfs/nfs4proc.c
-@@ -306,7 +306,8 @@ static void nfs4_opendata_free(struct kref *kref)
- 		nfs4_put_open_state(p->state);
- 	nfs4_put_state_owner(p->owner);
- 	dput(p->dir);
--	path_put(&p->path);
-+	dput(p->path.dentry);
-+	mntput(p->path.mnt);
- 	kfree(p);
- }
- 
-@@ -318,9 +319,12 @@ static void nfs4_opendata_put(struct nfs4_opendata *p)
- 
- static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
- {
-+	sigset_t oldset;
- 	int ret;
- 
-+	rpc_clnt_sigmask(task->tk_client, &oldset);
- 	ret = rpc_wait_for_completion_task(task);
-+	rpc_clnt_sigunmask(task->tk_client, &oldset);
- 	return ret;
- }
- 
-@@ -1209,7 +1213,8 @@ static void nfs4_free_closedata(void *data)
- 	nfs4_put_open_state(calldata->state);
- 	nfs_free_seqid(calldata->arg.seqid);
- 	nfs4_put_state_owner(sp);
--	path_put(&calldata->path);
-+	dput(calldata->path.dentry);
-+	mntput(calldata->path.mnt);
- 	kfree(calldata);
- }
- 
-@@ -1384,7 +1389,7 @@ struct dentry *
+@@ -1384,7 +1384,7 @@ struct dentry *
  nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
  {
  	struct path path = {
@@ -2193,7 +1046,18 @@
  		.dentry = dentry,
  	};
  	struct dentry *parent;
-@@ -1432,7 +1437,7 @@ int
+@@ -1421,8 +1421,8 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+ 	}
+ 	res = d_add_unique(dentry, igrab(state->inode));
+ 	if (res != NULL)
+-		path.dentry = res;
+-	nfs_set_verifier(path.dentry, nfs_save_change_attribute(dir));
++		dentry = res;
++	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ 	nfs_unblock_sillyrename(parent);
+ 	nfs4_intent_set_file(nd, &path, state);
+ 	return res;
+@@ -1432,7 +1432,7 @@ int
  nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, struct nameidata *nd)
  {
  	struct path path = {
@@ -2202,7 +1066,7 @@
  		.dentry = dentry,
  	};
  	struct rpc_cred *cred;
-@@ -1880,7 +1885,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
+@@ -1880,7 +1880,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
                   int flags, struct nameidata *nd)
  {
  	struct path path = {
@@ -2211,523 +1075,166 @@
  		.dentry = dentry,
  	};
  	struct nfs4_state *state;
-@@ -2770,9 +2775,9 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server)
- 	return 0;
- }
+@@ -3671,7 +3671,7 @@ struct nfs4_state_recovery_ops nfs4_network_partition_recovery_ops = {
+ 	.recover_lock	= nfs4_lock_expired,
+ };
  
--static int nfs4_wait_bit_killable(void *word)
-+static int nfs4_wait_bit_interruptible(void *word)
+-static const struct inode_operations nfs4_file_inode_operations = {
++static struct inode_operations nfs4_file_inode_operations = {
+ 	.permission	= nfs_permission,
+ 	.getattr	= nfs_getattr,
+ 	.setattr	= nfs_setattr,
+diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
+index 4dbb84d..c351a41 100644
+--- a/fs/nfs/proc.c
++++ b/fs/nfs/proc.c
+@@ -595,7 +595,7 @@ nfs_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
+ static int
+ nfs_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
  {
--	if (fatal_signal_pending(current))
-+	if (signal_pending(current))
- 		return -ERESTARTSYS;
- 	schedule();
- 	return 0;
-@@ -2780,14 +2785,17 @@ static int nfs4_wait_bit_killable(void *word)
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
  
- static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs_client *clp)
- {
-+	sigset_t oldset;
- 	int res;
+ 	return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl);
+ }
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index e9b2017..85ea5fd 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -201,7 +201,7 @@ static match_table_t nfs_secflavor_tokens = {
+ };
  
- 	might_sleep();
  
- 	rwsem_acquire(&clp->cl_sem.dep_map, 0, 0, _RET_IP_);
+-static void nfs_umount_begin(struct super_block *);
++static void nfs_umount_begin(struct vfsmount *vfsmnt, int flags);
+ static int  nfs_statfs(struct dentry *, struct kstatfs *);
+ static int  nfs_show_options(struct seq_file *, struct vfsmount *);
+ static int  nfs_show_stats(struct seq_file *, struct vfsmount *);
+@@ -228,7 +228,7 @@ struct file_system_type nfs_xdev_fs_type = {
+ 	.fs_flags	= FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+ };
  
-+	rpc_clnt_sigmask(clnt, &oldset);
- 	res = wait_on_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER,
--			nfs4_wait_bit_killable, TASK_KILLABLE);
-+			nfs4_wait_bit_interruptible, TASK_INTERRUPTIBLE);
-+	rpc_clnt_sigunmask(clnt, &oldset);
+-static const struct super_operations nfs_sops = {
++static struct super_operations nfs_sops = {
+ 	.alloc_inode	= nfs_alloc_inode,
+ 	.destroy_inode	= nfs_destroy_inode,
+ 	.write_inode	= nfs_write_inode,
+@@ -274,7 +274,7 @@ struct file_system_type nfs4_referral_fs_type = {
+ 	.fs_flags	= FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+ };
  
- 	rwsem_release(&clp->cl_sem.dep_map, 1, _RET_IP_);
- 	return res;
-@@ -2795,6 +2803,7 @@ static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs_client *clp)
+-static const struct super_operations nfs4_sops = {
++static struct super_operations nfs4_sops = {
+ 	.alloc_inode	= nfs_alloc_inode,
+ 	.destroy_inode	= nfs_destroy_inode,
+ 	.write_inode	= nfs_write_inode,
+@@ -287,10 +287,7 @@ static const struct super_operations nfs4_sops = {
+ };
+ #endif
  
- static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
- {
-+	sigset_t oldset;
- 	int res = 0;
+-static struct shrinker acl_shrinker = {
+-	.shrink		= nfs_access_cache_shrinker,
+-	.seeks		= DEFAULT_SEEKS,
+-};
++static struct shrinker *acl_shrinker;
  
- 	might_sleep();
-@@ -2803,9 +2812,14 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
- 		*timeout = NFS4_POLL_RETRY_MIN;
- 	if (*timeout > NFS4_POLL_RETRY_MAX)
- 		*timeout = NFS4_POLL_RETRY_MAX;
--	schedule_timeout_killable(*timeout);
--	if (fatal_signal_pending(current))
--		res = -ERESTARTSYS;
-+	rpc_clnt_sigmask(clnt, &oldset);
-+	if (clnt->cl_intr) {
-+		schedule_timeout_interruptible(*timeout);
-+		if (signalled())
-+			res = -ERESTARTSYS;
-+	} else
-+		schedule_timeout_uninterruptible(*timeout);
-+	rpc_clnt_sigunmask(clnt, &oldset);
- 	*timeout <<= 1;
- 	return res;
- }
-@@ -3044,7 +3058,7 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4
- static unsigned long
- nfs4_set_lock_task_retry(unsigned long timeout)
- {
--	schedule_timeout_killable(timeout);
-+	schedule_timeout_interruptible(timeout);
- 	timeout <<= 1;
- 	if (timeout > NFS4_LOCK_MAXTIMEOUT)
- 		return NFS4_LOCK_MAXTIMEOUT;
-diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
-index 3305acb..a535728 100644
---- a/fs/nfs/nfs4renewd.c
-+++ b/fs/nfs/nfs4renewd.c
-@@ -127,13 +127,15 @@ nfs4_schedule_state_renewal(struct nfs_client *clp)
- void
- nfs4_renewd_prepare_shutdown(struct nfs_server *server)
- {
-+	flush_scheduled_work();
- 	cancel_delayed_work(&server->nfs_client->cl_renewd);
- }
- 
- void
- nfs4_kill_renewd(struct nfs_client *clp)
- {
--	cancel_delayed_work_sync(&clp->cl_renewd);
-+	cancel_delayed_work(&clp->cl_renewd);
-+	flush_scheduled_work();
- }
- 
  /*
-diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
-index 86147b0..148aebe 100644
---- a/fs/nfs/idmap.c
-+++ b/fs/nfs/idmap.c
-@@ -376,7 +376,7 @@ idmap_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
- static ssize_t
- idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
- {
--	struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
-+	struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode);
- 	struct idmap *idmap = (struct idmap *)rpci->private;
- 	struct idmap_msg im_in, *im = &idmap->idmap_im;
- 	struct idmap_hashtable *h;
-diff --git a/fs/nfs/sysctl.c b/fs/nfs/sysctl.c
-index b62481d..bfb2c54 100644
---- a/fs/nfs/sysctl.c
-+++ b/fs/nfs/sysctl.c
-@@ -83,7 +83,7 @@ static ctl_table nfs_cb_sysctl_root[] = {
- 
- int nfs_register_sysctl(void)
- {
--	nfs_callback_sysctl_table = register_sysctl_table(nfs_cb_sysctl_root);
-+	nfs_callback_sysctl_table = register_sysctl_table(nfs_cb_sysctl_root, 0);
- 	if (nfs_callback_sysctl_table == NULL)
- 		return -ENOMEM;
+  * Register the NFS filesystems
+@@ -311,7 +308,7 @@ int __init register_nfs_fs(void)
+ 	if (ret < 0)
+ 		goto error_2;
+ #endif
+-	register_shrinker(&acl_shrinker);
++	acl_shrinker = set_shrinker(DEFAULT_SEEKS, nfs_access_cache_shrinker);
  	return 0;
-diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
-index c53e65f..092da9b 100644
---- a/fs/nfsd/nfsctl.c
-+++ b/fs/nfsd/nfsctl.c
-@@ -121,7 +121,7 @@ static ssize_t (*write_op[])(struct file *, char *, size_t) = {
  
- static ssize_t nfsctl_transaction_write(struct file *file, const char __user *buf, size_t size, loff_t *pos)
+ #ifdef CONFIG_NFS_V4
+@@ -329,7 +326,8 @@ error_0:
+  */
+ void __exit unregister_nfs_fs(void)
  {
--	ino_t ino =  file->f_path.dentry->d_inode->i_ino;
-+	ino_t ino =  file->f_dentry->d_inode->i_ino;
- 	char *data;
- 	ssize_t rv;
+-	unregister_shrinker(&acl_shrinker);
++	if (acl_shrinker != NULL)
++		remove_shrinker(acl_shrinker);
+ #ifdef CONFIG_NFS_V4
+ 	unregister_filesystem(&nfs4_fs_type);
+ #endif
+@@ -649,11 +647,13 @@ static int nfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
+  * Begin unmount by attempting to remove all automounted mountpoints we added
+  * in response to xdev traversals and referrals
+  */
+-static void nfs_umount_begin(struct super_block *sb)
++static void nfs_umount_begin(struct vfsmount *vfsmnt, int flags)
+ {
+-	struct nfs_server *server = NFS_SB(sb);
++	struct nfs_server *server = NFS_SB(vfsmnt->mnt_sb);
+ 	struct rpc_clnt *rpc;
  
-@@ -360,9 +360,10 @@ static ssize_t failover_unlock_fs(struct file *file, char *buf, size_t size)
- 	if (error)
- 		return error;
++	if (!(flags & MNT_FORCE))
++		return;
+ 	/* -EIO all pending I/O */
+ 	rpc = server->client_acl;
+ 	if (!IS_ERR(rpc))
+diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c
+index 412738d..b17f14a 100644
+--- a/fs/nfs/symlink.c
++++ b/fs/nfs/symlink.c
+@@ -70,7 +70,7 @@ read_failed:
+ /*
+  * symlinks can't do much...
+  */
+-const struct inode_operations nfs_symlink_inode_operations = {
++struct inode_operations nfs_symlink_inode_operations = {
+ 	.readlink	= generic_readlink,
+ 	.follow_link	= nfs_follow_link,
+ 	.put_link	= page_put_link,
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 3229e21..7dd87ba 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -20,6 +20,8 @@
+ #include <linux/nfs_page.h>
+ #include <linux/backing-dev.h>
  
--	error = nlmsvc_unlock_all_by_sb(nd.path.mnt->mnt_sb);
-+	error = nlmsvc_unlock_all_by_sb(nd.mnt->mnt_sb);
- 
--	path_put(&nd.path);
-+	dput(nd.dentry);
-+	mntput(nd.mnt);
- 	return error;
- }
- 
-@@ -857,9 +858,10 @@ static int create_proc_exports_entry(void)
- 	entry = proc_mkdir("fs/nfs", NULL);
- 	if (!entry)
- 		return -ENOMEM;
--	entry = proc_create("exports", 0, entry, &exports_operations);
-+	entry = create_proc_entry("exports", 0, NULL);
- 	if (!entry)
- 		return -ENOMEM;
-+	entry->proc_fops = &exports_operations;
- 	return 0;
- }
- #else /* CONFIG_PROC_FS */
-diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
-index ea37c96..dbd3dfd 100644
---- a/fs/nfsd/nfsfh.c
-+++ b/fs/nfsd/nfsfh.c
-@@ -51,7 +51,7 @@ static int nfsd_acceptable(void *expv, struct dentry *dentry)
- 		/* make sure parents give x permission to user */
- 		int err;
- 		parent = dget_parent(tdentry);
--		err = inode_permission(parent->d_inode, MAY_EXEC);
-+		err = permission(parent->d_inode, MAY_EXEC, NULL);
- 		if (err < 0) {
- 			dput(parent);
- 			break;
-@@ -186,8 +186,10 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
- 		 * access control settings being in effect, we cannot
- 		 * fix that case easily.
- 		 */
--		current->cap_effective =
--			cap_raise_nfsd_set(current->cap_effective,
++#include <linux/mpage.h>
 +
-+#define CAP_NFSD_MASK (CAP_FS_MASK|CAP_TO_MASK(CAP_SYS_RESOURCE))
-+		
-+		cap_t(current->cap_effective) |= (CAP_NFSD_MASK &
- 					   current->cap_permitted);
- 	} else {
- 		error = nfsd_setuser_and_check_port(rqstp, exp);
-diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
-index 18060be..9d98ccc 100644
---- a/fs/nfsd/vfs.c
-+++ b/fs/nfsd/vfs.c
-@@ -369,7 +369,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
- 	/* Revoke setuid/setgid on chown */
- 	if (((iap->ia_valid & ATTR_UID) && iap->ia_uid != inode->i_uid) ||
- 	    ((iap->ia_valid & ATTR_GID) && iap->ia_gid != inode->i_gid)) {
--		iap->ia_valid |= ATTR_KILL_PRIV;
-+		iap->ia_valid |= ATTR_KILL_SUID;
- 		if (iap->ia_valid & ATTR_MODE) {
- 			/* we're setting mode too, just clear the s*id bits */
- 			iap->ia_mode &= ~S_ISUID;
-@@ -697,7 +697,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
- 	 * locks on them because there is no way to know if the accesser has
- 	 * the lock.
- 	 */
--	if (S_ISREG((inode)->i_mode) && mandatory_lock(inode))
-+	if (S_ISREG((inode)->i_mode) && MANDATORY_LOCK(inode))
- 		goto out;
+ #include <asm/uaccess.h>
  
- 	if (!inode->i_fop)
-@@ -766,10 +766,10 @@ static int
- nfsd_sync(struct file *filp)
- {
-         int err;
--	struct inode *inode = filp->f_path.dentry->d_inode;
--	dprintk("nfsd: sync file %s\n", filp->f_path.dentry->d_name.name);
-+	struct inode *inode = filp->f_dentry->d_inode;
-+	dprintk("nfsd: sync file %s\n", filp->f_dentry->d_name.name);
- 	mutex_lock(&inode->i_mutex);
--	err=nfsd_dosync(filp, filp->f_path.dentry, filp->f_op);
-+	err=nfsd_dosync(filp, filp->f_dentry, filp->f_op);
- 	mutex_unlock(&inode->i_mutex);
+ #include "delegation.h"
+@@ -198,7 +200,7 @@ static int nfs_set_page_writeback(struct page *page)
+ 		struct inode *inode = page->mapping->host;
+ 		struct nfs_server *nfss = NFS_SERVER(inode);
  
- 	return err;
-@@ -895,7 +895,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
- 	int		host_err;
- 
- 	err = nfserr_perm;
--	inode = file->f_path.dentry->d_inode;
-+	inode = file->f_dentry->d_inode;
- 
- 	if (svc_msnfs(fhp) && !lock_may_read(inode, offset, *count))
- 		goto out;
-@@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
- 		nfsdstats.io_read += host_err;
- 		*count = host_err;
- 		err = 0;
--		fsnotify_access(file->f_path.dentry);
-+		fsnotify_access(file->f_dentry);
- 	} else 
- 		err = nfserrno(host_err);
- out:
-@@ -947,7 +947,7 @@ out:
- static void kill_suid(struct dentry *dentry)
- {
- 	struct iattr	ia;
--	ia.ia_valid = ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
-+	ia.ia_valid = ATTR_KILL_SUID | ATTR_KILL_SGID;
- 
- 	mutex_lock(&dentry->d_inode->i_mutex);
- 	notify_change(dentry, &ia);
-@@ -971,11 +971,11 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
- 	err = nfserr_perm;
- 
- 	if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
--		(!lock_may_write(file->f_path.dentry->d_inode, offset, cnt)))
-+		(!lock_may_write(file->f_dentry->d_inode, offset, cnt)))
- 		goto out;
- #endif
- 
--	dentry = file->f_path.dentry;
-+	dentry = file->f_dentry;
- 	inode = dentry->d_inode;
- 	exp   = fhp->fh_export;
- 
-@@ -1004,7 +1004,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
- 	set_fs(oldfs);
- 	if (host_err >= 0) {
- 		nfsdstats.io_write += cnt;
--		fsnotify_modify(file->f_path.dentry);
-+		fsnotify_modify(file->f_dentry);
+-		if (atomic_long_inc_return(&nfss->writeback) >
++		if (atomic_long_inc_return((atomic_long_t *)&nfss->writeback) >
+ 				NFS_CONGESTION_ON_THRESH)
+ 			set_bdi_congested(&nfss->backing_dev_info, WRITE);
  	}
+@@ -211,7 +213,7 @@ static void nfs_end_page_writeback(struct page *page)
+ 	struct nfs_server *nfss = NFS_SERVER(inode);
  
- 	/* clear setuid/setgid flag after write */
-@@ -1030,13 +1030,13 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
- 		if (EX_WGATHER(exp)) {
- 			if (atomic_read(&inode->i_writecount) > 1
- 			    || (last_ino == inode->i_ino && last_dev == inode->i_sb->s_dev)) {
--				dprintk("nfsd: write defer %d\n", task_pid_nr(current));
-+				dprintk("nfsd: write defer %d\n", current->pid);
- 				msleep(10);
--				dprintk("nfsd: write resume %d\n", task_pid_nr(current));
-+				dprintk("nfsd: write resume %d\n", current->pid);
- 			}
- 
- 			if (inode->i_state & I_DIRTY) {
--				dprintk("nfsd: write sync %d\n", task_pid_nr(current));
-+				dprintk("nfsd: write sync %d\n", current->pid);
- 				host_err=nfsd_sync(file);
- 			}
- #if 0
-@@ -1257,10 +1257,6 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 		goto out;
- 	}
- 
--	host_err = mnt_want_write(fhp->fh_export->ex_path.mnt);
--	if (host_err)
--		goto out_nfserr;
--
- 	/*
- 	 * Get the dir op function pointer.
- 	 */
-@@ -1279,10 +1275,8 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 		host_err = vfs_mknod(dirp, dchild, iap->ia_mode, rdev);
- 		break;
- 	}
--	if (host_err < 0) {
--		mnt_drop_write(fhp->fh_export->ex_path.mnt);
-+	if (host_err < 0)
- 		goto out_nfserr;
--	}
- 
- 	if (EX_ISSYNC(fhp->fh_export)) {
- 		err = nfserrno(nfsd_sync_dir(dentry));
-@@ -1292,7 +1286,6 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 	err2 = nfsd_create_setattr(rqstp, resfhp, iap);
- 	if (err2)
- 		err = err2;
--	mnt_drop_write(fhp->fh_export->ex_path.mnt);
- 	/*
- 	 * Update the file handle to get the new inode info.
- 	 */
-@@ -1370,9 +1363,6 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 		v_atime = verifier[1]&0x7fffffff;
- 	}
- 	
--	host_err = mnt_want_write(fhp->fh_export->ex_path.mnt);
--	if (host_err)
--		goto out_nfserr;
- 	if (dchild->d_inode) {
- 		err = 0;
- 
-@@ -1404,15 +1394,12 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 		case NFS3_CREATE_GUARDED:
- 			err = nfserr_exist;
- 		}
--		mnt_drop_write(fhp->fh_export->ex_path.mnt);
- 		goto out;
- 	}
- 
- 	host_err = vfs_create(dirp, dchild, iap->ia_mode, NULL);
--	if (host_err < 0) {
--		mnt_drop_write(fhp->fh_export->ex_path.mnt);
-+	if (host_err < 0)
- 		goto out_nfserr;
--	}
- 	if (created)
- 		*created = 1;
- 
-@@ -1437,7 +1424,6 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 	if (err2)
- 		err = err2;
- 
--	mnt_drop_write(fhp->fh_export->ex_path.mnt);
- 	/*
- 	 * Update the filehandle to get the new inode info.
- 	 */
-@@ -1516,6 +1502,7 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 	struct dentry	*dentry, *dnew;
- 	__be32		err, cerr;
- 	int		host_err;
-+	umode_t		mode;
- 
- 	err = nfserr_noent;
- 	if (!flen || !plen)
-@@ -1534,9 +1521,10 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 	if (IS_ERR(dnew))
- 		goto out_nfserr;
- 
--	host_err = mnt_want_write(fhp->fh_export->ex_path.mnt);
--	if (host_err)
--		goto out_nfserr;
-+	mode = S_IALLUGO;
-+	/* Only the MODE ATTRibute is even vaguely meaningful */
-+	if (iap && (iap->ia_valid & ATTR_MODE))
-+		mode = iap->ia_mode & S_IALLUGO;
- 
- 	if (unlikely(path[plen] != 0)) {
- 		char *path_alloced = kmalloc(plen+1, GFP_KERNEL);
-@@ -1545,11 +1533,11 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 		else {
- 			strncpy(path_alloced, path, plen);
- 			path_alloced[plen] = 0;
--			host_err = vfs_symlink(dentry->d_inode, dnew, path_alloced);
-+			host_err = vfs_symlink(dentry->d_inode, dnew, path_alloced, mode);
- 			kfree(path_alloced);
- 		}
- 	} else
--		host_err = vfs_symlink(dentry->d_inode, dnew, path);
-+		host_err = vfs_symlink(dentry->d_inode, dnew, path, mode);
- 
- 	if (!host_err) {
- 		if (EX_ISSYNC(fhp->fh_export))
-@@ -1558,8 +1546,6 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 	err = nfserrno(host_err);
- 	fh_unlock(fhp);
- 
--	mnt_drop_write(fhp->fh_export->ex_path.mnt);
--
- 	cerr = fh_compose(resfhp, fhp->fh_export, dnew, fhp);
- 	dput(dnew);
- 	if (err==0) err = cerr;
-@@ -1610,11 +1596,6 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
- 	dold = tfhp->fh_dentry;
- 	dest = dold->d_inode;
- 
--	host_err = mnt_want_write(tfhp->fh_export->ex_path.mnt);
--	if (host_err) {
--		err = nfserrno(host_err);
--		goto out_dput;
--	}
- 	host_err = vfs_link(dold, dirp, dnew);
- 	if (!host_err) {
- 		if (EX_ISSYNC(ffhp->fh_export)) {
-@@ -1628,8 +1609,6 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
- 		else
- 			err = nfserrno(host_err);
- 	}
--	mnt_drop_write(tfhp->fh_export->ex_path.mnt);
--out_dput:
- 	dput(dnew);
- out_unlock:
- 	fh_unlock(ffhp);
-@@ -1712,9 +1691,6 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
- 	host_err = -EXDEV;
- 	if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
- 		goto out_dput_new;
--	host_err = mnt_want_write(ffhp->fh_export->ex_path.mnt);
--	if (host_err)
--		goto out_dput_new;
- 
- 	host_err = vfs_rename(fdir, odentry, tdir, ndentry);
- 	if (!host_err && EX_ISSYNC(tfhp->fh_export)) {
-@@ -1723,8 +1699,6 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
- 			host_err = nfsd_sync_dir(fdentry);
- 	}
- 
--	mnt_drop_write(ffhp->fh_export->ex_path.mnt);
--
-  out_dput_new:
- 	dput(ndentry);
-  out_dput_old:
-@@ -1783,10 +1757,6 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
- 	if (!type)
- 		type = rdentry->d_inode->i_mode & S_IFMT;
- 
--	host_err = mnt_want_write(fhp->fh_export->ex_path.mnt);
--	if (host_err)
--		goto out_nfserr;
--
- 	if (type != S_IFDIR) { /* It's UNLINK */
- #ifdef MSNFS
- 		if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
-@@ -1807,7 +1777,6 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
- 		host_err = nfsd_sync_dir(dentry);
- 
- out_drop:
--	mnt_drop_write(fhp->fh_export->ex_path.mnt);
- out_nfserr:
- 	err = nfserrno(host_err);
- out:
-@@ -1904,7 +1873,7 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
- 		inode->i_mode,
- 		IS_IMMUTABLE(inode)?	" immut" : "",
- 		IS_APPEND(inode)?	" append" : "",
--		__mnt_is_readonly(exp->ex_path.mnt)?	" ro" : "");
-+		IS_RDONLY(inode)?	" ro" : "");
- 	dprintk("      owner %d/%d user %d/%d\n",
- 		inode->i_uid, inode->i_gid, current->fsuid, current->fsgid);
- #endif
-@@ -1916,7 +1885,7 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
- 	if (!(acc & NFSD_MAY_LOCAL_ACCESS))
- 		if (acc & (NFSD_MAY_WRITE | NFSD_MAY_SATTR | NFSD_MAY_TRUNC)) {
- 			if (exp_rdonly(rqstp, exp) ||
--			    __mnt_is_readonly(exp->ex_path.mnt))
-+			    IS_RDONLY(inode))
- 				return nfserr_rofs;
- 			if (/* (acc & NFSD_MAY_WRITE) && */ IS_IMMUTABLE(inode))
- 				return nfserr_perm;
-@@ -1953,12 +1922,12 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
- 		return 0;
- 
- 	/* This assumes  NFSD_MAY_{READ,WRITE,EXEC} == MAY_{READ,WRITE,EXEC} */
--	err = inode_permission(inode, acc & (MAY_READ|MAY_WRITE|MAY_EXEC));
-+	err = permission(inode, acc & (MAY_READ|MAY_WRITE|MAY_EXEC), NULL);
- 
- 	/* Allow read access to binaries even when mode 111 */
- 	if (err == -EACCES && S_ISREG(inode->i_mode) &&
- 	    acc == (NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE))
--		err = inode_permission(inode, MAY_EXEC);
-+		err = permission(inode, MAY_EXEC, NULL);
- 
- 	return err? nfserrno(err) : 0;
+ 	end_page_writeback(page);
+-	if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
++	if (atomic_long_dec_return((atomic_long_t *)&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
+ 		clear_bdi_congested(&nfss->backing_dev_info, WRITE);
  }
-@@ -2080,7 +2049,6 @@ nfsd_set_posix_acl(struct svc_fh *fhp, int type, struct posix_acl *acl)
- 	} else
- 		size = 0;
  
--	error = mnt_want_write(fhp->fh_export->ex_path.mnt);
- 	if (error)
- 		goto getout;
- 	if (size)
-@@ -2094,7 +2062,6 @@ nfsd_set_posix_acl(struct svc_fh *fhp, int type, struct posix_acl *acl)
- 				error = 0;
- 		}
- 	}
--	mnt_drop_write(fhp->fh_export->ex_path.mnt);
+@@ -726,8 +728,8 @@ int nfs_updatepage(struct file *file, struct page *page,
+ 	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
  
- getout:
- 	kfree(value);
+ 	dprintk("NFS:       nfs_updatepage(%s/%s %d@%lld)\n",
+-		file->f_path.dentry->d_parent->d_name.name,
+-		file->f_path.dentry->d_name.name, count,
++		file->f_dentry->d_parent->d_name.name,
++		file->f_dentry->d_name.name, count,
+ 		(long long)(page_offset(page) + offset));
+ 
+ 	/* If we're not using byte range locks, and we know the page
 diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
-index 9dc036f..1ad4c84 100644
+index 9dc036f..4bafc01 100644
 --- a/fs/nfsd/export.c
 +++ b/fs/nfsd/export.c
-@@ -64,8 +64,10 @@ static void expkey_put(struct kref *ref)
- 	struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref);
- 
- 	if (test_bit(CACHE_VALID, &key->h.flags) &&
--	    !test_bit(CACHE_NEGATIVE, &key->h.flags))
--		path_put(&key->ek_path);
-+	    !test_bit(CACHE_NEGATIVE, &key->h.flags)) {
-+		dput(key->ek_path.dentry);
-+		mntput(key->ek_path.mnt);
-+	}
- 	auth_domain_put(key->ek_client);
- 	kfree(key);
- }
-@@ -168,14 +170,16 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
+@@ -168,14 +168,15 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
  			goto out;
  
  		dprintk("Found the path %s\n", buf);
@@ -2741,12 +1248,11 @@
  		else
  			err = -ENOMEM;
 -		path_put(&nd.path);
-+		dput(nd.dentry);
-+		mntput(nd.mnt);
++		backport_path_put(&nd);
  	}
  	cache_flush();
   out:
-@@ -204,7 +208,7 @@ static int expkey_show(struct seq_file *m,
+@@ -204,7 +205,7 @@ static int expkey_show(struct seq_file *m,
  	if (test_bit(CACHE_VALID, &h->flags) && 
  	    !test_bit(CACHE_NEGATIVE, &h->flags)) {
  		seq_printf(m, " ");
@@ -2755,28 +1261,7 @@
  	}
  	seq_printf(m, "\n");
  	return 0;
-@@ -241,8 +245,8 @@ static inline void expkey_update(struct cache_head *cnew,
- 	struct svc_expkey *new = container_of(cnew, struct svc_expkey, h);
- 	struct svc_expkey *item = container_of(citem, struct svc_expkey, h);
- 
--	new->ek_path = item->ek_path;
--	path_get(&item->ek_path);
-+	new->ek_path.mnt = mntget(item->ek_path.mnt);
-+	new->ek_path.dentry = dget(item->ek_path.dentry);
- }
- 
- static struct cache_head *expkey_alloc(void)
-@@ -330,7 +334,8 @@ static void nfsd4_fslocs_free(struct nfsd4_fs_locations *fsloc)
- static void svc_export_put(struct kref *ref)
- {
- 	struct svc_export *exp = container_of(ref, struct svc_export, h.ref);
--	path_put(&exp->ex_path);
-+	dput(exp->ex_path.dentry);
-+	mntput(exp->ex_path.mnt);
- 	auth_domain_put(exp->ex_client);
- 	kfree(exp->ex_pathname);
- 	nfsd4_fslocs_free(&exp->ex_fslocs);
-@@ -346,7 +351,7 @@ static void svc_export_request(struct cache_detail *cd,
+@@ -346,7 +347,7 @@ static void svc_export_request(struct cache_detail *cd,
  	char *pth;
  
  	qword_add(bpp, blen, exp->ex_client->name);
@@ -2785,7 +1270,16 @@
  	if (IS_ERR(pth)) {
  		/* is this correct? */
  		(*bpp)[0] = '\n';
-@@ -504,7 +509,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
+@@ -385,7 +386,7 @@ static int check_export(struct inode *inode, int flags, unsigned char *uuid)
+ 	}
+ 
+ 	if (!inode->i_sb->s_export_op ||
+-	    !inode->i_sb->s_export_op->fh_to_dentry) {
++	    !inode->i_sb->s_export_op->get_dentry) {
+ 		dprintk("exp_export: export of invalid fs type.\n");
+ 		return -EINVAL;
+ 	}
+@@ -504,7 +505,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
  	struct svc_export exp, *expp;
  	int an_int;
  
@@ -2794,7 +1288,7 @@
  	exp.ex_pathname = NULL;
  
  	/* fs locations */
-@@ -544,8 +549,8 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
+@@ -544,8 +545,8 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
  
  	exp.h.flags = 0;
  	exp.ex_client = dom;
@@ -2805,7 +1299,7 @@
  	exp.ex_pathname = kstrdup(buf, GFP_KERNEL);
  	err = -ENOMEM;
  	if (!exp.ex_pathname)
-@@ -607,7 +612,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
+@@ -607,7 +608,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
  				goto out;
  		}
  
@@ -2814,20 +1308,18 @@
  				   exp.ex_uuid);
  		if (err) goto out;
  	}
-@@ -626,8 +631,10 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
+@@ -626,8 +627,8 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
  	nfsd4_fslocs_free(&exp.ex_fslocs);
  	kfree(exp.ex_uuid);
  	kfree(exp.ex_pathname);
 -	if (nd.path.dentry)
 -		path_put(&nd.path);
-+	if (nd.dentry) {
-+		dput(nd.dentry);
-+		mntput(nd.mnt);
-+	}
++	if (nd.dentry)
++		backport_path_put(&nd);
   out_no_path:
  	if (dom)
  		auth_domain_put(dom);
-@@ -650,7 +657,7 @@ static int svc_export_show(struct seq_file *m,
+@@ -650,7 +651,7 @@ static int svc_export_show(struct seq_file *m,
  		return 0;
  	}
  	exp = container_of(h, struct svc_export, h);
@@ -2836,7 +1328,7 @@
  	seq_putc(m, '\t');
  	seq_escape(m, exp->ex_client->name, " \t\n\\");
  	seq_putc(m, '(');
-@@ -1026,7 +1033,7 @@ exp_export(struct nfsctl_export *nxp)
+@@ -1026,7 +1027,7 @@ exp_export(struct nfsctl_export *nxp)
  		goto out_put_clp;
  	err = -EINVAL;
  
@@ -2845,7 +1337,7 @@
  
  	memset(&new, 0, sizeof(new));
  
-@@ -1034,8 +1041,8 @@ exp_export(struct nfsctl_export *nxp)
+@@ -1034,8 +1035,8 @@ exp_export(struct nfsctl_export *nxp)
  	if ((nxp->ex_flags & NFSEXP_FSID) &&
  	    (!IS_ERR(fsid_key = exp_get_fsid_key(clp, nxp->ex_dev))) &&
  	    fsid_key->ek_path.mnt &&
@@ -2856,7 +1348,7 @@
  		goto finish;
  
  	if (!IS_ERR(exp)) {
-@@ -1051,7 +1058,7 @@ exp_export(struct nfsctl_export *nxp)
+@@ -1051,7 +1052,7 @@ exp_export(struct nfsctl_export *nxp)
  		goto finish;
  	}
  
@@ -2865,39 +1357,37 @@
  	if (err) goto finish;
  
  	err = -ENOMEM;
-@@ -1064,7 +1071,8 @@ exp_export(struct nfsctl_export *nxp)
+@@ -1064,7 +1065,8 @@ exp_export(struct nfsctl_export *nxp)
  	if (!new.ex_pathname)
  		goto finish;
  	new.ex_client = clp;
 -	new.ex_path = nd.path;
-+	new.ex_path.dentry = nd.dentry;
 +	new.ex_path.mnt = nd.mnt;
++	new.ex_path.dentry = nd.dentry;
  	new.ex_flags = nxp->ex_flags;
  	new.ex_anon_uid = nxp->ex_anon_uid;
  	new.ex_anon_gid = nxp->ex_anon_gid;
-@@ -1090,7 +1098,8 @@ finish:
+@@ -1090,7 +1092,7 @@ finish:
  		exp_put(exp);
  	if (fsid_key && !IS_ERR(fsid_key))
  		cache_put(&fsid_key->h, &svc_expkey_cache);
 -	path_put(&nd.path);
-+	dput(nd.dentry);
-+	mntput(nd.mnt);
++	backport_path_put(&nd);
  out_put_clp:
  	auth_domain_put(clp);
  out_unlock:
-@@ -1143,8 +1152,9 @@ exp_unexport(struct nfsctl_export *nxp)
+@@ -1143,8 +1145,8 @@ exp_unexport(struct nfsctl_export *nxp)
  		goto out_domain;
  
  	err = -EINVAL;
 -	exp = exp_get_by_name(dom, nd.path.mnt, nd.path.dentry, NULL);
 -	path_put(&nd.path);
 +	exp = exp_get_by_name(dom, nd.mnt, nd.dentry, NULL);
-+	dput(nd.dentry);
-+	mntput(nd.mnt);
++	backport_path_put(&nd);
  	if (IS_ERR(exp))
  		goto out_domain;
  
-@@ -1180,12 +1190,12 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
+@@ -1180,12 +1182,12 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
  		printk("nfsd: exp_rootfh path not found %s", path);
  		return err;
  	}
@@ -2913,7 +1403,7 @@
  	if (IS_ERR(exp)) {
  		err = PTR_ERR(exp);
  		goto out;
-@@ -1195,7 +1205,7 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
+@@ -1195,7 +1197,7 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
  	 * fh must be initialized before calling fh_compose
  	 */
  	fh_init(&fh, maxsize);
@@ -2922,300 +1412,20 @@
  		err = -EINVAL;
  	else
  		err = 0;
-@@ -1203,7 +1213,8 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
+@@ -1203,7 +1205,7 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
  	fh_put(&fh);
  	exp_put(exp);
  out:
 -	path_put(&nd.path);
-+	dput(nd.dentry);
-+	mntput(nd.mnt);
++	backport_path_put(&nd);
  	return err;
  }
  
-@@ -1213,13 +1224,13 @@ static struct svc_export *exp_find(struct auth_domain *clp, int fsid_type,
- 	struct svc_export *exp;
- 	struct svc_expkey *ek = exp_find_key(clp, fsid_type, fsidv, reqp);
- 	if (IS_ERR(ek))
--		return ERR_CAST(ek);
-+		return ERR_PTR(PTR_ERR(ek));
- 
- 	exp = exp_get_by_name(clp, ek->ek_path.mnt, ek->ek_path.dentry, reqp);
- 	cache_put(&ek->h, &svc_expkey_cache);
- 
- 	if (IS_ERR(exp))
--		return ERR_CAST(exp);
-+		return ERR_PTR(PTR_ERR(exp));
- 	return exp;
- }
- 
-diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
-index 294992e..1b2c864 100644
---- a/fs/nfsd/auth.c
-+++ b/fs/nfsd/auth.c
-@@ -12,6 +12,8 @@
- #include <linux/nfsd/export.h>
- #include "auth.h"
- 
-+#define		CAP_NFSD_MASK (CAP_FS_MASK|CAP_TO_MASK(CAP_SYS_RESOURCE))
-+
- int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp)
- {
- 	struct exp_flavor_info *f;
-@@ -68,12 +70,10 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
- 	ret = set_current_groups(cred.cr_group_info);
- 	put_group_info(cred.cr_group_info);
- 	if ((cred.cr_uid)) {
--		current->cap_effective =
--			cap_drop_nfsd_set(current->cap_effective);
-+		cap_t(current->cap_effective) &= ~CAP_NFSD_MASK;
- 	} else {
--		current->cap_effective =
--			cap_raise_nfsd_set(current->cap_effective,
--					   current->cap_permitted);
-+		cap_t(current->cap_effective) |= (CAP_NFSD_MASK &
-+				current->cap_permitted);
- 	}
- 	return ret;
- }
-diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
-index e5b51ff..72bd1e2 100644
---- a/fs/nfsd/nfs4proc.c
-+++ b/fs/nfsd/nfs4proc.c
-@@ -661,19 +661,14 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- 			return status;
- 		}
- 	}
--	status = mnt_want_write(cstate->current_fh.fh_export->ex_path.mnt);
--	if (status)
--		return status;
- 	status = nfs_ok;
- 	if (setattr->sa_acl != NULL)
- 		status = nfsd4_set_nfs4_acl(rqstp, &cstate->current_fh,
- 					    setattr->sa_acl);
- 	if (status)
--		goto out;
-+		return status;
- 	status = nfsd_setattr(rqstp, &cstate->current_fh, &setattr->sa_iattr,
- 				0, (time_t)0);
--out:
--	mnt_drop_write(cstate->current_fh.fh_export->ex_path.mnt);
- 	return status;
- }
- 
-diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
-index 1578d7a..2670a12 100644
---- a/fs/nfsd/nfs4state.c
-+++ b/fs/nfsd/nfs4state.c
-@@ -256,7 +256,7 @@ nfs4_close_delegation(struct nfs4_delegation *dp)
- 	/* The following nfsd_close may not actually close the file,
- 	 * but we want to remove the lease in any case. */
- 	if (dp->dl_flock)
--		vfs_setlease(filp, F_UNLCK, &dp->dl_flock);
-+		setlease(filp, F_UNLCK, &dp->dl_flock);
- 	nfsd_close(filp);
- }
- 
-@@ -1258,7 +1258,7 @@ static inline void
- nfs4_file_downgrade(struct file *filp, unsigned int share_access)
- {
- 	if (share_access & NFS4_SHARE_ACCESS_WRITE) {
--		drop_file_write_access(filp);
-+		put_write_access(filp->f_dentry->d_inode);
- 		filp->f_mode = (filp->f_mode | FMODE_READ) & ~FMODE_WRITE;
- 	}
- }
-@@ -1576,7 +1576,7 @@ static __be32
- nfs4_upgrade_open(struct svc_rqst *rqstp, struct svc_fh *cur_fh, struct nfs4_stateid *stp, struct nfsd4_open *open)
- {
- 	struct file *filp = stp->st_vfs_file;
--	struct inode *inode = filp->f_path.dentry->d_inode;
-+	struct inode *inode = filp->f_dentry->d_inode;
- 	unsigned int share_access, new_writer;
- 	__be32 status;
- 
-@@ -1588,10 +1588,6 @@ nfs4_upgrade_open(struct svc_rqst *rqstp, struct svc_fh *cur_fh, struct nfs4_sta
- 		int err = get_write_access(inode);
- 		if (err)
- 			return nfserrno(err);
--		err = mnt_want_write(cur_fh->fh_export->ex_path.mnt);
--		if (err)
--			return nfserrno(err);
--		file_take_write(filp);
- 	}
- 	status = nfsd4_truncate(rqstp, cur_fh, open);
- 	if (status) {
-@@ -1667,10 +1663,10 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
- 	fl.fl_file = stp->st_vfs_file;
- 	fl.fl_pid = current->tgid;
- 
--	/* vfs_setlease checks to see if delegation should be handed out.
-+	/* setlease checks to see if delegation should be handed out.
- 	 * the lock_manager callbacks fl_mylease and fl_change are used
- 	 */
--	if ((status = vfs_setlease(stp->st_vfs_file, fl.fl_type, &flp))) {
-+	if ((status = setlease(stp->st_vfs_file, fl.fl_type, &flp))) {
- 		dprintk("NFSD: setlease failed [%d], no delegation\n", status);
- 		unhash_delegation(dp);
- 		flag = NFS4_OPEN_DELEGATE_NONE;
-@@ -1923,7 +1919,7 @@ search_close_lru(u32 st_id, int flags)
- static inline int
- nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stateid *stp)
- {
--	return fhp->fh_dentry->d_inode != stp->st_vfs_file->f_path.dentry->d_inode;
-+	return fhp->fh_dentry->d_inode != stp->st_vfs_file->f_dentry->d_inode;
- }
- 
- static int
-@@ -1994,7 +1990,7 @@ static inline int
- io_during_grace_disallowed(struct inode *inode, int flags)
- {
- 	return nfs4_in_grace() && (flags & (RD_STATE | WR_STATE))
--		&& mandatory_lock(inode);
-+		&& MANDATORY_LOCK(inode);
- }
- 
- static int check_stateid_generation(stateid_t *in, stateid_t *ref)
-@@ -2734,7 +2730,9 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- 	* Note: locks.c uses the BKL to protect the inode's lock list.
- 	*/
- 
--	err = vfs_lock_file(filp, cmd, &file_lock, &conflock);
-+	conflock.fl_ops = NULL;
-+	conflock.fl_lmops = NULL;
-+	err = posix_lock_file_conf(filp, &file_lock, &conflock);
- 	switch (-err) {
- 	case 0: /* success! */
- 		update_stateid(&lock_stp->st_stateid);
-@@ -2751,7 +2749,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- 		status = nfserr_deadlock;
- 		break;
- 	default:        
--		dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
-+		dprintk("NFSD: nfsd4_lock: posix_lock_file() failed! status %d\n",err);
- 		status = nfserr_resource;
- 		break;
- 	}
-@@ -2776,7 +2774,7 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- 	struct inode *inode;
- 	struct file file;
- 	struct file_lock file_lock;
--	int error;
-+	struct file_lock conflock;
- 	__be32 status;
- 
- 	if (nfs4_in_grace())
-@@ -2832,23 +2830,19 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- 
- 	nfs4_transform_lock_offset(&file_lock);
- 
--	/* vfs_test_lock uses the struct file _only_ to resolve the inode.
-+	/* posix_test_lock uses the struct file _only_ to resolve the inode.
- 	 * since LOCKT doesn't require an OPEN, and therefore a struct
--	 * file may not exist, pass vfs_test_lock a struct file with
-+	 * file may not exist, pass posix_test_lock a struct file with
- 	 * only the dentry:inode set.
- 	 */
- 	memset(&file, 0, sizeof (struct file));
--	file.f_path.dentry = cstate->current_fh.fh_dentry;
-+	file.f_dentry = cstate->current_fh.fh_dentry;
- 
- 	status = nfs_ok;
--	error = vfs_test_lock(&file, &file_lock);
--	if (error) {
--		status = nfserrno(error);
--		goto out;
--	}
-+	posix_test_lock(&file, &file_lock, &conflock);
- 	if (file_lock.fl_type != F_UNLCK) {
- 		status = nfserr_denied;
--		nfs4_set_lock_denied(&file_lock, &lockt->lt_denied);
-+		nfs4_set_lock_denied(&conflock, &lockt->lt_denied);
- 	}
- out:
- 	nfs4_unlock_state();
-@@ -2901,9 +2895,9 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- 	/*
- 	*  Try to unlock the file in the VFS.
- 	*/
--	err = vfs_lock_file(filp, F_SETLK, &file_lock, NULL);
-+	err = posix_lock_file(filp, &file_lock);
- 	if (err) {
--		dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
-+		dprintk("NFSD: nfs4_locku: posix_lock_file failed!\n");
- 		goto out_nfserr;
- 	}
- 	/*
-@@ -2934,7 +2928,7 @@ static int
- check_for_locks(struct file *filp, struct nfs4_stateowner *lowner)
- {
- 	struct file_lock **flpp;
--	struct inode *inode = filp->f_path.dentry->d_inode;
-+	struct inode *inode = filp->f_dentry->d_inode;
- 	int status = 0;
- 
- 	lock_kernel();
-@@ -3294,11 +3288,12 @@ nfs4_reset_recoverydir(char *recdir)
- 	if (status)
- 		return status;
- 	status = -ENOTDIR;
--	if (S_ISDIR(nd.path.dentry->d_inode->i_mode)) {
-+	if (S_ISDIR(nd.dentry->d_inode->i_mode)) {
- 		nfs4_set_recdir(recdir);
- 		status = 0;
- 	}
--	path_put(&nd.path);
-+	dput(nd.dentry);
-+	mntput(nd.mnt);
- 	return status;
- }
- 
 diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
-index 145b3c8..ceb9781 100644
+index 145b3c8..ad22c29 100644
 --- a/fs/nfsd/nfs4recover.c
 +++ b/fs/nfsd/nfs4recover.c
-@@ -88,42 +88,41 @@ __be32
- nfs4_make_rec_clidname(char *dname, struct xdr_netobj *clname)
- {
- 	struct xdr_netobj cksum;
--	struct hash_desc desc;
-+	struct crypto_tfm *tfm;
- 	struct scatterlist sg;
- 	__be32 status = nfserr_resource;
- 
- 	dprintk("NFSD: nfs4_make_rec_clidname for %.*s\n",
- 			clname->len, clname->data);
--	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
--	desc.tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
--	if (IS_ERR(desc.tfm))
--		goto out_no_tfm;
--	cksum.len = crypto_hash_digestsize(desc.tfm);
-+	tfm = crypto_alloc_tfm("md5", CRYPTO_TFM_REQ_MAY_SLEEP);
-+	if (tfm == NULL)
-+		goto out;
-+	cksum.len = crypto_tfm_alg_digestsize(tfm);
- 	cksum.data = kmalloc(cksum.len, GFP_KERNEL);
- 	if (cksum.data == NULL)
-  		goto out;
-+	crypto_digest_init(tfm);
- 
- 	sg_init_one(&sg, clname->data, clname->len);
- 
--	if (crypto_hash_digest(&desc, &sg, sg.length, cksum.data))
--		goto out;
-+	crypto_digest_update(tfm, &sg, 1);
-+	crypto_digest_final(tfm, cksum.data);
- 
- 	md5_to_hex(dname, cksum.data);
- 
- 	kfree(cksum.data);
- 	status = nfs_ok;
- out:
--	crypto_free_hash(desc.tfm);
--out_no_tfm:
-+	crypto_free_tfm(tfm);
- 	return status;
- }
- 
+@@ -121,9 +121,9 @@ out_no_tfm:
  static void
  nfsd4_sync_rec_dir(void)
  {
@@ -3228,7 +1438,7 @@
  }
  
  int
-@@ -143,9 +142,9 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
+@@ -143,9 +143,9 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
  	nfs4_save_user(&uid, &gid);
  
  	/* lock the parent */
@@ -3240,16 +1450,18 @@
  	if (IS_ERR(dentry)) {
  		status = PTR_ERR(dentry);
  		goto out_unlock;
-@@ -155,15 +154,11 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
+@@ -155,15 +155,15 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
  		dprintk("NFSD: nfsd4_create_clid_dir: DIRECTORY EXISTS\n");
  		goto out_put;
  	}
 -	status = mnt_want_write(rec_dir.path.mnt);
--	if (status)
--		goto out_put;
++	status = mnt_want_write(rec_dir.mnt);
+ 	if (status)
+ 		goto out_put;
 -	status = vfs_mkdir(rec_dir.path.dentry->d_inode, dentry, S_IRWXU);
 -	mnt_drop_write(rec_dir.path.mnt);
 +	status = vfs_mkdir(rec_dir.dentry->d_inode, dentry, S_IRWXU);
++	mnt_drop_write(rec_dir.mnt);
  out_put:
  	dput(dentry);
  out_unlock:
@@ -3258,7 +1470,7 @@
  	if (status == 0) {
  		clp->cl_firststate = 1;
  		nfsd4_sync_rec_dir();
-@@ -226,7 +221,7 @@ nfsd4_list_rec_dir(struct dentry *dir, recdir_func *f)
+@@ -226,7 +226,7 @@ nfsd4_list_rec_dir(struct dentry *dir, recdir_func *f)
  
  	nfs4_save_user(&uid, &gid);
  
@@ -3267,7 +1479,7 @@
  	status = PTR_ERR(filp);
  	if (IS_ERR(filp))
  		goto out;
-@@ -291,9 +286,9 @@ nfsd4_unlink_clid_dir(char *name, int namlen)
+@@ -291,9 +291,9 @@ nfsd4_unlink_clid_dir(char *name, int namlen)
  
  	dprintk("NFSD: nfsd4_unlink_clid_dir. name %.*s\n", namlen, name);
  
@@ -3280,7 +1492,7 @@
  	if (IS_ERR(dentry)) {
  		status = PTR_ERR(dentry);
  		return status;
-@@ -302,7 +297,7 @@ nfsd4_unlink_clid_dir(char *name, int namlen)
+@@ -302,7 +302,7 @@ nfsd4_unlink_clid_dir(char *name, int namlen)
  	if (!dentry->d_inode)
  		goto out;
  
@@ -3289,46 +1501,47 @@
  out:
  	dput(dentry);
  	return status;
-@@ -318,17 +313,12 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp)
+@@ -318,7 +318,7 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp)
  	if (!rec_dir_init || !clp->cl_firststate)
  		return;
  
 -	status = mnt_want_write(rec_dir.path.mnt);
--	if (status)
--		goto out;
++	status = mnt_want_write(rec_dir.mnt);
+ 	if (status)
+ 		goto out;
  	clp->cl_firststate = 0;
- 	nfs4_save_user(&uid, &gid);
- 	status = nfsd4_unlink_clid_dir(clp->cl_recdir, HEXDIR_LEN-1);
+@@ -327,7 +327,7 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp)
  	nfs4_reset_user(uid, gid);
  	if (status == 0)
  		nfsd4_sync_rec_dir();
 -	mnt_drop_write(rec_dir.path.mnt);
--out:
++	mnt_drop_write(rec_dir.mnt);
+ out:
  	if (status)
  		printk("NFSD: Failed to remove expired client state directory"
- 				" %.*s\n", HEXDIR_LEN, clp->cl_recdir);
-@@ -357,17 +347,13 @@ nfsd4_recdir_purge_old(void) {
+@@ -357,17 +357,17 @@ nfsd4_recdir_purge_old(void) {
  
  	if (!rec_dir_init)
  		return;
 -	status = mnt_want_write(rec_dir.path.mnt);
--	if (status)
--		goto out;
++	status = mnt_want_write(rec_dir.mnt);
+ 	if (status)
+ 		goto out;
 -	status = nfsd4_list_rec_dir(rec_dir.path.dentry, purge_old);
 +	status = nfsd4_list_rec_dir(rec_dir.dentry, purge_old);
  	if (status == 0)
  		nfsd4_sync_rec_dir();
 -	mnt_drop_write(rec_dir.path.mnt);
--out:
++	mnt_drop_write(rec_dir.mnt);
+ out:
  	if (status)
  		printk("nfsd4: failed to purge old clients from recovery"
 -			" directory %s\n", rec_dir.path.dentry->d_name.name);
 +			" directory %s\n", rec_dir.dentry->d_name.name);
-+	return;
  }
  
  static int
-@@ -387,10 +373,10 @@ int
+@@ -387,10 +387,10 @@ int
  nfsd4_recdir_load(void) {
  	int status;
  
@@ -3341,11 +1554,1279 @@
  	return status;
  }
  
-@@ -429,5 +415,6 @@ nfsd4_shutdown_recdir(void)
+@@ -429,5 +429,5 @@ nfsd4_shutdown_recdir(void)
  	if (!rec_dir_init)
  		return;
  	rec_dir_init = 0;
 -	path_put(&rec_dir.path);
-+	dput(rec_dir.dentry);
-+	mntput(rec_dir.mnt);
++	backport_path_put(&rec_dir);
  }
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 1578d7a..1c6df07 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1576,7 +1576,7 @@ static __be32
+ nfs4_upgrade_open(struct svc_rqst *rqstp, struct svc_fh *cur_fh, struct nfs4_stateid *stp, struct nfsd4_open *open)
+ {
+ 	struct file *filp = stp->st_vfs_file;
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 	unsigned int share_access, new_writer;
+ 	__be32 status;
+ 
+@@ -1923,7 +1923,7 @@ search_close_lru(u32 st_id, int flags)
+ static inline int
+ nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stateid *stp)
+ {
+-	return fhp->fh_dentry->d_inode != stp->st_vfs_file->f_path.dentry->d_inode;
++	return fhp->fh_dentry->d_inode != stp->st_vfs_file->f_dentry->d_inode;
+ }
+ 
+ static int
+@@ -2838,7 +2838,7 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 	 * only the dentry:inode set.
+ 	 */
+ 	memset(&file, 0, sizeof (struct file));
+-	file.f_path.dentry = cstate->current_fh.fh_dentry;
++	file.f_dentry = cstate->current_fh.fh_dentry;
+ 
+ 	status = nfs_ok;
+ 	error = vfs_test_lock(&file, &file_lock);
+@@ -2934,7 +2934,7 @@ static int
+ check_for_locks(struct file *filp, struct nfs4_stateowner *lowner)
+ {
+ 	struct file_lock **flpp;
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 	int status = 0;
+ 
+ 	lock_kernel();
+@@ -3294,11 +3294,11 @@ nfs4_reset_recoverydir(char *recdir)
+ 	if (status)
+ 		return status;
+ 	status = -ENOTDIR;
+-	if (S_ISDIR(nd.path.dentry->d_inode->i_mode)) {
++	if (S_ISDIR(nd.dentry->d_inode->i_mode)) {
+ 		nfs4_set_recdir(recdir);
+ 		status = 0;
+ 	}
+-	path_put(&nd.path);
++	backport_path_put(&nd);
+ 	return status;
+ }
+ 
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index c53e65f..fc2871b 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -121,7 +121,7 @@ static ssize_t (*write_op[])(struct file *, char *, size_t) = {
+ 
+ static ssize_t nfsctl_transaction_write(struct file *file, const char __user *buf, size_t size, loff_t *pos)
+ {
+-	ino_t ino =  file->f_path.dentry->d_inode->i_ino;
++	ino_t ino = file->f_dentry->d_inode->i_ino;
+ 	char *data;
+ 	ssize_t rv;
+ 
+@@ -360,9 +360,9 @@ static ssize_t failover_unlock_fs(struct file *file, char *buf, size_t size)
+ 	if (error)
+ 		return error;
+ 
+-	error = nlmsvc_unlock_all_by_sb(nd.path.mnt->mnt_sb);
++	error = nlmsvc_unlock_all_by_sb(nd.mnt->mnt_sb);
+ 
+-	path_put(&nd.path);
++	backport_path_put(&nd);
+ 	return error;
+ }
+ 
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index 80292ff..47eb160 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -574,3 +574,5 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
+ 	nfsd_cache_update(rqstp, proc->pc_cachetype, statp + 1);
+ 	return 1;
+ }
++
++MODULE_LICENSE("Dual BSD/GPL");
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 18060be..becacce 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -23,7 +23,6 @@
+ #include <linux/file.h>
+ #include <linux/mount.h>
+ #include <linux/major.h>
+-#include <linux/splice.h>
+ #include <linux/proc_fs.h>
+ #include <linux/stat.h>
+ #include <linux/fcntl.h>
+@@ -766,10 +765,10 @@ static int
+ nfsd_sync(struct file *filp)
+ {
+         int err;
+-	struct inode *inode = filp->f_path.dentry->d_inode;
+-	dprintk("nfsd: sync file %s\n", filp->f_path.dentry->d_name.name);
++	struct inode *inode = filp->f_dentry->d_inode;
++	dprintk("nfsd: sync file %s\n", filp->f_dentry->d_name.name);
+ 	mutex_lock(&inode->i_mutex);
+-	err=nfsd_dosync(filp, filp->f_path.dentry, filp->f_op);
++	err=nfsd_dosync(filp, filp->f_dentry, filp->f_op);
+ 	mutex_unlock(&inode->i_mutex);
+ 
+ 	return err;
+@@ -828,53 +827,39 @@ found:
+ 	return ra;
+ }
+ 
+-/*
+- * Grab and keep cached pages associated with a file in the svc_rqst
+- * so that they can be passed to the network sendmsg/sendpage routines
+- * directly. They will be released after the sending has completed.
+- */
+ static int
+-nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
+-		  struct splice_desc *sd)
++nfsd_read_actor(read_descriptor_t *desc, struct page *page, unsigned long offset , unsigned long size)
+ {
+-	struct svc_rqst *rqstp = sd->u.data;
++	unsigned long count = desc->count;
++	struct svc_rqst *rqstp = desc->arg.data;
+ 	struct page **pp = rqstp->rq_respages + rqstp->rq_resused;
+-	struct page *page = buf->page;
+-	size_t size;
+-	int ret;
+-
+-	ret = buf->ops->confirm(pipe, buf);
+-	if (unlikely(ret))
+-		return ret;
+ 
+-	size = sd->len;
++	if (size > count)
++		size = count;
+ 
+ 	if (rqstp->rq_res.page_len == 0) {
+ 		get_page(page);
+-		put_page(*pp);
+-		*pp = page;
+-		rqstp->rq_resused++;
+-		rqstp->rq_res.page_base = buf->offset;
++		if (*pp)
++			put_page(*pp);
++		rqstp->rq_respages[rqstp->rq_resused++] = page;
++		rqstp->rq_res.page_base = offset;
+ 		rqstp->rq_res.page_len = size;
+-	} else if (page != pp[-1]) {
++	} else if (page != rqstp->rq_respages[rqstp->rq_resused-1]) {
+ 		get_page(page);
+ 		if (*pp)
+ 			put_page(*pp);
+ 		*pp = page;
+-		rqstp->rq_resused++;
++		rqstp->rq_respages[rqstp->rq_resused++] = page;
+ 		rqstp->rq_res.page_len += size;
+-	} else
++	} else {
+ 		rqstp->rq_res.page_len += size;
++	}
+ 
++	desc->count = count - size;
++	desc->written += size;
+ 	return size;
+ }
+ 
+-static int nfsd_direct_splice_actor(struct pipe_inode_info *pipe,
+-				    struct splice_desc *sd)
+-{
+-	return __splice_from_pipe(pipe, sd, nfsd_splice_actor);
+-}
+-
+ static inline int svc_msnfs(struct svc_fh *ffhp)
+ {
+ #ifdef MSNFS
+@@ -895,7 +880,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+ 	int		host_err;
+ 
+ 	err = nfserr_perm;
+-	inode = file->f_path.dentry->d_inode;
++	inode = file->f_dentry->d_inode;
+ 
+ 	if (svc_msnfs(fhp) && !lock_may_read(inode, offset, *count))
+ 		goto out;
+@@ -906,16 +891,9 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+ 	if (ra && ra->p_set)
+ 		file->f_ra = ra->p_ra;
+ 
+-	if (file->f_op->splice_read && rqstp->rq_splice_ok) {
+-		struct splice_desc sd = {
+-			.len		= 0,
+-			.total_len	= *count,
+-			.pos		= offset,
+-			.u.data		= rqstp,
+-		};
+-
++	if (file->f_op->sendfile && rqstp->rq_sendfile_ok) {
+ 		rqstp->rq_resused = 1;
+-		host_err = splice_direct_to_actor(file, &sd, nfsd_direct_splice_actor);
++		host_err = file->f_op->sendfile(file, &offset, *count, nfsd_read_actor, rqstp);
+ 	} else {
+ 		oldfs = get_fs();
+ 		set_fs(KERNEL_DS);
+@@ -937,7 +915,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+ 		nfsdstats.io_read += host_err;
+ 		*count = host_err;
+ 		err = 0;
+-		fsnotify_access(file->f_path.dentry);
++		fsnotify_access(file->f_dentry);
+ 	} else 
+ 		err = nfserrno(host_err);
+ out:
+@@ -971,11 +949,11 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+ 	err = nfserr_perm;
+ 
+ 	if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
+-		(!lock_may_write(file->f_path.dentry->d_inode, offset, cnt)))
++		(!lock_may_write(file->f_dentry->d_inode, offset, cnt)))
+ 		goto out;
+ #endif
+ 
+-	dentry = file->f_path.dentry;
++	dentry = file->f_dentry;
+ 	inode = dentry->d_inode;
+ 	exp   = fhp->fh_export;
+ 
+@@ -1004,7 +982,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+ 	set_fs(oldfs);
+ 	if (host_err >= 0) {
+ 		nfsdstats.io_write += cnt;
+-		fsnotify_modify(file->f_path.dentry);
++		fsnotify_modify(file->f_dentry);
+ 	}
+ 
+ 	/* clear setuid/setgid flag after write */
+diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
+index 27e772c..d932fb1 100644
+--- a/include/linux/exportfs.h
++++ b/include/linux/exportfs.h
+@@ -89,85 +89,9 @@ struct fid {
+ 	};
+ };
+ 
+-/**
+- * struct export_operations - for nfsd to communicate with file systems
+- * @encode_fh:      encode a file handle fragment from a dentry
+- * @fh_to_dentry:   find the implied object and get a dentry for it
+- * @fh_to_parent:   find the implied object's parent and get a dentry for it
+- * @get_name:       find the name for a given inode in a given directory
+- * @get_parent:     find the parent of a given directory
+- *
+- * See Documentation/filesystems/Exporting for details on how to use
+- * this interface correctly.
+- *
+- * encode_fh:
+- *    @encode_fh should store in the file handle fragment @fh (using at most
+- *    @max_len bytes) information that can be used by @decode_fh to recover the
+- *    file refered to by the &struct dentry @de.  If the @connectable flag is
+- *    set, the encode_fh() should store sufficient information so that a good
+- *    attempt can be made to find not only the file but also it's place in the
+- *    filesystem.   This typically means storing a reference to de->d_parent in
+- *    the filehandle fragment.  encode_fh() should return the number of bytes
+- *    stored or a negative error code such as %-ENOSPC
+- *
+- * fh_to_dentry:
+- *    @fh_to_dentry is given a &struct super_block (@sb) and a file handle
+- *    fragment (@fh, @fh_len). It should return a &struct dentry which refers
+- *    to the same file that the file handle fragment refers to.  If it cannot,
+- *    it should return a %NULL pointer if the file was found but no acceptable
+- *    &dentries were available, or an %ERR_PTR error code indicating why it
+- *    couldn't be found (e.g. %ENOENT or %ENOMEM).  Any suitable dentry can be
+- *    returned including, if necessary, a new dentry created with d_alloc_root.
+- *    The caller can then find any other extant dentries by following the
+- *    d_alias links.
+- *
+- * fh_to_parent:
+- *    Same as @fh_to_dentry, except that it returns a pointer to the parent
+- *    dentry if it was encoded into the filehandle fragment by @encode_fh.
+- *
+- * get_name:
+- *    @get_name should find a name for the given @child in the given @parent
+- *    directory.  The name should be stored in the @name (with the
+- *    understanding that it is already pointing to a a %NAME_MAX+1 sized
+- *    buffer.   get_name() should return %0 on success, a negative error code
+- *    or error.  @get_name will be called without @parent->i_mutex held.
+- *
+- * get_parent:
+- *    @get_parent should find the parent directory for the given @child which
+- *    is also a directory.  In the event that it cannot be found, or storage
+- *    space cannot be allocated, a %ERR_PTR should be returned.
+- *
+- * Locking rules:
+- *    get_parent is called with child->d_inode->i_mutex down
+- *    get_name is not (which is possibly inconsistent)
+- */
+-
+-struct export_operations {
+-	int (*encode_fh)(struct dentry *de, __u32 *fh, int *max_len,
+-			int connectable);
+-	struct dentry * (*fh_to_dentry)(struct super_block *sb, struct fid *fid,
+-			int fh_len, int fh_type);
+-	struct dentry * (*fh_to_parent)(struct super_block *sb, struct fid *fid,
+-			int fh_len, int fh_type);
+-	int (*get_name)(struct dentry *parent, char *name,
+-			struct dentry *child);
+-	struct dentry * (*get_parent)(struct dentry *child);
+-};
+-
+ extern int exportfs_encode_fh(struct dentry *dentry, struct fid *fid,
+ 	int *max_len, int connectable);
+ extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
+ 	int fh_len, int fileid_type, int (*acceptable)(void *, struct dentry *),
+ 	void *context);
+-
+-/*
+- * Generic helpers for filesystems.
+- */
+-extern struct dentry *generic_fh_to_dentry(struct super_block *sb,
+-	struct fid *fid, int fh_len, int fh_type,
+-	struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
+-extern struct dentry *generic_fh_to_parent(struct super_block *sb,
+-	struct fid *fid, int fh_len, int fh_type,
+-	struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
+-
+ #endif /* LINUX_EXPORTFS_H */
+diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
+index dbb87ab..9236e80 100644
+--- a/include/linux/lockd/lockd.h
++++ b/include/linux/lockd/lockd.h
+@@ -230,7 +230,7 @@ int           nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr);
+ 
+ static inline struct inode *nlmsvc_file_inode(struct nlm_file *file)
+ {
+-	return file->f_file->f_path.dentry->d_inode;
++	return file->f_file->f_dentry->d_inode;
+ }
+ 
+ /*
+diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
+index 78a5922..e59d828 100644
+--- a/include/linux/nfs_fs.h
++++ b/include/linux/nfs_fs.h
+@@ -9,6 +9,7 @@
+ #ifndef _LINUX_NFS_FS_H
+ #define _LINUX_NFS_FS_H
+ 
++#include <linux/path.h>
+ #include <linux/magic.h>
+ 
+ /* Default timeout values */
+@@ -331,7 +332,7 @@ extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *);
+ extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr);
+ extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr);
+ extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
+-extern int nfs_permission(struct inode *, int);
++extern int nfs_permission(struct inode *, int, struct nameidata *);
+ extern int nfs_open(struct inode *, struct file *);
+ extern int nfs_release(struct inode *, struct file *);
+ extern int nfs_attribute_timeout(struct inode *inode);
+@@ -358,9 +359,9 @@ static inline void nfs_fattr_init(struct nfs_fattr *fattr)
+ /*
+  * linux/fs/nfs/file.c
+  */
+-extern const struct inode_operations nfs_file_inode_operations;
++extern struct inode_operations nfs_file_inode_operations;
+ #ifdef CONFIG_NFS_V3
+-extern const struct inode_operations nfs3_file_inode_operations;
++extern struct inode_operations nfs3_file_inode_operations;
+ #endif /* CONFIG_NFS_V3 */
+ extern const struct file_operations nfs_file_operations;
+ extern const struct address_space_operations nfs_file_aops;
+@@ -408,9 +409,9 @@ extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
+ /*
+  * linux/fs/nfs/dir.c
+  */
+-extern const struct inode_operations nfs_dir_inode_operations;
++extern struct inode_operations nfs_dir_inode_operations;
+ #ifdef CONFIG_NFS_V3
+-extern const struct inode_operations nfs3_dir_inode_operations;
++extern struct inode_operations nfs3_dir_inode_operations;
+ #endif /* CONFIG_NFS_V3 */
+ extern const struct file_operations nfs_dir_operations;
+ extern struct dentry_operations nfs_dentry_operations;
+@@ -423,7 +424,7 @@ extern void nfs_access_zap_cache(struct inode *inode);
+ /*
+  * linux/fs/nfs/symlink.c
+  */
+-extern const struct inode_operations nfs_symlink_inode_operations;
++extern struct inode_operations nfs_symlink_inode_operations;
+ 
+ /*
+  * linux/fs/nfs/sysctl.c
+@@ -439,8 +440,8 @@ extern void nfs_unregister_sysctl(void);
+ /*
+  * linux/fs/nfs/namespace.c
+  */
+-extern const struct inode_operations nfs_mountpoint_inode_operations;
+-extern const struct inode_operations nfs_referral_inode_operations;
++extern struct inode_operations nfs_mountpoint_inode_operations;
++extern struct inode_operations nfs_referral_inode_operations;
+ extern int nfs_mountpoint_expiry_timeout;
+ extern void nfs_release_automount_timer(void);
+ 
+diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
+index 8c77c11..d9007dc 100644
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -782,8 +782,8 @@ struct nfs_access_entry;
+ struct nfs_rpc_ops {
+ 	u32	version;		/* Protocol version */
+ 	struct dentry_operations *dentry_ops;
+-	const struct inode_operations *dir_inode_ops;
+-	const struct inode_operations *file_inode_ops;
++	struct inode_operations *dir_inode_ops;
++	struct inode_operations *file_inode_ops;
+ 
+ 	int	(*getroot) (struct nfs_server *, struct nfs_fh *,
+ 			    struct nfs_fsinfo *);
+diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
+index 5431512..3753e4b 100644
+--- a/include/linux/nfsd/export.h
++++ b/include/linux/nfsd/export.h
+@@ -15,6 +15,7 @@
+ # include <linux/types.h>
+ # include <linux/in.h>
+ #endif
++#include <linux/path.h>
+ 
+ /*
+  * Important limits for the exports stuff.
+diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
+deleted file mode 100644
+index 8e41202..0000000
+--- a/include/linux/pipe_fs_i.h
++++ /dev/null
+@@ -1,151 +0,0 @@
+-#ifndef _LINUX_PIPE_FS_I_H
+-#define _LINUX_PIPE_FS_I_H
+-
+-#define PIPEFS_MAGIC 0x50495045
+-
+-#define PIPE_BUFFERS (16)
+-
+-#define PIPE_BUF_FLAG_LRU	0x01	/* page is on the LRU */
+-#define PIPE_BUF_FLAG_ATOMIC	0x02	/* was atomically mapped */
+-#define PIPE_BUF_FLAG_GIFT	0x04	/* page is a gift */
+-
+-/**
+- *	struct pipe_buffer - a linux kernel pipe buffer
+- *	@page: the page containing the data for the pipe buffer
+- *	@offset: offset of data inside the @page
+- *	@len: length of data inside the @page
+- *	@ops: operations associated with this buffer. See @pipe_buf_operations.
+- *	@flags: pipe buffer flags. See above.
+- *	@private: private data owned by the ops.
+- **/
+-struct pipe_buffer {
+-	struct page *page;
+-	unsigned int offset, len;
+-	const struct pipe_buf_operations *ops;
+-	unsigned int flags;
+-	unsigned long private;
+-};
+-
+-/**
+- *	struct pipe_inode_info - a linux kernel pipe
+- *	@wait: reader/writer wait point in case of empty/full pipe
+- *	@nrbufs: the number of non-empty pipe buffers in this pipe
+- *	@curbuf: the current pipe buffer entry
+- *	@tmp_page: cached released page
+- *	@readers: number of current readers of this pipe
+- *	@writers: number of current writers of this pipe
+- *	@waiting_writers: number of writers blocked waiting for room
+- *	@r_counter: reader counter
+- *	@w_counter: writer counter
+- *	@fasync_readers: reader side fasync
+- *	@fasync_writers: writer side fasync
+- *	@inode: inode this pipe is attached to
+- *	@bufs: the circular array of pipe buffers
+- **/
+-struct pipe_inode_info {
+-	wait_queue_head_t wait;
+-	unsigned int nrbufs, curbuf;
+-	struct page *tmp_page;
+-	unsigned int readers;
+-	unsigned int writers;
+-	unsigned int waiting_writers;
+-	unsigned int r_counter;
+-	unsigned int w_counter;
+-	struct fasync_struct *fasync_readers;
+-	struct fasync_struct *fasync_writers;
+-	struct inode *inode;
+-	struct pipe_buffer bufs[PIPE_BUFFERS];
+-};
+-
+-/*
+- * Note on the nesting of these functions:
+- *
+- * ->confirm()
+- *	->steal()
+- *	...
+- *	->map()
+- *	...
+- *	->unmap()
+- *
+- * That is, ->map() must be called on a confirmed buffer,
+- * same goes for ->steal(). See below for the meaning of each
+- * operation. Also see kerneldoc in fs/pipe.c for the pipe
+- * and generic variants of these hooks.
+- */
+-struct pipe_buf_operations {
+-	/*
+-	 * This is set to 1, if the generic pipe read/write may coalesce
+-	 * data into an existing buffer. If this is set to 0, a new pipe
+-	 * page segment is always used for new data.
+-	 */
+-	int can_merge;
+-
+-	/*
+-	 * ->map() returns a virtual address mapping of the pipe buffer.
+-	 * The last integer flag reflects whether this should be an atomic
+-	 * mapping or not. The atomic map is faster, however you can't take
+-	 * page faults before calling ->unmap() again. So if you need to eg
+-	 * access user data through copy_to/from_user(), then you must get
+-	 * a non-atomic map. ->map() uses the KM_USER0 atomic slot for
+-	 * atomic maps, so you can't map more than one pipe_buffer at once
+-	 * and you have to be careful if mapping another page as source
+-	 * or destination for a copy (IOW, it has to use something else
+-	 * than KM_USER0).
+-	 */
+-	void * (*map)(struct pipe_inode_info *, struct pipe_buffer *, int);
+-
+-	/*
+-	 * Undoes ->map(), finishes the virtual mapping of the pipe buffer.
+-	 */
+-	void (*unmap)(struct pipe_inode_info *, struct pipe_buffer *, void *);
+-
+-	/*
+-	 * ->confirm() verifies that the data in the pipe buffer is there
+-	 * and that the contents are good. If the pages in the pipe belong
+-	 * to a file system, we may need to wait for IO completion in this
+-	 * hook. Returns 0 for good, or a negative error value in case of
+-	 * error.
+-	 */
+-	int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *);
+-
+-	/*
+-	 * When the contents of this pipe buffer has been completely
+-	 * consumed by a reader, ->release() is called.
+-	 */
+-	void (*release)(struct pipe_inode_info *, struct pipe_buffer *);
+-
+-	/*
+-	 * Attempt to take ownership of the pipe buffer and its contents.
+-	 * ->steal() returns 0 for success, in which case the contents
+-	 * of the pipe (the buf->page) is locked and now completely owned
+-	 * by the caller. The page may then be transferred to a different
+-	 * mapping, the most often used case is insertion into different
+-	 * file address space cache.
+-	 */
+-	int (*steal)(struct pipe_inode_info *, struct pipe_buffer *);
+-
+-	/*
+-	 * Get a reference to the pipe buffer.
+-	 */
+-	void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
+-};
+-
+-/* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual
+-   memory allocation, whereas PIPE_BUF makes atomicity guarantees.  */
+-#define PIPE_SIZE		PAGE_SIZE
+-
+-/* Drop the inode semaphore and wait for a pipe event, atomically */
+-void pipe_wait(struct pipe_inode_info *pipe);
+-
+-struct pipe_inode_info * alloc_pipe_info(struct inode * inode);
+-void free_pipe_info(struct inode * inode);
+-void __free_pipe_info(struct pipe_inode_info *);
+-
+-/* Generic pipe buffer ops functions */
+-void *generic_pipe_buf_map(struct pipe_inode_info *, struct pipe_buffer *, int);
+-void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void *);
+-void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
+-int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
+-int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+-
+-#endif
+diff --git a/include/linux/splice.h b/include/linux/splice.h
+deleted file mode 100644
+index 528dcb9..0000000
+--- a/include/linux/splice.h
++++ /dev/null
+@@ -1,74 +0,0 @@
+-/*
+- * Function declerations and data structures related to the splice
+- * implementation.
+- *
+- * Copyright (C) 2007 Jens Axboe <jens.axboe at oracle.com>
+- *
+- */
+-#ifndef SPLICE_H
+-#define SPLICE_H
+-
+-#include <linux/pipe_fs_i.h>
+-
+-/*
+- * splice is tied to pipes as a transport (at least for now), so we'll just
+- * add the splice flags here.
+- */
+-#define SPLICE_F_MOVE	(0x01)	/* move pages instead of copying */
+-#define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */
+-				 /* we may still block on the fd we splice */
+-				 /* from/to, of course */
+-#define SPLICE_F_MORE	(0x04)	/* expect more data */
+-#define SPLICE_F_GIFT	(0x08)	/* pages passed in are a gift */
+-
+-/*
+- * Passed to the actors
+- */
+-struct splice_desc {
+-	unsigned int len, total_len;	/* current and remaining length */
+-	unsigned int flags;		/* splice flags */
+-	/*
+-	 * actor() private data
+-	 */
+-	union {
+-		void __user *userptr;	/* memory to write to */
+-		struct file *file;	/* file to read/write */
+-		void *data;		/* cookie */
+-	} u;
+-	loff_t pos;			/* file position */
+-};
+-
+-struct partial_page {
+-	unsigned int offset;
+-	unsigned int len;
+-	unsigned long private;
+-};
+-
+-/*
+- * Passed to splice_to_pipe
+- */
+-struct splice_pipe_desc {
+-	struct page **pages;		/* page map */
+-	struct partial_page *partial;	/* pages[] may not be contig */
+-	int nr_pages;			/* number of pages in map */
+-	unsigned int flags;		/* splice flags */
+-	const struct pipe_buf_operations *ops;/* ops associated with output pipe */
+-	void (*spd_release)(struct splice_pipe_desc *, unsigned int);
+-};
+-
+-typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
+-			   struct splice_desc *);
+-typedef int (splice_direct_actor)(struct pipe_inode_info *,
+-				  struct splice_desc *);
+-
+-extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *,
+-				loff_t *, size_t, unsigned int,
+-				splice_actor *);
+-extern ssize_t __splice_from_pipe(struct pipe_inode_info *,
+-				  struct splice_desc *, splice_actor *);
+-extern ssize_t splice_to_pipe(struct pipe_inode_info *,
+-			      struct splice_pipe_desc *);
+-extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
+-				      splice_direct_actor *);
+-
+-#endif
+diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h
+index 10709cb..9bbadbd 100644
+--- a/include/linux/sunrpc/debug.h
++++ b/include/linux/sunrpc/debug.h
+@@ -88,6 +88,7 @@ enum {
+ 	CTL_SLOTTABLE_TCP,
+ 	CTL_MIN_RESVPORT,
+ 	CTL_MAX_RESVPORT,
++	CTL_TRANSPORT,
+ };
+ 
+ #endif /* _LINUX_SUNRPC_DEBUG_H_ */
+diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
+index dc69068..3a0f48f 100644
+--- a/include/linux/sunrpc/svc.h
++++ b/include/linux/sunrpc/svc.h
+@@ -255,7 +255,7 @@ struct svc_rqst {
+ 						 * determine what device number
+ 						 * to report (real or virtual)
+ 						 */
+-	int			rq_splice_ok;   /* turned off in gss privacy
++	int			rq_sendfile_ok;   /* turned off in gss privacy
+ 						 * to prevent encrypting page
+ 						 * cache pages */
+ 	wait_queue_head_t	rq_wait;	/* synchronization */
+diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
+index 6bfea9e..f0a110d 100644
+--- a/net/sunrpc/auth.c
++++ b/net/sunrpc/auth.c
+@@ -566,19 +566,16 @@ rpcauth_uptodatecred(struct rpc_task *task)
+ 		test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0;
+ }
+ 
+-static struct shrinker rpc_cred_shrinker = {
+-	.shrink = rpcauth_cache_shrinker,
+-	.seeks = DEFAULT_SEEKS,
+-};
++static struct shrinker *rpc_cred_shrinker;
+ 
+ void __init rpcauth_init_module(void)
+ {
+ 	rpc_init_authunix();
+ 	rpc_init_generic_auth();
+-	register_shrinker(&rpc_cred_shrinker);
++	rpc_cred_shrinker = set_shrinker(DEFAULT_SEEKS, rpcauth_cache_shrinker);
+ }
+ 
+ void __exit rpcauth_remove_module(void)
+ {
+-	unregister_shrinker(&rpc_cred_shrinker);
++	remove_shrinker(rpc_cred_shrinker);
+ }
+diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
+index 853a414..71ba862 100644
+--- a/net/sunrpc/auth_gss/auth_gss.c
++++ b/net/sunrpc/auth_gss/auth_gss.c
+@@ -481,7 +481,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
+ 	const void *p, *end;
+ 	void *buf;
+ 	struct gss_upcall_msg *gss_msg;
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 	struct gss_cl_ctx *ctx;
+ 	uid_t uid;
+ 	ssize_t err = -EFBIG;
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index 81ae3d6..acfb1d1 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -859,7 +859,7 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
+ 	u32 priv_len, maj_stat;
+ 	int pad, saved_len, remaining_len, offset;
+ 
+-	rqstp->rq_splice_ok = 0;
++	rqstp->rq_sendfile_ok = 0;
+ 
+ 	priv_len = svc_getnl(&buf->head[0]);
+ 	if (rqstp->rq_deferred) {
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index c996671..58e606e 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -696,7 +696,7 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
+ {
+ 	struct cache_reader *rp = filp->private_data;
+ 	struct cache_request *rq;
+-	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
++	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
+ 	int err;
+ 
+ 	if (count == 0)
+@@ -773,7 +773,7 @@ cache_write(struct file *filp, const char __user *buf, size_t count,
+ 	    loff_t *ppos)
+ {
+ 	int err;
+-	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
++	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
+ 
+ 	if (count == 0)
+ 		return 0;
+@@ -804,7 +804,7 @@ cache_poll(struct file *filp, poll_table *wait)
+ 	unsigned int mask;
+ 	struct cache_reader *rp = filp->private_data;
+ 	struct cache_queue *cq;
+-	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
++	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
+ 
+ 	poll_wait(filp, &queue_wait, wait);
+ 
+@@ -1239,7 +1239,7 @@ static int c_show(struct seq_file *m, void *p)
+ 	return cd->cache_show(m, cd, cp);
+ }
+ 
+-static const struct seq_operations cache_content_op = {
++static struct seq_operations cache_content_op = {
+ 	.start	= c_start,
+ 	.next	= c_next,
+ 	.stop	= c_stop,
+@@ -1269,7 +1269,7 @@ static const struct file_operations content_file_operations = {
+ static ssize_t read_flush(struct file *file, char __user *buf,
+ 			    size_t count, loff_t *ppos)
+ {
+-	struct cache_detail *cd = PDE(file->f_path.dentry->d_inode)->data;
++	struct cache_detail *cd = PDE(file->f_dentry->d_inode)->data;
+ 	char tbuf[20];
+ 	unsigned long p = *ppos;
+ 	size_t len;
+@@ -1290,7 +1290,7 @@ static ssize_t read_flush(struct file *file, char __user *buf,
+ static ssize_t write_flush(struct file * file, const char __user * buf,
+ 			     size_t count, loff_t *ppos)
+ {
+-	struct cache_detail *cd = PDE(file->f_path.dentry->d_inode)->data;
++	struct cache_detail *cd = PDE(file->f_dentry->d_inode)->data;
+ 	char tbuf[20];
+ 	char *ep;
+ 	long flushtime;
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 76739e9..11bfb52 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -213,10 +213,10 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
+ 	}
+ 
+ 	/* save the nodename */
+-	clnt->cl_nodelen = strlen(utsname()->nodename);
++	clnt->cl_nodelen = strlen(system_utsname.nodename);
+ 	if (clnt->cl_nodelen > UNX_MAXNODENAME)
+ 		clnt->cl_nodelen = UNX_MAXNODENAME;
+-	memcpy(clnt->cl_nodename, utsname()->nodename, clnt->cl_nodelen);
++	memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen);
+ 	rpc_register_client(clnt);
+ 	return clnt;
+ 
+diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
+index 23a2b8f..003a6ec 100644
+--- a/net/sunrpc/rpc_pipe.c
++++ b/net/sunrpc/rpc_pipe.c
+@@ -26,6 +26,7 @@
+ #include <linux/sunrpc/clnt.h>
+ #include <linux/workqueue.h>
+ #include <linux/sunrpc/rpc_pipe_fs.h>
++#include <linux/path.h>
+ 
+ static struct vfsmount *rpc_mount __read_mostly;
+ static int rpc_mount_count;
+@@ -224,7 +225,7 @@ out:
+ static ssize_t
+ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
+ {
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 	struct rpc_inode *rpci = RPC_I(inode);
+ 	struct rpc_pipe_msg *msg;
+ 	int res = 0;
+@@ -267,7 +268,7 @@ out_unlock:
+ static ssize_t
+ rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset)
+ {
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 	struct rpc_inode *rpci = RPC_I(inode);
+ 	int res;
+ 
+@@ -285,7 +286,7 @@ rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
+ 	struct rpc_inode *rpci;
+ 	unsigned int mask = 0;
+ 
+-	rpci = RPC_I(filp->f_path.dentry->d_inode);
++	rpci = RPC_I(filp->f_dentry->d_inode);
+ 	poll_wait(filp, &rpci->waitq, wait);
+ 
+ 	mask = POLLOUT | POLLWRNORM;
+@@ -300,7 +301,7 @@ static int
+ rpc_pipe_ioctl(struct inode *ino, struct file *filp,
+ 		unsigned int cmd, unsigned long arg)
+ {
+-	struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
++	struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode);
+ 	int len;
+ 
+ 	switch (cmd) {
+@@ -495,7 +496,7 @@ rpc_lookup_parent(char *path, struct nameidata *nd)
+ static void
+ rpc_release_path(struct nameidata *nd)
+ {
+-	path_put(&nd->path);
++	backport_path_put(nd);
+ 	rpc_put_mount();
+ }
+ 
+@@ -668,7 +669,7 @@ rpc_lookup_negative(char *path, struct nameidata *nd)
+ 
+ 	if ((error = rpc_lookup_parent(path, nd)) != 0)
+ 		return ERR_PTR(error);
+-	dentry = rpc_lookup_create(nd->path.dentry, nd->last.name, nd->last.len,
++	dentry = rpc_lookup_create(nd->dentry, nd->last.name, nd->last.len,
+ 				   1);
+ 	if (IS_ERR(dentry))
+ 		rpc_release_path(nd);
+@@ -696,7 +697,7 @@ rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
+ 	dentry = rpc_lookup_negative(path, &nd);
+ 	if (IS_ERR(dentry))
+ 		return dentry;
+-	dir = nd.path.dentry->d_inode;
++	dir = nd.dentry->d_inode;
+ 	if ((error = __rpc_mkdir(dir, dentry)) != 0)
+ 		goto err_dput;
+ 	RPC_I(dentry->d_inode)->private = rpc_client;
+@@ -897,7 +898,7 @@ static struct file_system_type rpc_pipe_fs_type = {
+ };
+ 
+ static void
+-init_once(void *foo)
++init_once(void *foo, struct kmem_cache *cachep, unsigned long temp)
+ {
+ 	struct rpc_inode *rpci = (struct rpc_inode *) foo;
+ 
+diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
+index 24db2b4..6f9e46c 100644
+--- a/net/sunrpc/rpcb_clnt.c
++++ b/net/sunrpc/rpcb_clnt.c
+@@ -117,18 +117,6 @@ static void rpcb_map_release(void *data)
+ 	kfree(map);
+ }
+ 
+-static const struct sockaddr_in rpcb_inaddr_loopback = {
+-	.sin_family		= AF_INET,
+-	.sin_addr.s_addr	= htonl(INADDR_LOOPBACK),
+-	.sin_port		= htons(RPCBIND_PORT),
+-};
+-
+-static const struct sockaddr_in6 rpcb_in6addr_loopback = {
+-	.sin6_family		= AF_INET6,
+-	.sin6_addr		= IN6ADDR_LOOPBACK_INIT,
+-	.sin6_port		= htons(RPCBIND_PORT),
+-};
+-
+ static struct rpc_clnt *rpcb_create_local(struct sockaddr *addr,
+ 					  size_t addrlen, u32 version)
+ {
+@@ -249,6 +237,12 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay)
+ 		.rpc_resp	= okay,
+ 	};
+ 
++	struct sockaddr_in rpcb_inaddr_loopback = {
++		.sin_family		= AF_INET,
++		.sin_addr.s_addr	= htonl(INADDR_LOOPBACK),
++		.sin_port		= htons(RPCBIND_PORT),
++	};
++
+ 	dprintk("RPC:       %sregistering (%u, %u, %d, %u) with local "
+ 			"rpcbind\n", (port ? "" : "un"),
+ 			prog, vers, prot, port);
+@@ -272,6 +266,12 @@ static int rpcb_register_netid4(struct sockaddr_in *address_to_register,
+ 	unsigned short port = ntohs(address_to_register->sin_port);
+ 	char buf[32];
+ 
++	struct sockaddr_in rpcb_inaddr_loopback = {
++		.sin_family		= AF_INET,
++		.sin_addr.s_addr	= htonl(INADDR_LOOPBACK),
++		.sin_port		= htons(RPCBIND_PORT),
++	};
++
+ 	/* Construct AF_INET universal address */
+ 	snprintf(buf, sizeof(buf),
+ 			NIPQUAD_FMT".%u.%u",
+@@ -303,6 +303,12 @@ static int rpcb_register_netid6(struct sockaddr_in6 *address_to_register,
+ 	unsigned short port = ntohs(address_to_register->sin6_port);
+ 	char buf[64];
+ 
++	struct sockaddr_in6 rpcb_in6addr_loopback = {
++		.sin6_family		= AF_INET6,
++		.sin6_addr		= IN6ADDR_LOOPBACK_INIT,
++		.sin6_port		= htons(RPCBIND_PORT),
++	};
++
+ 	/* Construct AF_INET6 universal address */
+ 	snprintf(buf, sizeof(buf),
+ 			NIP6_FMT".%u.%u",
+diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
+index 50b049c..5053a5f 100644
+--- a/net/sunrpc/stats.c
++++ b/net/sunrpc/stats.c
+@@ -264,7 +264,7 @@ rpc_proc_init(void)
+ 	dprintk("RPC:       registering /proc/net/rpc\n");
+ 	if (!proc_net_rpc) {
+ 		struct proc_dir_entry *ent;
+-		ent = proc_mkdir("rpc", init_net.proc_net);
++		ent = proc_mkdir("rpc", proc_net);
+ 		if (ent) {
+ 			ent->owner = THIS_MODULE;
+ 			proc_net_rpc = ent;
+@@ -278,7 +278,7 @@ rpc_proc_exit(void)
+ 	dprintk("RPC:       unregistering /proc/net/rpc\n");
+ 	if (proc_net_rpc) {
+ 		proc_net_rpc = NULL;
+-		remove_proc_entry("rpc", init_net.proc_net);
++		remove_proc_entry("rpc", proc_net);
+ 	}
+ }
+ 
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index 5a32cb7..e0e87c6 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -174,7 +174,7 @@ fail:
+ static int
+ svc_pool_map_init_percpu(struct svc_pool_map *m)
+ {
+-	unsigned int maxpools = nr_cpu_ids;
++	unsigned int maxpools = highest_possible_processor_id() + 1;
+ 	unsigned int pidx = 0;
+ 	unsigned int cpu;
+ 	int err;
+@@ -202,7 +202,7 @@ svc_pool_map_init_percpu(struct svc_pool_map *m)
+ static int
+ svc_pool_map_init_pernode(struct svc_pool_map *m)
+ {
+-	unsigned int maxpools = nr_node_ids;
++	unsigned int maxpools = highest_possible_processor_id() + 1;
+ 	unsigned int pidx = 0;
+ 	unsigned int node;
+ 	int err;
+@@ -310,13 +310,12 @@ svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
+ 	switch (m->mode) {
+ 	case SVC_POOL_PERCPU:
+ 	{
+-		set_cpus_allowed_ptr(task, &cpumask_of_cpu(node));
++		set_cpus_allowed(task, cpumask_of_cpu(node));
+ 		break;
+ 	}
+ 	case SVC_POOL_PERNODE:
+ 	{
+-		node_to_cpumask_ptr(nodecpumask, node);
+-		set_cpus_allowed_ptr(task, nodecpumask);
++		set_cpus_allowed(task, node_to_cpumask(node));
+ 		break;
+ 	}
+ 	}
+@@ -831,7 +830,7 @@ svc_process(struct svc_rqst *rqstp)
+ 	rqstp->rq_res.tail[0].iov_base = NULL;
+ 	rqstp->rq_res.tail[0].iov_len = 0;
+ 	/* Will be turned off only in gss privacy case: */
+-	rqstp->rq_splice_ok = 1;
++	rqstp->rq_sendfile_ok = 1;
+ 
+ 	/* Setup reply header */
+ 	rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
+diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
+index f24800f..b30d725 100644
+--- a/net/sunrpc/svcauth_unix.c
++++ b/net/sunrpc/svcauth_unix.c
+@@ -678,7 +678,7 @@ int
+ svcauth_unix_set_client(struct svc_rqst *rqstp)
+ {
+ 	struct sockaddr_in *sin;
+-	struct sockaddr_in6 *sin6, sin6_storage;
++	struct sockaddr_in6 *sin6 = NULL, sin6_storage;
+ 	struct ip_map *ipm;
+ 
+ 	switch (rqstp->rq_addr.ss_family) {
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 3e65719..cbb47a6 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -472,12 +472,16 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
+ 	if (len < 0)
+ 		return len;
+ 	rqstp->rq_addrlen = len;
+-	if (skb->tstamp.tv64 == 0) {
+-		skb->tstamp = ktime_get_real();
++	if (skb->tstamp.off_sec == 0) {
++		struct timeval tv;
++
++		tv.tv_sec = xtime.tv_sec;
++		tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
++		skb_set_timestamp(skb, &tv);
+ 		/* Don't enable netstamp, sunrpc doesn't
+ 		   need that much accuracy */
+ 	}
+-	svsk->sk_sk->sk_stamp = skb->tstamp;
++	skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp);
+ 	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
+ 
+ 	/*
+diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c
+index 5231f7a..1482e34 100644
+--- a/net/sunrpc/sysctl.c
++++ b/net/sunrpc/sysctl.c
+@@ -135,6 +135,7 @@ done:
+ 
+ static ctl_table debug_table[] = {
+ 	{
++		.ctl_name	= CTL_RPCDEBUG, 
+ 		.procname	= "rpc_debug",
+ 		.data		= &rpc_debug,
+ 		.maxlen		= sizeof(int),
+@@ -142,6 +143,7 @@ static ctl_table debug_table[] = {
+ 		.proc_handler	= &proc_dodebug
+ 	},
+ 	{
++		.ctl_name	= CTL_NFSDEBUG,
+ 		.procname	= "nfs_debug",
+ 		.data		= &nfs_debug,
+ 		.maxlen		= sizeof(int),
+@@ -149,6 +151,7 @@ static ctl_table debug_table[] = {
+ 		.proc_handler	= &proc_dodebug
+ 	},
+ 	{
++		.ctl_name	= CTL_NFSDDEBUG,
+ 		.procname	= "nfsd_debug",
+ 		.data		= &nfsd_debug,
+ 		.maxlen		= sizeof(int),
+@@ -156,6 +159,7 @@ static ctl_table debug_table[] = {
+ 		.proc_handler	= &proc_dodebug
+ 	},
+ 	{
++		.ctl_name	= CTL_NLMDEBUG,
+ 		.procname	= "nlm_debug",
+ 		.data		= &nlm_debug,
+ 		.maxlen		= sizeof(int),
+@@ -163,6 +167,7 @@ static ctl_table debug_table[] = {
+ 		.proc_handler	= &proc_dodebug
+ 	},
+ 	{
++		.ctl_name	= CTL_TRANSPORT,
+ 		.procname	= "transports",
+ 		.maxlen		= 256,
+ 		.mode		= 0444,
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+index 6fb493c..761ad29 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -247,10 +247,6 @@ static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
+ 	struct svcxprt_rdma *xprt = cq_context;
+ 	unsigned long flags;
+ 
+-	/* Guard against unconditional flush call for destroyed QP */
+-	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount)==0)
+-		return;
+-
+ 	/*
+ 	 * Set the bit regardless of whether or not it's on the list
+ 	 * because it may be on the list already due to an SQ
+@@ -411,10 +407,6 @@ static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
+ 	struct svcxprt_rdma *xprt = cq_context;
+ 	unsigned long flags;
+ 
+-	/* Guard against unconditional flush call for destroyed QP */
+-	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount)==0)
+-		return;
+-
+ 	/*
+ 	 * Set the bit regardless of whether or not it's on the list
+ 	 * because it may be on the list already due to an RQ
+@@ -1116,9 +1108,6 @@ static void __svc_rdma_free(struct work_struct *work)
+ 		container_of(work, struct svcxprt_rdma, sc_work);
+ 	dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);
+ 
+-	/* We should only be called from kref_put */
+-	BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);
+-
+ 	/*
+ 	 * Destroy queued, but not processed read completions. Note
+ 	 * that this cleanup has to be done before destroying the
+diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
+index 8710117..ce94fa4 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma.c
++++ b/net/sunrpc/xprtrdma/svc_rdma.c
+@@ -116,6 +116,7 @@ static int read_reset_stat(ctl_table *table, int write,
+ static struct ctl_table_header *svcrdma_table_header;
+ static ctl_table svcrdma_parm_table[] = {
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "max_requests",
+ 		.data		= &svcrdma_max_requests,
+ 		.maxlen		= sizeof(unsigned int),
+@@ -126,6 +127,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.extra2		= &max_max_requests
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "max_req_size",
+ 		.data		= &svcrdma_max_req_size,
+ 		.maxlen		= sizeof(unsigned int),
+@@ -136,6 +138,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.extra2		= &max_max_inline
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "max_outbound_read_requests",
+ 		.data		= &svcrdma_ord,
+ 		.maxlen		= sizeof(unsigned int),
+@@ -147,6 +150,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 	},
+ 
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "rdma_stat_read",
+ 		.data		= &rdma_stat_read,
+ 		.maxlen		= sizeof(atomic_t),
+@@ -154,6 +158,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.proc_handler	= &read_reset_stat,
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "rdma_stat_recv",
+ 		.data		= &rdma_stat_recv,
+ 		.maxlen		= sizeof(atomic_t),
+@@ -161,6 +166,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.proc_handler	= &read_reset_stat,
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "rdma_stat_write",
+ 		.data		= &rdma_stat_write,
+ 		.maxlen		= sizeof(atomic_t),
+@@ -168,6 +174,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.proc_handler	= &read_reset_stat,
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "rdma_stat_sq_starve",
+ 		.data		= &rdma_stat_sq_starve,
+ 		.maxlen		= sizeof(atomic_t),
+@@ -175,6 +182,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.proc_handler	= &read_reset_stat,
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "rdma_stat_rq_starve",
+ 		.data		= &rdma_stat_rq_starve,
+ 		.maxlen		= sizeof(atomic_t),
+@@ -182,6 +190,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.proc_handler	= &read_reset_stat,
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "rdma_stat_rq_poll",
+ 		.data		= &rdma_stat_rq_poll,
+ 		.maxlen		= sizeof(atomic_t),
+@@ -189,6 +198,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.proc_handler	= &read_reset_stat,
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "rdma_stat_rq_prod",
+ 		.data		= &rdma_stat_rq_prod,
+ 		.maxlen		= sizeof(atomic_t),
+@@ -196,6 +206,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.proc_handler	= &read_reset_stat,
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "rdma_stat_sq_poll",
+ 		.data		= &rdma_stat_sq_poll,
+ 		.maxlen		= sizeof(atomic_t),
+@@ -203,6 +214,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.proc_handler	= &read_reset_stat,
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "rdma_stat_sq_prod",
+ 		.data		= &rdma_stat_sq_prod,
+ 		.maxlen		= sizeof(atomic_t),
+@@ -216,6 +228,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 
+ static ctl_table svcrdma_table[] = {
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "svc_rdma",
+ 		.mode		= 0555,
+ 		.child		= svcrdma_parm_table

Deleted: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/rnfs_net.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/rnfs_net.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.1/rnfs_net.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,1050 +0,0 @@
-diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
-index 76739e9..11bfb52 100644
---- a/net/sunrpc/clnt.c
-+++ b/net/sunrpc/clnt.c
-@@ -213,10 +213,10 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
- 	}
- 
- 	/* save the nodename */
--	clnt->cl_nodelen = strlen(utsname()->nodename);
-+	clnt->cl_nodelen = strlen(system_utsname.nodename);
- 	if (clnt->cl_nodelen > UNX_MAXNODENAME)
- 		clnt->cl_nodelen = UNX_MAXNODENAME;
--	memcpy(clnt->cl_nodename, utsname()->nodename, clnt->cl_nodelen);
-+	memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen);
- 	rpc_register_client(clnt);
- 	return clnt;
- 
-diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
-index a661a3a..b43b2a4 100644
---- a/net/sunrpc/socklib.c
-+++ b/net/sunrpc/socklib.c
-@@ -156,7 +156,7 @@ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
- 	desc.offset = sizeof(struct udphdr);
- 	desc.count = skb->len - desc.offset;
- 
--	if (skb_csum_unnecessary(skb))
-+	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
- 		goto no_checksum;
- 
- 	desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
-diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
-index 4486c59..95c6124 100644
---- a/net/sunrpc/xprtsock.c
-+++ b/net/sunrpc/xprtsock.c
-@@ -615,22 +615,6 @@ static int xs_udp_send_request(struct rpc_task *task)
- 	return status;
- }
- 
--/**
-- * xs_tcp_shutdown - gracefully shut down a TCP socket
-- * @xprt: transport
-- *
-- * Initiates a graceful shutdown of the TCP socket by calling the
-- * equivalent of shutdown(SHUT_WR);
-- */
--static void xs_tcp_shutdown(struct rpc_xprt *xprt)
--{
--	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
--	struct socket *sock = transport->sock;
--
--	if (sock != NULL)
--		kernel_sock_shutdown(sock, SHUT_WR);
--}
--
- static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf)
- {
- 	u32 reclen = buf->len - sizeof(rpc_fraghdr);
-@@ -709,7 +693,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
- 		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
- 			-status);
- 		clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
--		xs_tcp_shutdown(xprt);
-+		xprt_disconnect_done(xprt);
- 	}
- 
- 	return status;
-@@ -792,7 +776,7 @@ static void xs_destroy(struct rpc_xprt *xprt)
- 
- 	dprintk("RPC:       xs_destroy xprt %p\n", xprt);
- 
--	cancel_rearming_delayed_work(&transport->connect_worker);
-+	cancel_rearming_delayed_work(&transport->connect_worker.work);
- 
- 	xs_close(xprt);
- 	xs_free_peer_addresses(xprt);
-@@ -856,12 +840,8 @@ static void xs_udp_data_ready(struct sock *sk, int len)
- 		copied = repsize;
- 
- 	/* Suck it into the iovec, verify checksum if not done by hw. */
--	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
--		UDPX_INC_STATS_BH(sk, UDP_MIB_INERRORS);
-+	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb))
- 		goto out_unlock;
--	}
--
--	UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS);
- 
- 	/* Something worked... */
- 	dst_confirm(skb->dst);
-@@ -1670,7 +1650,8 @@ static void xs_tcp_connect_worker4(struct work_struct *work)
- 				break;
- 			default:
- 				/* get rid of existing socket, and retry */
--				xs_tcp_shutdown(xprt);
-+				xs_close(xprt);
-+				break;
- 		}
- 	}
- out:
-@@ -1729,7 +1710,8 @@ static void xs_tcp_connect_worker6(struct work_struct *work)
- 				break;
- 			default:
- 				/* get rid of existing socket, and retry */
--				xs_tcp_shutdown(xprt);
-+				xs_close(xprt);
-+				break;
- 		}
- 	}
- out:
-@@ -1776,19 +1758,6 @@ static void xs_connect(struct rpc_task *task)
- 	}
- }
- 
--static void xs_tcp_connect(struct rpc_task *task)
--{
--	struct rpc_xprt *xprt = task->tk_xprt;
--
--	/* Initiate graceful shutdown of the socket if not already done */
--	if (test_bit(XPRT_CONNECTED, &xprt->state))
--		xs_tcp_shutdown(xprt);
--	/* Exit if we need to wait for socket shutdown to complete */
--	if (test_bit(XPRT_CLOSING, &xprt->state))
--		return;
--	xs_connect(task);
--}
--
- /**
-  * xs_udp_print_stats - display UDP socket-specifc stats
-  * @xprt: rpc_xprt struct containing statistics
-@@ -1859,12 +1828,12 @@ static struct rpc_xprt_ops xs_tcp_ops = {
- 	.release_xprt		= xs_tcp_release_xprt,
- 	.rpcbind		= rpcb_getport_async,
- 	.set_port		= xs_set_port,
--	.connect		= xs_tcp_connect,
-+	.connect		= xs_connect,
- 	.buf_alloc		= rpc_malloc,
- 	.buf_free		= rpc_free,
- 	.send_request		= xs_tcp_send_request,
- 	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
--	.close			= xs_tcp_shutdown,
-+	.close			= xs_close,
- 	.destroy		= xs_destroy,
- 	.print_stats		= xs_tcp_print_stats,
- };
-@@ -2064,7 +2033,7 @@ int init_socket_xprt(void)
- {
- #ifdef RPC_DEBUG
- 	if (!sunrpc_table_header)
--		sunrpc_table_header = register_sysctl_table(sunrpc_table);
-+		sunrpc_table_header = register_sysctl_table(sunrpc_table, 0);
- #endif
- 
- 	xprt_register_transport(&xs_udp_transport);
-diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
-index 385f427..1ac1e08 100644
---- a/net/sunrpc/sched.c
-+++ b/net/sunrpc/sched.c
-@@ -222,9 +222,9 @@ void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
- }
- EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
- 
--static int rpc_wait_bit_killable(void *word)
-+static int rpc_wait_bit_interruptible(void *word)
- {
--	if (fatal_signal_pending(current))
-+	if (signal_pending(current))
- 		return -ERESTARTSYS;
- 	schedule();
- 	return 0;
-@@ -276,9 +276,9 @@ static void rpc_mark_complete_task(struct rpc_task *task)
- int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
- {
- 	if (action == NULL)
--		action = rpc_wait_bit_killable;
-+		action = rpc_wait_bit_interruptible;
- 	return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
--			action, TASK_KILLABLE);
-+			action, TASK_INTERRUPTIBLE);
- }
- EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
- 
-@@ -660,8 +660,8 @@ static void __rpc_execute(struct rpc_task *task)
- 		/* sync task: sleep here */
- 		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
- 		status = out_of_line_wait_on_bit(&task->tk_runstate,
--				RPC_TASK_QUEUED, rpc_wait_bit_killable,
--				TASK_KILLABLE);
-+				RPC_TASK_QUEUED, rpc_wait_bit_interruptible,
-+				TASK_INTERRUPTIBLE);
- 		if (status == -ERESTARTSYS) {
- 			/*
- 			 * When a sync task receives a signal, it exits with
-@@ -800,6 +800,8 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
- 		kref_get(&task->tk_client->cl_kref);
- 		if (task->tk_client->cl_softrtry)
- 			task->tk_flags |= RPC_TASK_SOFT;
-+		if (!task->tk_client->cl_intr)
-+                       task->tk_flags |= RPC_TASK_NOINTR;
- 	}
- 
- 	if (task->tk_ops->rpc_call_prepare != NULL)
-@@ -819,7 +821,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
- 	task->tk_start = jiffies;
- 
- 	dprintk("RPC:       new task initialized, procpid %u\n",
--				task_pid_nr(current));
-+				current->pid);
- }
- 
- static struct rpc_task *
-diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
-index 6bfea9e..b73c250 100644
---- a/net/sunrpc/auth.c
-+++ b/net/sunrpc/auth.c
-@@ -566,19 +566,17 @@ rpcauth_uptodatecred(struct rpc_task *task)
- 		test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0;
- }
- 
--static struct shrinker rpc_cred_shrinker = {
--	.shrink = rpcauth_cache_shrinker,
--	.seeks = DEFAULT_SEEKS,
--};
-+static struct shrinker *rpc_cred_shrinker;
- 
- void __init rpcauth_init_module(void)
- {
- 	rpc_init_authunix();
- 	rpc_init_generic_auth();
--	register_shrinker(&rpc_cred_shrinker);
-+	rpc_cred_shrinker = set_shrinker(DEFAULT_SEEKS, rpcauth_cache_shrinker);
- }
- 
- void __exit rpcauth_remove_module(void)
- {
--	unregister_shrinker(&rpc_cred_shrinker);
-+	if (rpc_cred_shrinker != NULL)
-+               remove_shrinker(rpc_cred_shrinker);
- }
-diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
-index 5a32cb7..a0f61ff 100644
---- a/net/sunrpc/svc.c
-+++ b/net/sunrpc/svc.c
-@@ -174,7 +174,7 @@ fail:
- static int
- svc_pool_map_init_percpu(struct svc_pool_map *m)
- {
--	unsigned int maxpools = nr_cpu_ids;
-+	unsigned int maxpools = highest_possible_processor_id() + 1;
- 	unsigned int pidx = 0;
- 	unsigned int cpu;
- 	int err;
-@@ -202,7 +202,7 @@ svc_pool_map_init_percpu(struct svc_pool_map *m)
- static int
- svc_pool_map_init_pernode(struct svc_pool_map *m)
- {
--	unsigned int maxpools = nr_node_ids;
-+	unsigned int maxpools = highest_possible_node_id() + 1;
- 	unsigned int pidx = 0;
- 	unsigned int node;
- 	int err;
-@@ -310,13 +310,12 @@ svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
- 	switch (m->mode) {
- 	case SVC_POOL_PERCPU:
- 	{
--		set_cpus_allowed_ptr(task, &cpumask_of_cpu(node));
-+		set_cpus_allowed(current, cpumask_of_cpu(node));
- 		break;
- 	}
- 	case SVC_POOL_PERNODE:
- 	{
--		node_to_cpumask_ptr(nodecpumask, node);
--		set_cpus_allowed_ptr(task, nodecpumask);
-+		set_cpus_allowed(current, node_to_cpumask(node));
- 		break;
- 	}
- 	}
-diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
-index 3e65719..d003a0f 100644
---- a/net/sunrpc/svcsock.c
-+++ b/net/sunrpc/svcsock.c
-@@ -472,12 +472,16 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
- 	if (len < 0)
- 		return len;
- 	rqstp->rq_addrlen = len;
--	if (skb->tstamp.tv64 == 0) {
--		skb->tstamp = ktime_get_real();
-+	if (skb->tstamp.off_sec == 0) {
-+		struct timeval tv;
-+
-+		tv.tv_sec = xtime.tv_sec; 
-+		tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC; 
-+		skb_set_timestamp(skb, &tv);
- 		/* Don't enable netstamp, sunrpc doesn't
- 		   need that much accuracy */
- 	}
--	svsk->sk_sk->sk_stamp = skb->tstamp;
-+	skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp);
- 	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
- 
- 	/*
-diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
-index 24db2b4..6f9e46c 100644
---- a/net/sunrpc/rpcb_clnt.c
-+++ b/net/sunrpc/rpcb_clnt.c
-@@ -117,18 +117,6 @@ static void rpcb_map_release(void *data)
- 	kfree(map);
- }
- 
--static const struct sockaddr_in rpcb_inaddr_loopback = {
--	.sin_family		= AF_INET,
--	.sin_addr.s_addr	= htonl(INADDR_LOOPBACK),
--	.sin_port		= htons(RPCBIND_PORT),
--};
--
--static const struct sockaddr_in6 rpcb_in6addr_loopback = {
--	.sin6_family		= AF_INET6,
--	.sin6_addr		= IN6ADDR_LOOPBACK_INIT,
--	.sin6_port		= htons(RPCBIND_PORT),
--};
--
- static struct rpc_clnt *rpcb_create_local(struct sockaddr *addr,
- 					  size_t addrlen, u32 version)
- {
-@@ -249,6 +237,12 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay)
- 		.rpc_resp	= okay,
- 	};
- 
-+	struct sockaddr_in rpcb_inaddr_loopback = {
-+		.sin_family		= AF_INET,
-+		.sin_addr.s_addr	= htonl(INADDR_LOOPBACK),
-+		.sin_port		= htons(RPCBIND_PORT),
-+	};
-+
- 	dprintk("RPC:       %sregistering (%u, %u, %d, %u) with local "
- 			"rpcbind\n", (port ? "" : "un"),
- 			prog, vers, prot, port);
-@@ -272,6 +266,12 @@ static int rpcb_register_netid4(struct sockaddr_in *address_to_register,
- 	unsigned short port = ntohs(address_to_register->sin_port);
- 	char buf[32];
- 
-+	struct sockaddr_in rpcb_inaddr_loopback = {
-+		.sin_family		= AF_INET,
-+		.sin_addr.s_addr	= htonl(INADDR_LOOPBACK),
-+		.sin_port		= htons(RPCBIND_PORT),
-+	};
-+
- 	/* Construct AF_INET universal address */
- 	snprintf(buf, sizeof(buf),
- 			NIPQUAD_FMT".%u.%u",
-@@ -303,6 +303,12 @@ static int rpcb_register_netid6(struct sockaddr_in6 *address_to_register,
- 	unsigned short port = ntohs(address_to_register->sin6_port);
- 	char buf[64];
- 
-+	struct sockaddr_in6 rpcb_in6addr_loopback = {
-+		.sin6_family		= AF_INET6,
-+		.sin6_addr		= IN6ADDR_LOOPBACK_INIT,
-+		.sin6_port		= htons(RPCBIND_PORT),
-+	};
-+
- 	/* Construct AF_INET6 universal address */
- 	snprintf(buf, sizeof(buf),
- 			NIP6_FMT".%u.%u",
-diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
-index c996671..bb8e0a8 100644
---- a/net/sunrpc/cache.c
-+++ b/net/sunrpc/cache.c
-@@ -316,27 +316,27 @@ static int create_cache_proc_entries(struct cache_detail *cd)
- 	cd->proc_ent->owner = cd->owner;
- 	cd->channel_ent = cd->content_ent = NULL;
- 
--	p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
--			     cd->proc_ent, &cache_flush_operations, cd);
-+	p = create_proc_entry("flush", S_IFREG|S_IRUSR|S_IWUSR, cd->proc_ent);
- 	cd->flush_ent = p;
- 	if (p == NULL)
- 		goto out_nomem;
-+	p->proc_fops = &cache_flush_operations;
- 	p->owner = cd->owner;
- 
- 	if (cd->cache_request || cd->cache_parse) {
--		p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
--				     cd->proc_ent, &cache_file_operations, cd);
-+		p = create_proc_entry("channel", S_IFREG|S_IRUSR|S_IWUSR, cd->proc_ent);
- 		cd->channel_ent = p;
- 		if (p == NULL)
- 			goto out_nomem;
-+		p->proc_fops = &cache_file_operations;
- 		p->owner = cd->owner;
- 	}
- 	if (cd->cache_show) {
--		p = proc_create_data("content", S_IFREG|S_IRUSR|S_IWUSR,
--				cd->proc_ent, &content_file_operations, cd);
-+		p = create_proc_entry("content", S_IFREG|S_IRUSR|S_IWUSR, cd->proc_ent);
- 		cd->content_ent = p;
- 		if (p == NULL)
- 			goto out_nomem;
-+		p->proc_fops = &content_file_operations;
- 		p->owner = cd->owner;
- 	}
- 	return 0;
-@@ -393,7 +393,8 @@ void cache_unregister(struct cache_detail *cd)
- 	remove_cache_proc_entries(cd);
- 	if (list_empty(&cache_list)) {
- 		/* module must be being unloaded so its safe to kill the worker */
--		cancel_delayed_work_sync(&cache_cleaner);
-+		cancel_delayed_work(&cache_cleaner);
-+		flush_scheduled_work();
- 	}
- 	return;
- out:
-@@ -696,7 +697,7 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
- {
- 	struct cache_reader *rp = filp->private_data;
- 	struct cache_request *rq;
--	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
-+	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
- 	int err;
- 
- 	if (count == 0)
-@@ -773,7 +774,7 @@ cache_write(struct file *filp, const char __user *buf, size_t count,
- 	    loff_t *ppos)
- {
- 	int err;
--	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
-+	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
- 
- 	if (count == 0)
- 		return 0;
-@@ -804,7 +805,7 @@ cache_poll(struct file *filp, poll_table *wait)
- 	unsigned int mask;
- 	struct cache_reader *rp = filp->private_data;
- 	struct cache_queue *cq;
--	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
-+	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
- 
- 	poll_wait(filp, &queue_wait, wait);
- 
-@@ -1248,15 +1249,23 @@ static const struct seq_operations cache_content_op = {
- 
- static int content_open(struct inode *inode, struct file *file)
- {
-+	int res;
- 	struct handle *han;
- 	struct cache_detail *cd = PDE(inode)->data;
- 
--	han = __seq_open_private(file, &cache_content_op, sizeof(*han));
-+	han = kmalloc(sizeof(*han), GFP_KERNEL);
- 	if (han == NULL)
- 		return -ENOMEM;
- 
- 	han->cd = cd;
--	return 0;
-+
-+	res = seq_open(file, (struct seq_operations *) &cache_content_op);
-+	if (res)
-+		kfree(han);
-+	else
-+		((struct seq_file *)file->private_data)->private = han;
-+
-+	return res;
- }
- 
- static const struct file_operations content_file_operations = {
-@@ -1269,7 +1278,7 @@ static const struct file_operations content_file_operations = {
- static ssize_t read_flush(struct file *file, char __user *buf,
- 			    size_t count, loff_t *ppos)
- {
--	struct cache_detail *cd = PDE(file->f_path.dentry->d_inode)->data;
-+	struct cache_detail *cd = PDE(file->f_dentry->d_inode)->data;
- 	char tbuf[20];
- 	unsigned long p = *ppos;
- 	size_t len;
-@@ -1290,7 +1299,7 @@ static ssize_t read_flush(struct file *file, char __user *buf,
- static ssize_t write_flush(struct file * file, const char __user * buf,
- 			     size_t count, loff_t *ppos)
- {
--	struct cache_detail *cd = PDE(file->f_path.dentry->d_inode)->data;
-+	struct cache_detail *cd = PDE(file->f_dentry->d_inode)->data;
- 	char tbuf[20];
- 	char *ep;
- 	long flushtime;
-diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
-index 23a2b8f..add777b 100644
---- a/net/sunrpc/rpc_pipe.c
-+++ b/net/sunrpc/rpc_pipe.c
-@@ -143,7 +143,8 @@ rpc_close_pipes(struct inode *inode)
- 		rpci->nwriters = 0;
- 		if (ops->release_pipe)
- 			ops->release_pipe(inode);
--		cancel_delayed_work_sync(&rpci->queue_timeout);
-+		cancel_delayed_work(&rpci->queue_timeout);
-+		flush_workqueue(rpciod_workqueue);
- 	}
- 	rpc_inode_setowner(inode, NULL);
- 	mutex_unlock(&inode->i_mutex);
-@@ -224,7 +225,7 @@ out:
- static ssize_t
- rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
- {
--	struct inode *inode = filp->f_path.dentry->d_inode;
-+	struct inode *inode = filp->f_dentry->d_inode;
- 	struct rpc_inode *rpci = RPC_I(inode);
- 	struct rpc_pipe_msg *msg;
- 	int res = 0;
-@@ -267,7 +268,7 @@ out_unlock:
- static ssize_t
- rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset)
- {
--	struct inode *inode = filp->f_path.dentry->d_inode;
-+	struct inode *inode = filp->f_dentry->d_inode;
- 	struct rpc_inode *rpci = RPC_I(inode);
- 	int res;
- 
-@@ -285,7 +286,7 @@ rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
- 	struct rpc_inode *rpci;
- 	unsigned int mask = 0;
- 
--	rpci = RPC_I(filp->f_path.dentry->d_inode);
-+	rpci = RPC_I(filp->f_dentry->d_inode);
- 	poll_wait(filp, &rpci->waitq, wait);
- 
- 	mask = POLLOUT | POLLWRNORM;
-@@ -300,7 +301,7 @@ static int
- rpc_pipe_ioctl(struct inode *ino, struct file *filp,
- 		unsigned int cmd, unsigned long arg)
- {
--	struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
-+	struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode);
- 	int len;
- 
- 	switch (cmd) {
-@@ -471,19 +472,22 @@ static struct dentry_operations rpc_dentry_operations = {
- static int
- rpc_lookup_parent(char *path, struct nameidata *nd)
- {
--	struct vfsmount *mnt;
--
- 	if (path[0] == '\0')
- 		return -ENOENT;
- 
--	mnt = rpc_get_mount();
--	if (IS_ERR(mnt)) {
-+	nd->mnt = rpc_get_mount();
-+	if (IS_ERR(nd->mnt)) {
- 		printk(KERN_WARNING "%s: %s failed to mount "
- 			       "pseudofilesystem \n", __FILE__, __func__);
--		return PTR_ERR(mnt);
-+		return PTR_ERR(nd->mnt);
- 	}
-+	mntget(nd->mnt);
-+	nd->dentry = dget(rpc_mount->mnt_root);
-+	nd->last_type = LAST_ROOT;
-+	nd->flags = LOOKUP_PARENT;
-+	nd->depth = 0;
- 
--	if (vfs_path_lookup(mnt->mnt_root, mnt, path, LOOKUP_PARENT, nd)) {
-+	if (path_walk(path, nd)) {
- 		printk(KERN_WARNING "%s: %s failed to find path %s\n",
- 				__FILE__, __func__, path);
- 		rpc_put_mount();
-@@ -495,7 +499,7 @@ rpc_lookup_parent(char *path, struct nameidata *nd)
- static void
- rpc_release_path(struct nameidata *nd)
- {
--	path_put(&nd->path);
-+	path_release(nd);
- 	rpc_put_mount();
- }
- 
-@@ -668,7 +672,7 @@ rpc_lookup_negative(char *path, struct nameidata *nd)
- 
- 	if ((error = rpc_lookup_parent(path, nd)) != 0)
- 		return ERR_PTR(error);
--	dentry = rpc_lookup_create(nd->path.dentry, nd->last.name, nd->last.len,
-+	dentry = rpc_lookup_create(nd->dentry, nd->last.name, nd->last.len,
- 				   1);
- 	if (IS_ERR(dentry))
- 		rpc_release_path(nd);
-@@ -696,7 +700,7 @@ rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
- 	dentry = rpc_lookup_negative(path, &nd);
- 	if (IS_ERR(dentry))
- 		return dentry;
--	dir = nd.path.dentry->d_inode;
-+	dir = nd.dentry->d_inode;
- 	if ((error = __rpc_mkdir(dir, dentry)) != 0)
- 		goto err_dput;
- 	RPC_I(dentry->d_inode)->private = rpc_client;
-diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
-index 50b049c..819a01e 100644
---- a/net/sunrpc/stats.c
-+++ b/net/sunrpc/stats.c
-@@ -224,10 +224,16 @@ EXPORT_SYMBOL_GPL(rpc_print_iostats);
- static inline struct proc_dir_entry *
- do_register(const char *name, void *data, const struct file_operations *fops)
- {
-+	struct proc_dir_entry *ent;
- 	rpc_proc_init();
- 	dprintk("RPC:       registering /proc/net/rpc/%s\n", name);
- 
--	return proc_create_data(name, 0, proc_net_rpc, fops, data);
-+	ent = create_proc_entry(name, 0, proc_net_rpc);
-+	if (ent) {
-+		ent->proc_fops = fops;
-+		ent->data = data;
-+	}
-+	return ent;
- }
- 
- struct proc_dir_entry *
-@@ -264,7 +270,7 @@ rpc_proc_init(void)
- 	dprintk("RPC:       registering /proc/net/rpc\n");
- 	if (!proc_net_rpc) {
- 		struct proc_dir_entry *ent;
--		ent = proc_mkdir("rpc", init_net.proc_net);
-+		ent = proc_mkdir("rpc", proc_net);
- 		if (ent) {
- 			ent->owner = THIS_MODULE;
- 			proc_net_rpc = ent;
-@@ -278,7 +284,7 @@ rpc_proc_exit(void)
- 	dprintk("RPC:       unregistering /proc/net/rpc\n");
- 	if (proc_net_rpc) {
- 		proc_net_rpc = NULL;
--		remove_proc_entry("rpc", init_net.proc_net);
-+		remove_proc_entry("net/rpc", NULL);
- 	}
- }
- 
-diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c
-index 5231f7a..30155e1 100644
---- a/net/sunrpc/sysctl.c
-+++ b/net/sunrpc/sysctl.c
-@@ -44,7 +44,7 @@ void
- rpc_register_sysctl(void)
- {
- 	if (!sunrpc_table_header)
--		sunrpc_table_header = register_sysctl_table(sunrpc_table);
-+		sunrpc_table_header = register_sysctl_table(sunrpc_table, 0);
- }
- 
- void
-diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
-index 853a414..71ba862 100644
---- a/net/sunrpc/auth_gss/auth_gss.c
-+++ b/net/sunrpc/auth_gss/auth_gss.c
-@@ -481,7 +481,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
- 	const void *p, *end;
- 	void *buf;
- 	struct gss_upcall_msg *gss_msg;
--	struct inode *inode = filp->f_path.dentry->d_inode;
-+	struct inode *inode = filp->f_dentry->d_inode;
- 	struct gss_cl_ctx *ctx;
- 	uid_t uid;
- 	ssize_t err = -EFBIG;
-diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
-index f160be6..5551644 100644
---- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
-+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
-@@ -41,7 +41,7 @@
- #endif
- 
- s32
--krb5_make_seq_num(struct crypto_blkcipher *key,
-+krb5_make_seq_num(struct crypto_tfm *key,
- 		int direction,
- 		u32 seqnum,
- 		unsigned char *cksum, unsigned char *buf)
-@@ -62,7 +62,7 @@ krb5_make_seq_num(struct crypto_blkcipher *key,
- }
- 
- s32
--krb5_get_seq_num(struct crypto_blkcipher *key,
-+krb5_get_seq_num(struct crypto_tfm *key,
- 	       unsigned char *cksum,
- 	       unsigned char *buf,
- 	       int *direction, u32 *seqnum)
-diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
-index ef45eba..43a6ef6 100644
---- a/net/sunrpc/auth_gss/gss_krb5_mech.c
-+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
-@@ -34,7 +34,6 @@
-  *
-  */
- 
--#include <linux/err.h>
- #include <linux/module.h>
- #include <linux/init.h>
- #include <linux/types.h>
-@@ -78,10 +77,10 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
- }
- 
- static inline const void *
--get_key(const void *p, const void *end, struct crypto_blkcipher **res)
-+get_key(const void *p, const void *end, struct crypto_tfm **res)
- {
- 	struct xdr_netobj	key;
--	int			alg;
-+	int			alg, alg_mode;
- 	char			*alg_name;
- 
- 	p = simple_get_bytes(p, end, &alg, sizeof(alg));
-@@ -93,19 +92,18 @@ get_key(const void *p, const void *end, struct crypto_blkcipher **res)
- 
- 	switch (alg) {
- 		case ENCTYPE_DES_CBC_RAW:
--			alg_name = "cbc(des)";
-+			alg_name = "des";
-+			alg_mode = CRYPTO_TFM_MODE_CBC;
- 			break;
- 		default:
- 			printk("gss_kerberos_mech: unsupported algorithm %d\n", alg);
- 			goto out_err_free_key;
- 	}
--	*res = crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC);
--	if (IS_ERR(*res)) {
-+	if (!(*res = crypto_alloc_tfm(alg_name, alg_mode))) {
- 		printk("gss_kerberos_mech: unable to initialize crypto algorithm %s\n", alg_name);
--		*res = NULL;
- 		goto out_err_free_key;
- 	}
--	if (crypto_blkcipher_setkey(*res, key.data, key.len)) {
-+	if (crypto_cipher_setkey(*res, key.data, key.len)) {
- 		printk("gss_kerberos_mech: error setting key for crypto algorithm %s\n", alg_name);
- 		goto out_err_free_tfm;
- 	}
-@@ -114,7 +112,7 @@ get_key(const void *p, const void *end, struct crypto_blkcipher **res)
- 	return p;
- 
- out_err_free_tfm:
--	crypto_free_blkcipher(*res);
-+	crypto_free_tfm(*res);
- out_err_free_key:
- 	kfree(key.data);
- 	p = ERR_PTR(-EINVAL);
-@@ -184,9 +182,9 @@ gss_import_sec_context_kerberos(const void *p,
- 	return 0;
- 
- out_err_free_key2:
--	crypto_free_blkcipher(ctx->seq);
-+	crypto_free_tfm(ctx->seq);
- out_err_free_key1:
--	crypto_free_blkcipher(ctx->enc);
-+	crypto_free_tfm(ctx->enc);
- out_err_free_mech:
- 	kfree(ctx->mech_used.data);
- out_err_free_ctx:
-@@ -199,8 +197,8 @@ static void
- gss_delete_sec_context_kerberos(void *internal_ctx) {
- 	struct krb5_ctx *kctx = internal_ctx;
- 
--	crypto_free_blkcipher(kctx->seq);
--	crypto_free_blkcipher(kctx->enc);
-+	crypto_free_tfm(kctx->seq);
-+	crypto_free_tfm(kctx->enc);
- 	kfree(kctx->mech_used.data);
- 	kfree(kctx);
- }
-diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
-index ae8e69b..84ff86d 100644
---- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
-+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
-@@ -146,7 +146,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
- 
- 	now = get_seconds();
- 
--	blocksize = crypto_blkcipher_blocksize(kctx->enc);
-+	blocksize = crypto_tfm_alg_blocksize(kctx->enc);
- 	gss_krb5_add_padding(buf, offset, blocksize);
- 	BUG_ON((buf->len - offset) % blocksize);
- 	plainlen = blocksize + buf->len - offset;
-@@ -287,7 +287,7 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
- 	/* Copy the data back to the right position.  XXX: Would probably be
- 	 * better to copy and encrypt at the same time. */
- 
--	blocksize = crypto_blkcipher_blocksize(kctx->enc);
-+	blocksize = crypto_tfm_alg_blocksize(kctx->enc);
- 	data_start = ptr + GSS_KRB5_TOK_HDR_LEN + 8 + blocksize;
- 	orig_start = buf->head[0].iov_base + offset;
- 	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
-diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
-index c93fca2..ed58bcc 100644
---- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
-+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
-@@ -51,7 +51,7 @@
- 
- u32
- krb5_encrypt(
--	struct crypto_blkcipher *tfm,
-+	struct crypto_tfm *tfm,
- 	void * iv,
- 	void * in,
- 	void * out,
-@@ -60,24 +60,23 @@ krb5_encrypt(
- 	u32 ret = -EINVAL;
- 	struct scatterlist sg[1];
- 	u8 local_iv[16] = {0};
--	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
- 
--	if (length % crypto_blkcipher_blocksize(tfm) != 0)
-+	if (length % crypto_tfm_alg_blocksize(tfm) != 0)
- 		goto out;
- 
--	if (crypto_blkcipher_ivsize(tfm) > 16) {
-+	if (crypto_tfm_alg_ivsize(tfm) > 16) {
- 		dprintk("RPC:       gss_k5encrypt: tfm iv size too large %d\n",
--			crypto_blkcipher_ivsize(tfm));
-+			crypto_tfm_alg_ivsize(tfm));
- 		goto out;
- 	}
- 
- 	if (iv)
--		memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
-+		memcpy(local_iv, iv, crypto_tfm_alg_ivsize(tfm));
- 
- 	memcpy(out, in, length);
- 	sg_init_one(sg, out, length);
- 
--	ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
-+	ret = crypto_cipher_encrypt_iv(tfm, sg, sg, length, local_iv);
- out:
- 	dprintk("RPC:       krb5_encrypt returns %d\n", ret);
- 	return ret;
-@@ -85,7 +84,7 @@ out:
- 
- u32
- krb5_decrypt(
--     struct crypto_blkcipher *tfm,
-+     struct crypto_tfm *tfm,
-      void * iv,
-      void * in,
-      void * out,
-@@ -94,23 +93,22 @@ krb5_decrypt(
- 	u32 ret = -EINVAL;
- 	struct scatterlist sg[1];
- 	u8 local_iv[16] = {0};
--	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
- 
--	if (length % crypto_blkcipher_blocksize(tfm) != 0)
-+	if (length % crypto_tfm_alg_blocksize(tfm) != 0)
- 		goto out;
- 
--	if (crypto_blkcipher_ivsize(tfm) > 16) {
-+	if (crypto_tfm_alg_ivsize(tfm) > 16) {
- 		dprintk("RPC:       gss_k5decrypt: tfm iv size too large %d\n",
--			crypto_blkcipher_ivsize(tfm));
-+			crypto_tfm_alg_ivsize(tfm));
- 		goto out;
- 	}
- 	if (iv)
--		memcpy(local_iv,iv, crypto_blkcipher_ivsize(tfm));
-+		memcpy(local_iv,iv, crypto_tfm_alg_ivsize(tfm));
- 
- 	memcpy(out, in, length);
- 	sg_init_one(sg, out, length);
- 
--	ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
-+	ret = crypto_cipher_decrypt_iv(tfm, sg, sg, length, local_iv);
- out:
- 	dprintk("RPC:       gss_k5decrypt returns %d\n",ret);
- 	return ret;
-@@ -119,9 +117,11 @@ out:
- static int
- checksummer(struct scatterlist *sg, void *data)
- {
--	struct hash_desc *desc = data;
-+	struct crypto_tfm *tfm = (struct crypto_tfm *)data;
- 
--	return crypto_hash_update(desc, sg, sg->length);
-+	crypto_digest_update(tfm, sg, 1);
-+
-+	return 0;
- }
- 
- /* checksum the plaintext data and hdrlen bytes of the token header */
-@@ -129,37 +129,26 @@ s32
- make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body,
- 		   int body_offset, struct xdr_netobj *cksum)
- {
--	struct hash_desc                desc; /* XXX add to ctx? */
-+	struct crypto_tfm               *tfm = NULL; /* XXX add to ctx? */
- 	struct scatterlist              sg[1];
--	int err;
- 
--	desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
--	if (IS_ERR(desc.tfm))
-+	if (!(tfm = crypto_alloc_tfm(cksumname, CRYPTO_TFM_REQ_MAY_SLEEP)))
- 		return GSS_S_FAILURE;
--	cksum->len = crypto_hash_digestsize(desc.tfm);
--	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-+	cksum->len = crypto_tfm_alg_digestsize(tfm);
- 
--	err = crypto_hash_init(&desc);
--	if (err)
--		goto out;
-+	crypto_digest_init(tfm);
- 	sg_init_one(sg, header, hdrlen);
--	err = crypto_hash_update(&desc, sg, hdrlen);
--	if (err)
--		goto out;
--	err = xdr_process_buf(body, body_offset, body->len - body_offset,
--			      checksummer, &desc);
--	if (err)
--		goto out;
--	err = crypto_hash_final(&desc, cksum->data);
--
--out:
--	crypto_free_hash(desc.tfm);
--	return err ? GSS_S_FAILURE : 0;
-+	crypto_digest_update(tfm, sg, 1);
-+	xdr_process_buf(body, body_offset, body->len - body_offset,
-+			checksummer, tfm);
-+	crypto_digest_final(tfm, cksum->data);
-+	crypto_free_tfm(tfm);
-+	return 0;
- }
- 
- struct encryptor_desc {
- 	u8 iv[8]; /* XXX hard-coded blocksize */
--	struct blkcipher_desc desc;
-+	struct crypto_tfm *tfm;
- 	int pos;
- 	struct xdr_buf *outbuf;
- 	struct page **pages;
-@@ -205,11 +194,11 @@ encryptor(struct scatterlist *sg, void *data)
- 	if (thislen == 0)
- 		return 0;
- 
--	sg_mark_end(&desc->infrags[desc->fragno - 1]);
-+/*	sg_mark_end(&desc->infrags[desc->fragno - 1]);
- 	sg_mark_end(&desc->outfrags[desc->fragno - 1]);
--
--	ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
--					  desc->infrags, thislen);
-+*/
-+	ret = crypto_cipher_encrypt_iv(desc->tfm, desc->outfrags, desc->infrags,
-+					thislen, desc->iv);
- 	if (ret)
- 		return ret;
- 
-@@ -231,18 +220,16 @@ encryptor(struct scatterlist *sg, void *data)
- }
- 
- int
--gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
--		    int offset, struct page **pages)
-+gss_encrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset,
-+		struct page **pages)
- {
- 	int ret;
- 	struct encryptor_desc desc;
- 
--	BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
-+	BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0);
- 
- 	memset(desc.iv, 0, sizeof(desc.iv));
--	desc.desc.tfm = tfm;
--	desc.desc.info = desc.iv;
--	desc.desc.flags = 0;
-+	desc.tfm = tfm;
- 	desc.pos = offset;
- 	desc.outbuf = buf;
- 	desc.pages = pages;
-@@ -258,7 +245,7 @@ gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
- 
- struct decryptor_desc {
- 	u8 iv[8]; /* XXX hard-coded blocksize */
--	struct blkcipher_desc desc;
-+	struct crypto_tfm *tfm;
- 	struct scatterlist frags[4];
- 	int fragno;
- 	int fraglen;
-@@ -285,10 +272,10 @@ decryptor(struct scatterlist *sg, void *data)
- 	if (thislen == 0)
- 		return 0;
- 
--	sg_mark_end(&desc->frags[desc->fragno - 1]);
--
--	ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
--					  desc->frags, thislen);
-+/*	sg_mark_end(&desc->frags[desc->fragno - 1]);
-+*/
-+	ret = crypto_cipher_decrypt_iv(desc->tfm, desc->frags, desc->frags,
-+					thislen, desc->iv);
- 	if (ret)
- 		return ret;
- 
-@@ -307,18 +294,15 @@ decryptor(struct scatterlist *sg, void *data)
- }
- 
- int
--gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
--		    int offset)
-+gss_decrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset)
- {
- 	struct decryptor_desc desc;
- 
- 	/* XXXJBF: */
--	BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
-+	BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0);
- 
- 	memset(desc.iv, 0, sizeof(desc.iv));
--	desc.desc.tfm = tfm;
--	desc.desc.info = desc.iv;
--	desc.desc.flags = 0;
-+	desc.tfm = tfm;
- 	desc.fragno = 0;
- 	desc.fraglen = 0;
- 
-diff --git a/net/sunrpc/auth_gss/gss_spkm3_seal.c b/net/sunrpc/auth_gss/gss_spkm3_seal.c
-index c832712..669b7e2 100644
---- a/net/sunrpc/auth_gss/gss_spkm3_seal.c
-+++ b/net/sunrpc/auth_gss/gss_spkm3_seal.c
-@@ -162,10 +162,10 @@ make_spkm3_checksum(s32 cksumtype, struct xdr_netobj *key, char *header,
- 	desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
- 	if (IS_ERR(desc.tfm))
- 		return GSS_S_FAILURE;
--	cksum->len = crypto_hash_digestsize(desc.tfm);
-+	cksum->len = crypto_tfm_alg_digestsize(desc.tfm);
- 	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- 
--	err = crypto_hash_setkey(desc.tfm, key->data, key->len);
-+	err = crypto_digest_setkey(desc.tfm, key->data, key->len);
- 	if (err)
- 		goto out;
- 
-diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
-index 8710117..0390210 100644
---- a/net/sunrpc/xprtrdma/svc_rdma.c
-+++ b/net/sunrpc/xprtrdma/svc_rdma.c
-@@ -260,7 +260,7 @@ int svc_rdma_init(void)
- 	dprintk("\tmax_inline       : %d\n", svcrdma_max_req_size);
- 	if (!svcrdma_table_header)
- 		svcrdma_table_header =
--			register_sysctl_table(svcrdma_root_table);
-+			register_sysctl_table(svcrdma_root_table, 0);
- 
- 	/* Create the temporary map cache */
- 	svc_rdma_map_cachep = kmem_cache_create("svc_rdma_map_cache",
-diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
-index a564c1a..5dfafc8 100644
---- a/net/sunrpc/xprtrdma/transport.c
-+++ b/net/sunrpc/xprtrdma/transport.c
-@@ -804,7 +804,7 @@ static int __init xprt_rdma_init(void)
- 
- #ifdef RPC_DEBUG
- 	if (!sunrpc_table_header)
--		sunrpc_table_header = register_sysctl_table(sunrpc_table);
-+		sunrpc_table_header = register_sysctl_table(sunrpc_table, 0);
- #endif
- 	return 0;
- }

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/core_2_z010_sysfs_to_2.6.18.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/core_2_z010_sysfs_to_2.6.18.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/core_2_z010_sysfs_to_2.6.18.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,32 @@
+--- a/drivers/infiniband/core/device.c	2009-02-24 14:00:50.387541000 +0200
++++ b/drivers/infiniband/core/device.c	2009-02-24 14:00:54.891362000 +0200
+@@ -199,7 +199,7 @@ void ib_dealloc_device(struct ib_device 
+ 
+ 	BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);
+ 
+-	kobject_put(&device->dev.kobj);
++	ib_device_unregister_sysfs(device);
+ }
+ EXPORT_SYMBOL(ib_dealloc_device);
+ 
+@@ -356,8 +356,6 @@ void ib_unregister_device(struct ib_devi
+ 
+ 	mutex_unlock(&device_mutex);
+ 
+-	ib_device_unregister_sysfs(device);
+-
+ 	spin_lock_irqsave(&device->client_data_lock, flags);
+ 	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ 		kfree(context);
+--- a/drivers/infiniband/core/sysfs.c	2009-02-24 13:58:42.509754000 +0200
++++ b/drivers/infiniband/core/sysfs.c	2009-02-24 14:00:54.894368000 +0200
+@@ -863,9 +863,6 @@ void ib_device_unregister_sysfs(struct i
+ 	struct kobject *p, *t;
+ 	struct ib_port *port;
+ 
+-	/* Hold kobject until ib_dealloc_device() */
+-	kobject_get(&device->dev.kobj);
+-
+ 	list_for_each_entry_safe(p, t, &device->port_list, entry) {
+ 		list_del(&p->entry);
+ 		port = container_of(p, struct ib_port, kobj);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/core_z0010_sysfs_race.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/core_z0010_sysfs_race.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/core_z0010_sysfs_race.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,496 @@
+ib_core: avoid race condition between sysfs access and low-level module unload. (V2)
+
+In newer kernels, although a low-level module will not be unloaded (code)
+while its sysfs interface is being accessed, it is possible for the module to
+free all its resources (data) during such access.  This almost always causes
+a kernel Oops.
+
+To avoid this, we protect the device reg_state with a mutex, and perform
+all sysfs operations (show, store) atomically within this mutex.
+
+V2: fix thinko bug in sysfs_state_show changes (found by Ralph Campbell).
+
+Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
+Signed-off-by: Ralph Campbell <ralph.campbell at qlogic.com>
+
+Index: ofed_kernel/drivers/infiniband/core/device.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/device.c	2009-02-22 15:36:34.531430000 +0200
++++ ofed_kernel/drivers/infiniband/core/device.c	2009-02-22 15:38:28.650322000 +0200
+@@ -178,9 +178,14 @@ static int end_port(struct ib_device *de
+  */
+ struct ib_device *ib_alloc_device(size_t size)
+ {
++	struct ib_device *ibdev;
++
+ 	BUG_ON(size < sizeof (struct ib_device));
+ 
+-	return kzalloc(size, GFP_KERNEL);
++	ibdev = kzalloc(size, GFP_KERNEL);
++	if (ibdev)
++		mutex_init(&ibdev->sysfs_mutex);
++	return ibdev;
+ }
+ EXPORT_SYMBOL(ib_alloc_device);
+ 
+@@ -313,9 +318,10 @@ int ib_register_device(struct ib_device 
+ 		goto out;
+ 	}
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	list_add_tail(&device->core_list, &device_list);
+-
+ 	device->reg_state = IB_DEV_REGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ 
+ 	{
+ 		struct ib_client *client;
+@@ -361,7 +367,9 @@ void ib_unregister_device(struct ib_devi
+ 		kfree(context);
+ 	spin_unlock_irqrestore(&device->client_data_lock, flags);
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	device->reg_state = IB_DEV_UNREGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ }
+ EXPORT_SYMBOL(ib_unregister_device);
+ 
+Index: ofed_kernel/drivers/infiniband/core/sysfs.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/sysfs.c	2009-02-22 15:36:35.914354000 +0200
++++ ofed_kernel/drivers/infiniband/core/sysfs.c	2009-02-22 16:12:37.485629000 +0200
+@@ -94,7 +94,7 @@ static ssize_t state_show(struct ib_port
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+ 	static const char *state_name[] = {
+ 		[IB_PORT_NOP]		= "NOP",
+@@ -105,26 +105,33 @@ static ssize_t state_show(struct ib_port
+ 		[IB_PORT_ACTIVE_DEFER]	= "ACTIVE_DEFER"
+ 	};
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%d: %s\n", attr.state,
+-		       attr.state >= 0 && attr.state < ARRAY_SIZE(state_name) ?
+-		       state_name[attr.state] : "UNKNOWN");
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d: %s\n", attr.state,
++				      attr.state >= 0 &&
++				      attr.state < ARRAY_SIZE(state_name) ?
++				      state_name[attr.state] : "UNKNOWN");
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "0x%x\n", attr.lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_mask_count_show(struct ib_port *p,
+@@ -132,52 +139,64 @@ static ssize_t lid_mask_count_show(struc
+ 				   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.lmc);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.lmc);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%x\n", attr.sm_lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.sm_lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused,
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.sm_sl);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.sm_sl);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused,
+ 			     char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
+@@ -186,24 +205,33 @@ static ssize_t rate_show(struct ib_port 
+ 	struct ib_port_attr attr;
+ 	char *speed = "";
+ 	int rate;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	switch (attr.active_speed) {
+-	case 2: speed = " DDR"; break;
+-	case 4: speed = " QDR"; break;
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.active_speed) {
++			case 2: speed = " DDR"; break;
++			case 4: speed = " QDR"; break;
++			}
++
++			rate = 25 * ib_width_enum_to_int(attr.active_width) *
++				attr.active_speed;
++			if (rate < 0) {
++				ret = -EINVAL;
++				goto out;
++			}
++
++			ret = sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
++				      rate / 10, rate % 10 ? ".5" : "",
++				      ib_width_enum_to_int(attr.active_width),
++				      speed);
++		}
+ 	}
+-
+-	rate = 25 * ib_width_enum_to_int(attr.active_width) * attr.active_speed;
+-	if (rate < 0)
+-		return -EINVAL;
+-
+-	return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
+-		       rate / 10, rate % 10 ? ".5" : "",
+-		       ib_width_enum_to_int(attr.active_width), speed);
++out:
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
+@@ -211,22 +239,42 @@ static ssize_t phys_state_show(struct ib
+ {
+ 	struct ib_port_attr attr;
+ 
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	switch (attr.phys_state) {
+-	case 1:  return sprintf(buf, "1: Sleep\n");
+-	case 2:  return sprintf(buf, "2: Polling\n");
+-	case 3:  return sprintf(buf, "3: Disabled\n");
+-	case 4:  return sprintf(buf, "4: PortConfigurationTraining\n");
+-	case 5:  return sprintf(buf, "5: LinkUp\n");
+-	case 6:  return sprintf(buf, "6: LinkErrorRecovery\n");
+-	case 7:  return sprintf(buf, "7: Phy Test\n");
+-	default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.phys_state) {
++			case 1:
++				ret = sprintf(buf, "1: Sleep\n");
++				break;
++			case 2:
++				ret = sprintf(buf, "2: Polling\n");
++				break;
++			case 3:
++				ret = sprintf(buf, "3: Disabled\n");
++				break;
++			case 4:
++				ret = sprintf(buf, "4: PortConfigurationTraining\n");
++				break;
++			case 5:
++				ret = sprintf(buf, "5: LinkUp\n");
++				break;
++			case 6:
++				ret = sprintf(buf, "6: LinkErrorRecovery\n");
++				break;
++			case 7:
++				ret = sprintf(buf, "7: Phy Test\n");
++				break;
++			default:
++				ret = sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++				break;
++			}
++		}
+ 	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static PORT_ATTR_RO(state);
+@@ -256,21 +304,24 @@ static ssize_t show_port_gid(struct ib_p
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	union ib_gid gid;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) gid.raw)[0]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[1]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[2]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[3]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[4]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[5]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[6]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[7]));
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) gid.raw)[0]),
++				      be16_to_cpu(((__be16 *) gid.raw)[1]),
++				      be16_to_cpu(((__be16 *) gid.raw)[2]),
++				      be16_to_cpu(((__be16 *) gid.raw)[3]),
++				      be16_to_cpu(((__be16 *) gid.raw)[4]),
++				      be16_to_cpu(((__be16 *) gid.raw)[5]),
++				      be16_to_cpu(((__be16 *) gid.raw)[6]),
++				      be16_to_cpu(((__be16 *) gid.raw)[7]));
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
+@@ -279,13 +330,16 @@ static ssize_t show_port_pkey(struct ib_
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	u16 pkey;
+-	ssize_t ret;
+-
+-	ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%04x\n", pkey);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
++		if (!ret)
++			ret = sprintf(buf, "0x%04x\n", pkey);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ #define PORT_PMA_ATTR(_name, _counter, _width, _offset)			\
+@@ -308,6 +346,12 @@ static ssize_t show_pma_counter(struct i
+ 	if (!p->ibdev->process_mad)
+ 		return sprintf(buf, "N/A (no PMA)\n");
+ 
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (!ibdev_is_alive(p->ibdev)) {
++		ret = -ENODEV;
++		goto out;
++	}
++
+ 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
+ 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ 	if (!in_mad || !out_mad) {
+@@ -354,7 +414,7 @@ static ssize_t show_pma_counter(struct i
+ out:
+ 	kfree(in_mad);
+ 	kfree(out_mad);
+-
++	mutex_unlock(&p->ibdev->sysfs_mutex);
+ 	return ret;
+ }
+ 
+@@ -594,20 +654,20 @@ static ssize_t show_sys_image_guid(struc
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_attr attr;
+-	ssize_t ret;
+-
+-	if (!ibdev_is_alive(dev))
+-		return -ENODEV;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_device(dev, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_query_device(dev, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_node_guid(struct device *device,
+@@ -639,17 +699,20 @@ static ssize_t set_node_desc(struct devi
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_modify desc = {};
+-	int ret;
++	int ret = -ENODEV;
+ 
+ 	if (!dev->modify_device)
+ 		return -EIO;
+ 
+ 	memcpy(desc.node_desc, buf, min_t(int, count, 64));
+-	ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
+-	if (ret)
+-		return ret;
+-
+-	return count;
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
++		if (!ret)
++			ret = count;
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
+@@ -677,14 +740,18 @@ static ssize_t show_protocol_stat(const 
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	union rdma_protocol_stats stats;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = dev->get_protocol_stats(dev, &stats);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%llu\n",
+-		       (unsigned long long) ((u64 *) &stats)[offset]);
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = dev->get_protocol_stats(dev, &stats);
++		if (!ret)
++			ret = sprintf(buf, "%llu\n",
++				      (unsigned long long)
++				      ((u64 *) &stats)[offset]);
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ /* generate a read-only iwarp statistics attribute */
+Index: ofed_kernel/include/rdma/ib_verbs.h
+===================================================================
+--- ofed_kernel.orig/include/rdma/ib_verbs.h	2009-02-22 15:36:40.252210000 +0200
++++ ofed_kernel/include/rdma/ib_verbs.h	2009-02-22 15:38:28.678322000 +0200
+@@ -1205,6 +1205,7 @@ struct ib_device {
+ 		IB_DEV_REGISTERED,
+ 		IB_DEV_UNREGISTERED
+ 	}                            reg_state;
++	struct mutex		     sysfs_mutex;
+ 
+ 	u64			     uverbs_cmd_mask;
+ 	int			     uverbs_abi_ver;
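
For readers following the patch above, the per-handler pattern it installs can be
summarised by the sketch below. This is an illustrative summary only, not part of
the patch itself: my_attr_show is a hypothetical handler name, while sysfs_mutex,
ibdev_is_alive() and ib_query_port() are the symbols the patch actually uses.

	/* Sketch of the locking pattern applied to every sysfs show/store handler. */
	static ssize_t my_attr_show(struct ib_port *p, struct port_attribute *unused,
				    char *buf)
	{
		struct ib_port_attr attr;
		ssize_t ret = -ENODEV;			/* default if the device is already gone */

		mutex_lock(&p->ibdev->sysfs_mutex);	/* serialize against ib_unregister_device() */
		if (ibdev_is_alive(p->ibdev)) {		/* reg_state still IB_DEV_REGISTERED */
			ret = ib_query_port(p->ibdev, p->port_num, &attr);
			if (!ret)
				ret = sprintf(buf, "%d\n", attr.lmc);
		}
		mutex_unlock(&p->ibdev->sysfs_mutex);
		return ret;
	}

The -ENODEV default means a sysfs access that races with device unregistration
returns cleanly instead of dereferencing data the low-level driver has already freed.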

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/cxgb3_0002_undo_250.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/cxgb3_0002_undo_250.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/cxgb3_0002_undo_250.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,8 @@
-commit 204e2f98c2d13f869b8541f3c57c7314f75cab11
-Author: Divy Le Ray <divy at chelsio.com>
-Date:   Tue May 6 19:26:01 2008 -0700
-
-    cxgb3 - fix EEH
-    
-    Reset the chip when the PCI link goes down.
-    Preserve the napi structure when a sge qset's resources are freed.
-    Replay only HW initialization when the chip comes out of reset.
-    
-    Signed-off-by: Divy Le ray <divy at chelsio.com>
-    Signed-off-by: Jeff Garzik <jgarzik at redhat.com>
-
----
- drivers/net/cxgb3/common.h     |    1 -
- drivers/net/cxgb3/cxgb3_main.c |   10 ++++------
- drivers/net/cxgb3/regs.h       |    8 --------
- drivers/net/cxgb3/sge.c        |   31 ++-----------------------------
- drivers/net/cxgb3/t3_hw.c      |   28 ----------------------------
- 5 files changed, 6 insertions(+), 72 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/common.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/common.h
-+++ ofed_kernel/drivers/net/cxgb3/common.h
-@@ -700,7 +700,6 @@ void mac_prep(struct cmac *mac, struct a
+diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
+index d5e9bf7..dc2c79d 100644
+--- a/drivers/net/cxgb3/common.h
++++ b/drivers/net/cxgb3/common.h
+@@ -726,7 +726,6 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
  void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
  int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
  		    int reset);
@@ -31,11 +10,11 @@
  void t3_led_ready(struct adapter *adapter);
  void t3_fatal_err(struct adapter *adapter);
  void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -2449,6 +2449,9 @@ static pci_ers_result_t t3_io_error_dete
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 8de820e..48fbda6 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -2485,6 +2485,9 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
  	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
  		offload_close(&adapter->tdev);
  
@@ -45,7 +24,7 @@
  	adapter->flags &= ~FULL_INIT_DONE;
  
  	pci_disable_device(pdev);
-@@ -2473,12 +2476,8 @@ static pci_ers_result_t t3_io_slot_reset
+@@ -2509,12 +2512,8 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
  		goto err;
  	}
  	pci_set_master(pdev);
@@ -59,7 +38,7 @@
  		goto err;
  
  	return PCI_ERS_RESULT_RECOVERED;
-@@ -2630,7 +2629,6 @@ static int __devinit init_one(struct pci
+@@ -2666,7 +2665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
  	}
  
  	pci_set_master(pdev);
@@ -67,10 +46,10 @@
  
  	mmio_start = pci_resource_start(pdev, 0);
  	mmio_len = pci_resource_len(pdev, 0);
-Index: ofed_kernel/drivers/net/cxgb3/regs.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/regs.h
-+++ ofed_kernel/drivers/net/cxgb3/regs.h
+diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
+index a035d5c..086cd02 100644
+--- a/drivers/net/cxgb3/regs.h
++++ b/drivers/net/cxgb3/regs.h
 @@ -444,14 +444,6 @@
  
  #define A_PCIE_CFG 0x88
@@ -86,11 +65,11 @@
  #define S_PCIE_CLIDECEN    16
  #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
  #define F_PCIE_CLIDECEN    V_PCIE_CLIDECEN(1U)
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index 1b0861d..912f816 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
  }
  
  /**
@@ -124,7 +103,7 @@
   *	free_qset - free the resources of an SGE queue set
   *	@adapter: the adapter owning the queue set
   *	@q: the queue set
-@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter 
+@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -133,20 +112,11 @@
  }
  
  /**
-@@ -1429,7 +1402,7 @@ static void restart_ctrlq(unsigned long 
-  */
- int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
- {
--	int ret;
-+	int ret;
- 	local_bh_disable();
- 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
- 	local_bh_enable();
-Index: ofed_kernel/drivers/net/cxgb3/t3_hw.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/t3_hw.c
-+++ ofed_kernel/drivers/net/cxgb3/t3_hw.c
-@@ -3265,7 +3265,6 @@ static void config_pcie(struct adapter *
+diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
+index bf5c076..533fc74 100644
+--- a/drivers/net/cxgb3/t3_hw.c
++++ b/drivers/net/cxgb3/t3_hw.c
+@@ -3303,7 +3303,6 @@ static void config_pcie(struct adapter *adap)
  
  	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
  	t3_set_reg_field(adap, A_PCIE_CFG, 0,
@@ -154,15 +124,14 @@
  			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
  }
  
-@@ -3657,30 +3656,3 @@ void t3_led_ready(struct adapter *adapte
- 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+@@ -3706,31 +3705,3 @@ void t3_led_ready(struct adapter *adapter)
  			 F_GPIO0_OUT_VAL);
  }
--
+ 
 -int t3_replay_prep_adapter(struct adapter *adapter)
 -{
 -	const struct adapter_info *ai = adapter->params.info;
--	unsigned int i, j = 0;
+-	unsigned int i, j = -1;
 -	int ret;
 -
 -	early_hw_init(adapter, ai);
@@ -171,15 +140,17 @@
 -		return ret;
 -
 -	for_each_port(adapter, i) {
+-		const struct port_type_info *pti;
 -		struct port_info *p = adap2pinfo(adapter, i);
--		while (!adapter->params.vpd.port_type[j])
--			++j;
 -
--		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
--					ai->mdio_ops);
+-		while (!adapter->params.vpd.port_type[++j])
+-			;
 -
+-		pti = &port_types[adapter->params.vpd.port_type[j]];
+-		ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
+-		if (ret)
+-			return ret;
 -		p->phy.ops->power_down(&p->phy, 1);
--		++j;
 -	}
 -
 -return 0;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/cxgb3_0010_napi.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/cxgb3_0010_napi.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/cxgb3_0010_napi.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,21 @@
----
- drivers/net/cxgb3/adapter.h    |   22 +++--
- drivers/net/cxgb3/cxgb3_main.c |   94 ++++++++++++++++------
- drivers/net/cxgb3/sge.c        |  170 ++++++++++++++++++++++++-----------------
- 3 files changed, 185 insertions(+), 101 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/adapter.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/adapter.h
-+++ ofed_kernel/drivers/net/cxgb3/adapter.h
-@@ -47,13 +47,11 @@
- #include <asm/io.h>
+diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
+index 3b33ecb..21dad82 100644
+--- a/drivers/net/cxgb3/adapter.h
++++ b/drivers/net/cxgb3/adapter.h
+@@ -48,12 +48,10 @@
  
  struct vlan_group;
--struct adapter;
+ struct adapter;
 -struct sge_qset;
  
-+struct adapter;
  struct port_info {
  	struct adapter *adapter;
  	struct vlan_group *vlan_grp;
 -	struct sge_qset *qs;
- 	const struct port_type_info *port_type;
  	u8 port_id;
  	u8 rx_csum_offload;
-@@ -184,8 +182,6 @@ enum {				/* per port SGE statistics */
+ 	u8 nqsets;
+@@ -183,8 +181,6 @@ enum {				/* per port SGE statistics */
  #define T3_MAX_LRO_MAX_PKTS 64
  
  struct sge_qset {		/* an SGE queue set */
@@ -32,7 +24,7 @@
  	struct sge_rspq rspq;
  	struct sge_fl fl[SGE_RXQ_PER_SET];
  	struct sge_txq txq[SGE_TXQ_PER_SET];
-@@ -196,7 +192,7 @@ struct sge_qset {		/* an SGE queue set *
+@@ -195,7 +191,7 @@ struct sge_qset {		/* an SGE queue set */
  	int lro_enabled;
  	int lro_frag_len;
  	void *lro_va;
@@ -41,7 +33,7 @@
  	unsigned long txq_stopped;	/* which Tx queues are stopped */
  	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
  	unsigned long port_stats[SGE_PSTAT_MAX];
-@@ -241,6 +237,12 @@ struct adapter {
+@@ -240,6 +236,12 @@ struct adapter {
  	struct delayed_work adap_check_task;
  	struct work_struct ext_intr_handler_task;
  
@@ -54,7 +46,7 @@
  	struct dentry *debugfs_root;
  
  	struct mutex mdio_lock;
-@@ -267,6 +269,12 @@ static inline struct port_info *adap2pin
+@@ -266,6 +268,12 @@ static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
  	return netdev_priv(adap->port[idx]);
  }
  
@@ -67,7 +59,7 @@
  #define OFFLOAD_DEVMAP_BIT 15
  
  #define tdev2adap(d) container_of(d, struct adapter, tdev)
-@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, str
+@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -76,11 +68,11 @@
  int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
  		unsigned char *data);
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -410,17 +410,49 @@ static void setup_rss(struct adapter *ad
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 0f4c694..342d441 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -435,17 +435,49 @@ static void setup_rss(struct adapter *adap)
  		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
  }
  
@@ -98,9 +90,7 @@
 -	int i;
 +	int i, j, dummy_idx = 0;
 +	struct net_device *nd;
- 
--	for (i = 0; i < SGE_QSETS; i++) {
--		struct sge_qset *qs = &adap->sge.qs[i];
++
 +	for_each_port(adap, i) {
 +		struct net_device *dev = adap->port[i];
 +		const struct port_info *pi = netdev_priv(dev);
@@ -112,7 +102,9 @@
 +				nd = alloc_netdev(sizeof(*p), "", ether_setup);
 +				if (!nd)
 +					goto free_all;
-+
+ 
+-	for (i = 0; i < SGE_QSETS; i++) {
+-		struct sge_qset *qs = &adap->sge.qs[i];
 +				p = netdev_priv(nd);
 +				p->adapter = adap;
 +				nd->weight = 64;
@@ -137,7 +129,7 @@
  }
  
  /*
-@@ -431,18 +463,20 @@ static void init_napi(struct adapter *ad
+@@ -456,18 +488,20 @@ static void init_napi(struct adapter *adap)
  static void quiesce_rx(struct adapter *adap)
  {
  	int i;
@@ -168,7 +160,7 @@
  }
  
  /**
-@@ -455,7 +489,7 @@ static void enable_all_napi(struct adapt
+@@ -480,7 +514,7 @@ static void enable_all_napi(struct adapter *adap)
   */
  static int setup_sge_qsets(struct adapter *adap)
  {
@@ -177,7 +169,7 @@
  	unsigned int ntxq = SGE_TXQ_PER_SET;
  
  	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
-@@ -463,14 +497,15 @@ static int setup_sge_qsets(struct adapte
+@@ -488,14 +522,15 @@ static int setup_sge_qsets(struct adapter *adap)
  
  	for_each_port(adap, i) {
  		struct net_device *dev = adap->port[i];
@@ -196,7 +188,7 @@
  			if (err) {
  				t3_free_sge_resources(adap);
  				return err;
-@@ -884,6 +919,10 @@ static int cxgb_up(struct adapter *adap)
+@@ -909,6 +944,10 @@ static int cxgb_up(struct adapter *adap)
  				goto out;
  		}
  
@@ -207,7 +199,7 @@
  		err = t3_init_hw(adap, 0);
  		if (err)
  			goto out;
-@@ -896,7 +935,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -921,7 +960,6 @@ static int cxgb_up(struct adapter *adap)
  			goto out;
  
  		setup_rss(adap);
@@ -215,7 +207,7 @@
  		adap->flags |= FULL_INIT_DONE;
  	}
  
-@@ -924,7 +962,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -949,7 +987,6 @@ static int cxgb_up(struct adapter *adap)
  				      adap->name, adap)))
  		goto irq_err;
  
@@ -223,7 +215,7 @@
  	t3_sge_start(adap);
  	t3_intr_enable(adap);
  
-@@ -1061,10 +1098,8 @@ static int cxgb_open(struct net_device *
+@@ -1086,10 +1123,8 @@ static int cxgb_open(struct net_device *dev)
  	int other_ports = adapter->open_device_map & PORT_MASK;
  	int err;
  
@@ -235,7 +227,7 @@
  
  	set_bit(pi->port_id, &adapter->open_device_map);
  	if (is_offload(adapter) && !ofld_disable) {
-@@ -2700,6 +2735,7 @@ static int __devinit init_one(struct pci
+@@ -2736,6 +2771,7 @@ static int __devinit init_one(struct pci_dev *pdev,
  #ifdef CONFIG_NET_POLL_CONTROLLER
  		netdev->poll_controller = cxgb_netpoll;
  #endif
@@ -243,7 +235,7 @@
  
  		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
  	}
-@@ -2800,6 +2836,12 @@ static void __devexit remove_one(struct 
+@@ -2836,6 +2872,12 @@ static void __devexit remove_one(struct pci_dev *pdev)
  		t3_free_sge_resources(adapter);
  		cxgb_disable_msi(adapter);
  
@@ -256,11 +248,11 @@
  		for_each_port(adapter, i)
  			if (adapter->port[i])
  				free_netdev(adapter->port[i]);
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter 
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index f6bc6fe..3bbf626 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -270,7 +262,7 @@
  	memset(q, 0, sizeof(*q));
  }
  
-@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, str
+@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
  	unsigned int ndesc, pidx, credits, gen, compl;
  	const struct port_info *pi = netdev_priv(dev);
  	struct adapter *adap = pi->adapter;
@@ -279,7 +271,7 @@
  	struct sge_txq *q = &qs->txq[TXQ_ETH];
  
  	/*
-@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long 
+@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long data)
  	struct sk_buff *skb;
  	struct sge_qset *qs = (struct sge_qset *)data;
  	struct sge_txq *q = &qs->txq[TXQ_CTRL];
@@ -295,7 +287,7 @@
  
  		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
  
-@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long 
+@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long data)
  
  	spin_unlock(&q->lock);
  	wmb();
@@ -304,7 +296,7 @@
  		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
  }
  
-@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struc
+@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
  	else {
  		struct sge_qset *qs = rspq_to_qset(q);
  
@@ -314,7 +306,7 @@
  		q->rx_head = skb;
  	}
  	q->rx_tail = skb;
-@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundl
+@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev,
   *	receive handler.  Batches need to be of modest size as we do prefetches
   *	on the packets in each.
   */
@@ -357,7 +349,7 @@
  			prefetch(head->data);
  			skbs[ngathered] = head;
  			head = head->next;
-@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct 
+@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct *napi, int budget)
  		}
  		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
  	}
@@ -370,7 +362,7 @@
  }
  
  /**
-@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const
+@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const struct rsp_desc *r)
  
  /**
   *	napi_rx_handler - the NAPI handler for Rx processing
@@ -449,7 +441,7 @@
  }
  
  /**
-@@ -2448,7 +2462,8 @@ static inline int handle_responses(struc
+@@ -2448,7 +2462,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
  		return 0;
  	}
@@ -459,7 +451,7 @@
  	return 1;
  }
  
-@@ -2459,7 +2474,8 @@ static inline int handle_responses(struc
+@@ -2459,7 +2474,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -469,7 +461,7 @@
  	struct sge_rspq *q = &qs->rspq;
  
  	spin_lock(&q->lock);
-@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, vo
+@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -484,7 +476,7 @@
  		q->unhandled_irqs++;
  	spin_unlock(&q->lock);
  	return IRQ_HANDLED;
-@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, 
+@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, void *cookie)
  	return IRQ_HANDLED;
  }
  
@@ -502,7 +494,7 @@
  		return 1;
  	}
  	return 0;
-@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int 
+@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
  
  	spin_lock(&q->lock);
  
@@ -515,7 +507,7 @@
  	if (!new_packets && t3_slow_intr_handler(adap) == 0)
  		q->unhandled_irqs++;
  
-@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, voi
+@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, void *cookie)
  static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  {
  	u32 map;
@@ -527,7 +519,7 @@
  
  	t3_write_reg(adap, A_PL_CLI, 0);
  	map = t3_read_reg(adap, A_SG_DATA_INTR);
-@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq
+@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  	if (unlikely(map & F_ERRINTR))
  		t3_slow_intr_handler(adap);
  
@@ -550,7 +542,7 @@
  
  	spin_unlock(&q0->lock);
  	return IRQ_HANDLED;
-@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long d
+@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long data)
  {
  	spinlock_t *lock;
  	struct sge_qset *qs = (struct sge_qset *)data;
@@ -560,7 +552,7 @@
  
  	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
  		reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
-@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long d
+@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long data)
  		spin_unlock(&qs->txq[TXQ_OFLD].lock);
  	}
  	lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
@@ -572,7 +564,7 @@
  			u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
  
  			if (qs->fl[0].credits < qs->fl[0].size)
-@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long d
+@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long data)
   */
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
  {
@@ -586,7 +578,7 @@
  }
  
  /**
-@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_
+@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
   */
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -595,7 +587,7 @@
  {
  	int i, avail, ret = -ENOMEM;
  	struct sge_qset *q = &adapter->sge.qs[id];
-@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *ad
+@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  	}
  
  	spin_unlock_irq(&adapter->sge.reg_lock);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,23 +3,32 @@
  drivers/infiniband/hw/ipath/ipath_user_sdma.c |    6 +++---
  2 files changed, 4 insertions(+), 4 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:37:35.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:41:22.000000000 -0800
 @@ -698,7 +698,7 @@ retry:
  
  	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
  			      tx->map_len, DMA_TO_DEVICE);
--	if (dma_mapping_error(&dd->pcidev->dev, addr)) {
-+	if (dma_mapping_error(addr)) {
- 		ret = -EIO;
- 		goto unlock;
- 	}
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+-	if (dma_mapping_error(&dd->pcidev->dev, addr))
++	if (dma_mapping_error(addr))
+ 		goto ioerr;
+ 
+ 	dwoffset = tx->map_len >> 2;
+@@ -739,7 +739,7 @@ retry:
+ 		dw = (len + 3) >> 2;
+ 		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
+ 				      DMA_TO_DEVICE);
+-		if (dma_mapping_error(&dd->pcidev->dev, addr))
++		if (dma_mapping_error(addr))
+ 			goto unmap;
+ 		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
+ 		/* SDmaUseLargeBuf has to be set in every descriptor */
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:18.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:54.000000000 -0800
 @@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(cons
  
  	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/ipoib_0100_to_2.6.21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/ipoib_0100_to_2.6.21.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/ipoib_0100_to_2.6.21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -12,11 +12,11 @@
  drivers/infiniband/ulp/ipoib/ipoib_multicast.c |   10 ++--
  5 files changed, 61 insertions(+), 64 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:56:08.000000000 +0200
-@@ -279,8 +279,6 @@ struct ipoib_dev_priv {
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:46:20.000000000 +0300
+@@ -277,8 +277,6 @@ struct ipoib_dev_priv {
  
  	struct net_device *dev;
  
@@ -25,7 +25,7 @@
  	unsigned long flags;
  
  	struct mutex vlan_mutex;
-@@ -338,6 +336,8 @@ struct ipoib_dev_priv {
+@@ -336,6 +334,8 @@ struct ipoib_dev_priv {
  
  	struct ib_event_handler event_handler;
  
@@ -34,7 +34,7 @@
  	struct net_device *parent;
  	struct list_head child_intfs;
  	struct list_head list;
-@@ -425,7 +425,7 @@ extern struct workqueue_struct *ipoib_wo
+@@ -423,7 +423,7 @@ extern struct workqueue_struct *ipoib_wo
  
  /* functions */
  
@@ -43,10 +43,10 @@
  void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
  void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:46:20.000000000 +0300
 @@ -593,7 +593,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		ipoib_dbg(priv, "cm recv error "
  			   "(status=%d, wrid=%d vend_err %x)\n",
@@ -56,7 +56,7 @@
  		if (has_srq)
  			goto repost;
  		else {
-@@ -658,7 +658,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
+@@ -646,7 +646,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		 * this packet and reuse the old buffer.
  		 */
  		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
@@ -65,7 +65,7 @@
  		goto repost;
  	}
  
-@@ -676,8 +676,8 @@ copied:
+@@ -664,8 +664,8 @@ copied:
  	skb_pull(skb, IPOIB_ENCAP_LEN);
  
  	dev->last_rx = jiffies;
@@ -76,7 +76,7 @@
  
  	skb->dev = dev;
  	/* XXX get correct PACKET_ type here */
-@@ -726,8 +726,8 @@ void ipoib_cm_send(struct net_device *de
+@@ -714,8 +714,8 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(skb->len > tx->mtu)) {
  		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
  			   skb->len, tx->mtu);
@@ -87,7 +87,7 @@
  		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
  		return;
  	}
-@@ -746,7 +746,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -734,7 +734,7 @@ void ipoib_cm_send(struct net_device *de
  	tx_req->skb = skb;
  	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
  	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
@@ -96,7 +96,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -756,7 +756,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -744,7 +744,7 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
  			       addr, skb->len))) {
  		ipoib_warn(priv, "post_send failed\n");
@@ -105,7 +105,7 @@
  		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
  		dev_kfree_skb_any(skb);
  	} else {
-@@ -793,8 +793,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
+@@ -781,8 +781,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
  	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
  
  	/* FIXME: is this right? Shouldn't we only increment on success? */
@@ -116,10 +116,10 @@
  
  	dev_kfree_skb_any(tx_req->skb);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:43:16.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:49:04.000000000 +0300
 @@ -261,7 +261,7 @@ static void ipoib_ib_handle_rx_wc(struct
  	 * this packet and reuse the old buffer.
  	 */
@@ -263,60 +263,41 @@
  		--priv->tx_outstanding;
  		ipoib_dma_unmap_tx(priv->ca, tx_req);
  		dev_kfree_skb_any(skb);
-@@ -809,6 +812,7 @@ int ipoib_ib_dev_stop(struct net_device 
+@@ -708,8 +711,7 @@ int ipoib_ib_dev_open(struct net_device 
+ 	priv->poll_timer.function = ipoib_ib_tx_timer_func;
+ 	priv->poll_timer.data = (unsigned long)dev;
+ 
+-	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_enable(&priv->napi);
++	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+ 
+ 	return 0;
+ }
+@@ -828,8 +830,8 @@ int ipoib_ib_dev_stop(struct net_device 
+ 	struct ipoib_tx_buf *tx_req;
  	int i;
  
- 	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+-	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_disable(&priv->napi);
++	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 +	netif_poll_disable(dev);
  
  	ipoib_cm_dev_stop(dev);
  
-@@ -893,6 +897,7 @@ timeout:
- 		msleep(1);
- 	}
+@@ -903,6 +905,7 @@ timeout:
  
+ 	ipoib_ah_dev_cleanup(dev);
+ 
 +	netif_poll_enable(dev);
  	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
  
  	return 0;
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:55:35.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:58:18.000000000 +0200
-@@ -106,20 +106,16 @@ int ipoib_open(struct net_device *dev)
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:44:02.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:46:20.000000000 +0300
+@@ -585,7 +585,7 @@ static void neigh_add_path(struct sk_buf
  
- 	ipoib_dbg(priv, "bringing up interface\n");
- 
--	if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
--		napi_enable(&priv->napi);
-+	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- 
- 	if (ipoib_pkey_dev_delay_open(dev))
- 		return 0;
- 
--	if (ipoib_ib_dev_open(dev)) {
--		napi_disable(&priv->napi);
--		return -EINVAL;
--	}
-+	if (ipoib_ib_dev_open(dev))
-+  		return -EINVAL;
- 
- 	if (ipoib_ib_dev_up(dev)) {
- 		ipoib_ib_dev_stop(dev, 1);
--		napi_disable(&priv->napi);
- 		return -EINVAL;
- 	}
- 
-@@ -152,7 +148,6 @@ static int ipoib_stop(struct net_device 
- 	ipoib_dbg(priv, "stopping interface\n");
- 
- 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
--	napi_disable(&priv->napi);
- 
- 	netif_stop_queue(dev);
- 
-@@ -604,7 +599,7 @@ static void neigh_add_path(struct sk_buf
- 
  	neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
  	if (!neigh) {
 -		++dev->stats.tx_dropped;
@@ -324,7 +305,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -667,7 +662,7 @@ err_list:
+@@ -646,7 +646,7 @@ err_list:
  err_path:
  	ipoib_neigh_free(dev, neigh);
  err_drop:
@@ -333,7 +314,7 @@
  	dev_kfree_skb_any(skb);
  
  	spin_unlock_irqrestore(&priv->lock, flags);
-@@ -714,7 +709,7 @@ static void unicast_arp_send(struct sk_b
+@@ -697,7 +697,7 @@ static void unicast_arp_send(struct sk_b
  			} else
  				__path_add(dev, path);
  		} else {
@@ -342,7 +323,7 @@
  			dev_kfree_skb_any(skb);
  		}
  
-@@ -733,7 +728,7 @@ static void unicast_arp_send(struct sk_b
+@@ -716,7 +716,7 @@ static void unicast_arp_send(struct sk_b
  		skb_push(skb, sizeof *phdr);
  		__skb_queue_tail(&path->queue, skb);
  	} else {
@@ -351,7 +332,7 @@
  		dev_kfree_skb_any(skb);
  	}
  
-@@ -790,7 +785,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -773,7 +773,7 @@ static int ipoib_start_xmit(struct sk_bu
  			__skb_queue_tail(&neigh->queue, skb);
  			spin_unlock_irqrestore(&priv->lock, flags);
  		} else {
@@ -360,7 +341,7 @@
  			dev_kfree_skb_any(skb);
  		}
  	} else {
-@@ -816,7 +811,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -799,7 +799,7 @@ static int ipoib_start_xmit(struct sk_bu
  					   IPOIB_QPN(phdr->hwaddr),
  					   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
  				dev_kfree_skb_any(skb);
@@ -369,7 +350,7 @@
  				return NETDEV_TX_OK;
  			}
  
-@@ -842,7 +837,7 @@ static void ipoib_timeout(struct net_dev
+@@ -825,7 +825,7 @@ static void ipoib_timeout(struct net_dev
  static int ipoib_hard_header(struct sk_buff *skb,
  			     struct net_device *dev,
  			     unsigned short type,
@@ -378,7 +359,7 @@
  {
  	struct ipoib_header *header;
  
-@@ -931,9 +926,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
+@@ -914,9 +914,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
  void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
  {
  	struct sk_buff *skb;
@@ -390,7 +371,7 @@
  		dev_kfree_skb_any(skb);
  	}
  	if (ipoib_cm_get(neigh))
-@@ -1008,10 +1004,6 @@ void ipoib_dev_cleanup(struct net_device
+@@ -991,10 +992,6 @@ void ipoib_dev_cleanup(struct net_device
  	priv->tx_ring = NULL;
  }
  
@@ -401,7 +382,7 @@
  static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
  		       void **tcph, u64 *hdr_flags, void *priv)
  {
-@@ -1069,13 +1061,13 @@ static void ipoib_setup(struct net_devic
+@@ -1052,13 +1049,13 @@ static void ipoib_setup(struct net_devic
  	dev->change_mtu		 = ipoib_change_mtu;
  	dev->hard_start_xmit	 = ipoib_start_xmit;
  	dev->tx_timeout		 = ipoib_timeout;
@@ -418,10 +399,10 @@
  
  	dev->watchdog_timeo	 = HZ;
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:46:20.000000000 +0300
 @@ -100,7 +100,7 @@ static void ipoib_mcast_free(struct ipoi
  	}
  
@@ -448,7 +429,7 @@
  			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
  		}
  		netif_tx_unlock_bh(dev);
-@@ -667,7 +668,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -677,7 +678,7 @@ void ipoib_mcast_send(struct net_device 
  	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)		||
  	    !priv->broadcast					||
  	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
@@ -457,7 +438,7 @@
  		dev_kfree_skb_any(skb);
  		goto unlock;
  	}
-@@ -682,7 +683,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -692,7 +693,7 @@ void ipoib_mcast_send(struct net_device 
  		if (!mcast) {
  			ipoib_warn(priv, "unable to allocate memory for "
  				   "multicast structure\n");
@@ -466,7 +447,7 @@
  			dev_kfree_skb_any(skb);
  			goto out;
  		}
-@@ -697,7 +698,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -707,7 +708,7 @@ void ipoib_mcast_send(struct net_device 
  		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
  			skb_queue_tail(&mcast->pkt_queue, skb);
  		else {

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,26 @@
+---
+ drivers/infiniband/ulp/ipoib/ipoib_fs.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+===================================================================
+--- ofa_kernel-1.4.orig/drivers/infiniband/ulp/ipoib/ipoib_fs.c
++++ ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+@@ -134,7 +134,7 @@ static int ipoib_mcg_open(struct inode *
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_mcg_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_mcg_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -240,7 +240,7 @@ static int ipoib_path_open(struct inode 
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_path_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_path_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -5,13 +5,13 @@
 
 Signed-off-by: Doron Shoham <dorons at voltaire.com>
 ---
- drivers/scsi/scsi_transport_iscsi.c |   95 ++++++++++++++++++++----------------
- 1 file changed, 55 insertions(+), 40 deletions(-)
+ drivers/scsi/scsi_transport_iscsi.c |   97 +++++++++++++++++++++---------------
+ 1 file changed, 57 insertions(+), 40 deletions(-)
 
-Index: ofed_kernel/drivers/scsi/scsi_transport_iscsi.c
+Index: ofa_kernel-1.4/drivers/scsi/scsi_transport_iscsi.c
 ===================================================================
---- ofed_kernel.orig/drivers/scsi/scsi_transport_iscsi.c
-+++ ofed_kernel/drivers/scsi/scsi_transport_iscsi.c
+--- ofa_kernel-1.4.orig/drivers/scsi/scsi_transport_iscsi.c
++++ ofa_kernel-1.4/drivers/scsi/scsi_transport_iscsi.c
 @@ -20,6 +20,8 @@
   * along with this program; if not, write to the Free Software
   * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
@@ -21,7 +21,18 @@
  #include <linux/module.h>
  #include <linux/mutex.h>
  #include <net/tcp.h>
-@@ -397,10 +399,12 @@ static void __iscsi_unblock_session(stru
+@@ -378,8 +380,10 @@ static void __iscsi_unblock_session(stru
+ 	struct iscsi_cls_session *session =
+ 			container_of(work, struct iscsi_cls_session,
+ 				     unblock_work);
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
+ 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
+ 	struct iscsi_host *ihost = shost->shost_data;
++#endif
+ 	unsigned long flags;
+ 
+ 	/*
+@@ -397,10 +401,12 @@ static void __iscsi_unblock_session(stru
  	 * the async scanning code (drivers like iscsi_tcp do login and
  	 * scanning from userspace).
  	 */
@@ -38,7 +49,7 @@
  }
  
  /**
-@@ -1294,45 +1298,56 @@ iscsi_if_recv_msg(struct sk_buff *skb, s
+@@ -1294,45 +1300,56 @@ iscsi_if_recv_msg(struct sk_buff *skb, s
   * Malformed skbs with wrong lengths or invalid creds are not processed.
   */
  static void
@@ -129,7 +140,7 @@
  	}
  	mutex_unlock(&rx_queue_mutex);
  }
-@@ -1738,7 +1753,7 @@ static __init int iscsi_transport_init(v
+@@ -1738,7 +1755,7 @@ static __init int iscsi_transport_init(v
  	return 0;
  
  release_nls:
@@ -138,7 +149,7 @@
  unregister_session_class:
  	transport_class_unregister(&iscsi_session_class);
  unregister_conn_class:
-@@ -1753,7 +1768,7 @@ unregister_transport_class:
+@@ -1753,7 +1770,7 @@ unregister_transport_class:
  static void __exit iscsi_transport_exit(void)
  {
  	destroy_workqueue(iscsi_eh_timer_workq);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/iw_nes_050_to_2_6_24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/iw_nes_050_to_2_6_24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/iw_nes_050_to_2_6_24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:23:23.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -1118,19 +1118,19 @@ static ssize_t nes_show_wqm_quanta(struc
  static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
  					const char *buf, size_t count)

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/iw_nes_100_to_2_6_23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/iw_nes_100_to_2_6_23.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/iw_nes_100_to_2_6_23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/Kconfig nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig
---- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:38:45.000000000 -0500
 @@ -2,7 +2,6 @@ config INFINIBAND_NES
  	tristate "NetEffect RNIC Driver"
  	depends on PCI && INET && INFINIBAND
@@ -10,8 +10,8 @@
  	  This is a low-level driver for NetEffect RDMA enabled
  	  Network Interface Cards (RNIC).
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -360,10 +360,11 @@ struct ib_qp *nes_get_qp(struct ib_devic
   */
  static void nes_print_macaddr(struct net_device *netdev)
@@ -29,9 +29,9 @@
  
  /**
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_cm.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2008-11-07 15:26:56.000000000 -0600
-@@ -998,7 +998,6 @@ static int nes_addr_resolve_neigh(struct
+--- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1086,7 +1086,6 @@ static int nes_addr_resolve_neigh(struct
  	struct flowi fl;
  	struct neighbour *neigh;
  	int rc = -1;
@@ -39,7 +39,7 @@
  
  	memset(&fl, 0, sizeof fl);
  	fl.nl_u.ip4_u.daddr = htonl(dst_ip);
-@@ -1012,8 +1011,11 @@ static int nes_addr_resolve_neigh(struct
+@@ -1100,8 +1099,11 @@ static int nes_addr_resolve_neigh(struct
  	if (neigh) {
  		if (neigh->nud_state & NUD_VALID) {
  			nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -53,7 +53,7 @@
  			nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
  					     dst_ip, NES_ARP_ADD);
  			rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
-@@ -1042,7 +1044,6 @@ static struct nes_cm_node *make_cm_node(
+@@ -1130,7 +1132,6 @@ static struct nes_cm_node *make_cm_node(
  	int arpindex = 0;
  	struct nes_device *nesdev;
  	struct nes_adapter *nesadapter;
@@ -61,7 +61,7 @@
  
  	/* create an hte and cm_node for this instance */
  	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
-@@ -1111,8 +1112,11 @@ static struct nes_cm_node *make_cm_node(
+@@ -1197,8 +1198,11 @@ static struct nes_cm_node *make_cm_node(
  
  	/* copy the mac addr to node context */
  	memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
@@ -76,8 +76,8 @@
  	add_hte_node(cm_core, cm_node);
  	atomic_inc(&cm_nodes_created);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2008-11-07 15:38:18.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:38:45.000000000 -0500
 @@ -38,14 +38,9 @@
  #include <linux/ip.h>
  #include <linux/tcp.h>
@@ -90,10 +90,10 @@
 -module_param(nes_lro_max_aggr, uint, 0444);
 -MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
 -
- static u32 crit_err_count;
- u32 int_mod_timer_init;
- u32 int_mod_cq_depth_256;
-@@ -1540,25 +1535,6 @@ static void nes_rq_wqes_timeout(unsigned
+ static int wide_ppm_offset;
+ module_param(wide_ppm_offset, int, 0644);
+ MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm");
+@@ -1531,25 +1526,6 @@ static void nes_rq_wqes_timeout(unsigned
  }
  
  
@@ -119,7 +119,7 @@
  /**
   * nes_init_nic_qp
   */
-@@ -1783,14 +1759,6 @@ int nes_init_nic_qp(struct nes_device *n
+@@ -1773,14 +1749,6 @@ int nes_init_nic_qp(struct nes_device *n
  			jumbomode = 1;
  		nes_nic_init_timer_defaults(nesdev, jumbomode);
  	}
@@ -134,7 +134,7 @@
  	return 0;
  }
  
-@@ -2541,7 +2509,7 @@ static void nes_nic_napi_ce_handler(stru
+@@ -2556,7 +2524,7 @@ static void nes_nic_napi_ce_handler(stru
  {
  	struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
  
@@ -143,7 +143,7 @@
  }
  
  
-@@ -2576,13 +2544,10 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2591,13 +2559,10 @@ void nes_nic_ce_handler(struct nes_devic
  	u16 pkt_type;
  	u16 rqes_processed = 0;
  	u8 sq_cqes = 0;
@@ -157,7 +157,7 @@
  	do {
  		if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
  				NES_NIC_CQE_VALID) {
-@@ -2714,17 +2679,9 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2729,17 +2694,9 @@ void nes_nic_ce_handler(struct nes_devic
  							>> 16);
  					nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
  							nesvnic->netdev->name, vlan_tag);
@@ -178,7 +178,7 @@
  
  skip_rx_indicate0:
  				nesvnic->netdev->last_rx = jiffies;
-@@ -2755,8 +2712,6 @@ skip_rx_indicate0:
+@@ -2770,8 +2727,6 @@ skip_rx_indicate0:
  
  	} while (1);
  
@@ -188,18 +188,18 @@
  		barrier();
  		/* restart the queue if it had been stopped */
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.h nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:38:45.000000000 -0500
 @@ -33,8 +33,6 @@
  #ifndef __NES_HW_H
  #define __NES_HW_H
  
 -#include <linux/inet_lro.h>
 -
+ #define NES_PHY_TYPE_CX4       1
  #define NES_PHY_TYPE_1G        2
  #define NES_PHY_TYPE_IRIS      3
- #define NES_PHY_TYPE_ARGUS     4
-@@ -991,8 +989,6 @@ struct nes_hw_tune_timer {
+@@ -1005,8 +1003,6 @@ struct nes_hw_tune_timer {
  #define NES_TIMER_ENABLE_LIMIT      4
  #define NES_MAX_LINK_INTERRUPTS     128
  #define NES_MAX_LINK_CHECK          200
@@ -208,7 +208,7 @@
  
  struct nes_adapter {
  	u64              fw_ver;
-@@ -1168,7 +1164,6 @@ struct nes_vnic {
+@@ -1194,7 +1190,6 @@ struct nes_vnic {
  	u32               msg_enable;
  	/* u32 tx_avail; */
  	__be32            local_ipaddr;
@@ -216,7 +216,7 @@
  	spinlock_t           tx_lock;	/* could use netdev tx lock? */
  	struct timer_list    rq_wqes_timer;
  	u32                  nic_mem_size;
-@@ -1196,9 +1191,6 @@ struct nes_vnic {
+@@ -1222,9 +1217,6 @@ struct nes_vnic {
  	u8  of_device_registered;
  	u8  rdma_enabled;
  	u8  rx_checksum_disabled;
@@ -227,8 +227,8 @@
  
  struct nes_ib_device {
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
 @@ -96,35 +96,38 @@ static int nics_per_function = 1;
  /**
   * nes_netdev_poll
@@ -292,7 +292,7 @@
  	netif_stop_queue(netdev);
  	list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
  		first_nesvnic = container_of(list_pos, struct nes_vnic, list);
-@@ -797,14 +798,16 @@ static int nes_netdev_set_mac_address(st
+@@ -775,14 +776,16 @@ static int nes_netdev_set_mac_address(st
  	int i;
  	u32 macaddr_low;
  	u16 macaddr_high;
@@ -312,7 +312,7 @@
  	macaddr_high  = ((u16)netdev->dev_addr[0]) << 8;
  	macaddr_high += (u16)netdev->dev_addr[1];
  	macaddr_low   = ((u32)netdev->dev_addr[2]) << 24;
-@@ -909,11 +912,11 @@ static void nes_netdev_set_multicast_lis
+@@ -887,11 +890,11 @@ static void nes_netdev_set_multicast_lis
  			if (mc_index >= max_pft_entries_avaiable)
  				break;
  			if (multicast_addr) {
@@ -329,7 +329,7 @@
  				macaddr_high  = ((u16)multicast_addr->dmi_addr[0]) << 8;
  				macaddr_high += (u16)multicast_addr->dmi_addr[1];
  				macaddr_low   = ((u32)multicast_addr->dmi_addr[2]) << 24;
-@@ -1063,9 +1066,6 @@ static const char nes_ethtool_stringset[
+@@ -1040,9 +1043,6 @@ static const char nes_ethtool_stringset[
  	"CQ Depth 32",
  	"CQ Depth 128",
  	"CQ Depth 256",
@@ -339,17 +339,17 @@
  };
  
  #define NES_ETHTOOL_STAT_COUNT  ARRAY_SIZE(nes_ethtool_stringset)
-@@ -1263,9 +1263,6 @@ static void nes_netdev_get_ethtool_stats
- 	target_stat_values[52] = int_mod_cq_depth_32;
- 	target_stat_values[53] = int_mod_cq_depth_128;
- 	target_stat_values[54] = int_mod_cq_depth_256;
--	target_stat_values[55] = nesvnic->lro_mgr.stats.aggregated;
--	target_stat_values[56] = nesvnic->lro_mgr.stats.flushed;
--	target_stat_values[57] = nesvnic->lro_mgr.stats.no_desc;
+@@ -1240,9 +1240,6 @@ static void nes_netdev_get_ethtool_stats
+ 	target_stat_values[++index] = int_mod_cq_depth_32;
+ 	target_stat_values[++index] = int_mod_cq_depth_128;
+ 	target_stat_values[++index] = int_mod_cq_depth_256;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
  
  }
  
-@@ -1542,8 +1539,6 @@ static struct ethtool_ops nes_ethtool_op
+@@ -1525,8 +1522,6 @@ static struct ethtool_ops nes_ethtool_op
  	.set_sg = ethtool_op_set_sg,
  	.get_tso = ethtool_op_get_tso,
  	.set_tso = ethtool_op_set_tso,
@@ -358,7 +358,7 @@
  };
  
  
-@@ -1615,7 +1610,8 @@ struct net_device *nes_netdev_init(struc
+@@ -1598,7 +1593,8 @@ struct net_device *nes_netdev_init(struc
  	netdev->type = ARPHRD_ETHER;
  	netdev->features = NETIF_F_HIGHDMA;
  	netdev->ethtool_ops = &nes_ethtool_ops;
@@ -368,7 +368,7 @@
  	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
  	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
  	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
-@@ -1648,7 +1644,6 @@ struct net_device *nes_netdev_init(struc
+@@ -1630,7 +1626,6 @@ struct net_device *nes_netdev_init(struc
  
  	if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
  		netdev->features |= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/iw_nes_200_to_2_6_22.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/iw_nes_200_to_2_6_22.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/iw_nes_200_to_2_6_22.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:52:27.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -593,16 +593,22 @@ static int __devinit nes_probe(struct pc
  						nesdev->nesadapter->port_count;
  	}
@@ -35,9 +35,9 @@
  
  	tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.h nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h
---- linux-2.6/drivers/infiniband/hw/nes/nes.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2008-11-07 15:52:27.000000000 -0600
-@@ -276,14 +276,7 @@ static inline __le32 get_crc_value(struc
+--- linux-2.6/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:38:45.000000000 -0500
+@@ -274,14 +274,7 @@ static inline __le32 get_crc_value(struc
  	u32 crc_value;
  	crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad));
  

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/iw_nes_210_to_2_6_21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/iw_nes_210_to_2_6_21.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/iw_nes_210_to_2_6_21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,23 @@
+diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1549,6 +1549,11 @@ static void nes_netdev_vlan_rx_register(
+ 	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ }
+ 
++static void nes_netdev_vlan_rx_kill_vid(struct net_device *netdev, 
++					unsigned short vid)
++{
++}
++
+ 
+ /**
+  * nes_netdev_init - initialize network device
+@@ -1598,6 +1603,7 @@ struct net_device *nes_netdev_init(struc
+ 	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
+ 	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ 	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
++	netdev->vlan_rx_kill_vid = nes_netdev_vlan_rx_kill_vid;
+ 
+ 	/* Fill in the port structure */
+ 	nesvnic->netdev = netdev;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/mlx4_en_0099_no_multiqueue.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/mlx4_en_0099_no_multiqueue.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/mlx4_en_0099_no_multiqueue.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,73 @@
+diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
+index bce700a..a9ee0d7 100644
+--- a/drivers/net/mlx4/en_netdev.c
++++ b/drivers/net/mlx4/en_netdev.c
+@@ -963,7 +963,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	int i;
+ 	int err;
+ 
+-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
++	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+ 	if (dev == NULL) {
+ 		mlx4_err(mdev, "Net device allocation failed\n");
+ 		return -ENOMEM;
+@@ -1036,7 +1036,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	dev->open = &mlx4_en_open;
+ 	dev->stop = &mlx4_en_close;
+ 	dev->hard_start_xmit = &mlx4_en_xmit;
+-	dev->select_queue = &mlx4_en_select_queue;
+ 	dev->get_stats = &mlx4_en_get_stats;
+ 	dev->set_multicast_list = &mlx4_en_set_multicast;
+ 	dev->set_mac_address = &mlx4_en_set_mac;
+diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
+index 3d8246f..72e166f 100644
+--- a/drivers/net/mlx4/en_tx.c
++++ b/drivers/net/mlx4/en_tx.c
+@@ -392,7 +392,7 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+ 			 *   transmission on that ring would stop the queue.
+ 			 */
+ 			ring->blocked = 0;
+-			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
++			netif_wake_queue(dev);
+ 			priv->port_stats.wake_queue++;
+ 		}
+ 	}
+@@ -612,7 +612,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
+ 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ }
+ 
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
++static int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	u16 vlan_tag = 0;
+@@ -703,7 +703,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+-	tx_ind = skb->queue_mapping;
++	tx_ind = mlx4_en_select_queue(dev, skb);
+ 	ring = &priv->tx_ring[tx_ind];
+ 	if (priv->vlgrp && vlan_tx_tag_present(skb))
+ 		vlan_tag = vlan_tx_tag_get(skb);
+@@ -713,7 +713,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ 		/* every full Tx ring stops queue.
+ 		 * TODO: implement multi-queue support (per-queue stop) */
+-		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
++		netif_stop_queue(dev);
+ 		ring->blocked = 1;
+ 		priv->port_stats.queue_stopped++;
+ 
+diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
+index adef17c..995e318 100644
+--- a/drivers/net/mlx4/mlx4_en.h
++++ b/drivers/net/mlx4/mlx4_en.h
+@@ -521,7 +521,6 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq);
+ void mlx4_en_poll_tx_cq(unsigned long data);
+ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+ 
+ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+ 			   u32 size, u16 stride);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/mlx4_en_0100_to_2.6.24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/mlx4_en_0100_to_2.6.24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/mlx4_en_0100_to_2.6.24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,33 +1,5 @@
-From 6d0a6cc6981404e95297924c9a8f169b3447a98e Mon Sep 17 00:00:00 2001
-From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
-Date: Thu, 2 Oct 2008 11:26:30 +0300
-Subject: [PATCH] mlx4_en: LRO backport patch
-
-Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
----
- drivers/net/mlx4/Makefile    |    2 +-
- drivers/net/mlx4/en_cq.c     |   23 ++-
- drivers/net/mlx4/en_lro.c    |  529 ++++++++++++++++++++++++++++++++++++++++++
- drivers/net/mlx4/en_netdev.c |    5 +-
- drivers/net/mlx4/en_params.c |   19 +--
- drivers/net/mlx4/en_rx.c     |  121 +++-------
- drivers/net/mlx4/en_tx.c     |    1 +
- drivers/net/mlx4/mlx4_en.h   |   60 +++++-
- 8 files changed, 646 insertions(+), 114 deletions(-)
- create mode 100644 drivers/net/mlx4/en_lro.c
-
-diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
-index acb1b91..e6bfdec 100644
---- a/drivers/net/mlx4/Makefile
-+++ b/drivers/net/mlx4/Makefile
-@@ -5,4 +5,4 @@ mlx4_core-y :=  alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-
- obj-$(CONFIG_MLX4_EN)                  += mlx4_en.o
-
--mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o
-+mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o en_lro.o
 diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
-index cf9c5df..2348bfc 100644
+index a4d6e46..440633a 100644
 --- a/drivers/net/mlx4/en_cq.c
 +++ b/drivers/net/mlx4/en_cq.c
 @@ -34,6 +34,7 @@
@@ -38,7 +10,7 @@
  
  #include "mlx4_en.h"
  
-@@ -100,8 +101,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -106,8 +107,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  		cq->timer.function = mlx4_en_poll_tx_cq;
  		cq->timer.data = (unsigned long) cq;
  	} else {
@@ -58,7 +30,7 @@
  	}
  
  	return 0;
-@@ -123,8 +133,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -129,8 +139,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  
  	if (cq->is_tx)
  		del_timer(&cq->timer);
@@ -74,548 +46,8 @@
  
  	mlx4_cq_free(mdev->dev, &cq->mcq);
  }
-diff --git a/drivers/net/mlx4/en_lro.c b/drivers/net/mlx4/en_lro.c
-new file mode 100644
-index 0000000..9d17376
---- /dev/null
-+++ b/drivers/net/mlx4/en_lro.c
-@@ -0,0 +1,534 @@
-+/*
-+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses.  You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ *     Redistribution and use in source and binary forms, with or
-+ *     without modification, are permitted provided that the following
-+ *     conditions are met:
-+ *
-+ *      - Redistributions of source code must retain the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer.
-+ *
-+ *      - Redistributions in binary form must reproduce the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer in the documentation and/or other materials
-+ *        provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ */
-+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <net/tcp.h>
-+#include <linux/if_vlan.h>
-+#include <linux/delay.h>
-+
-+#include "mlx4_en.h"
-+
-+/* LRO hash function - using sum of source and destination port LSBs is
-+ * good enough */
-+#define LRO_INDEX(th, size) \
-+	((*((u8*) &th->source + 1) + *((u8*) &th->dest + 1)) & (size - 1))
-+
-+/* #define CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+static void mlx4_en_lro_validate(struct mlx4_en_priv* priv, struct mlx4_en_lro *lro)
-+{
-+	int i;
-+	int size, size2;
-+	struct sk_buff *skb = lro->skb;
-+	skb_frag_t *frags;
-+	int len, len2;
-+	int cur_skb = 0;
-+
-+	/* Sum fragment sizes of first skb */
-+	len = skb->len;
-+	size = skb_headlen(skb);
-+	frags = skb_shinfo(skb)->frags;
-+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+		size += frags[i].size;
-+	}
-+
-+	/* Add in fragments of linked skb's */
-+	skb = skb_shinfo(skb)->frag_list;
-+	while (skb) {
-+		cur_skb++;
-+		len2 = skb->len;
-+		if (skb_headlen(skb)) {
-+			mlx4_err(priv->mdev, "Bad LRO format: non-zero headlen "
-+				  "in fraglist (skb:%d)\n", cur_skb);
-+			return;
-+		}
-+
-+		size2 = 0;
-+		frags = skb_shinfo(skb)->frags;
-+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+			size2 += frags[i].size;
-+		}
-+
-+		if (size2 != len2) {
-+			mlx4_err(priv->mdev, "Bad skb size:%d in LRO fraglist. "
-+			          "Expected:%d (skb:%d)\n", size2, len2, cur_skb);
-+			return;
-+		}
-+		size += size2;
-+		skb = skb->next;
-+	}
-+
-+	if (size != len)
-+		mlx4_err(priv->mdev, "Bad LRO size:%d expected:%d\n", size, len);
-+}
-+#endif /* MLX4_EN_DEBUG_LRO */
-+
-+static void mlx4_en_lro_flush_single(struct mlx4_en_priv* priv,
-+		   struct mlx4_en_rx_ring* ring, struct mlx4_en_lro *lro)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct sk_buff *skb = lro->skb;
-+	struct iphdr *iph = (struct iphdr *) skb->data;
-+	struct tcphdr *th = (struct tcphdr *)(iph + 1);
-+	unsigned int headlen = skb_headlen(skb);
-+	__wsum tcp_hdr_csum;
-+	u32 *ts;
-+
-+	/* Update IP length and checksum */
-+	iph->tot_len = htons(lro->tot_len);
-+	iph->check = 0;
-+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-+
-+	/* Update latest TCP ack, window, psh, and timestamp */
-+	th->ack_seq = lro->ack_seq;
-+	th->window = lro->window;
-+	th->psh = !!lro->psh;
-+	if (lro->has_timestamp) {
-+		ts = (u32 *) (th + 1);
-+		ts[1] = htonl(lro->tsval);
-+		ts[2] = lro->tsecr;
-+	}
-+	th->check = 0;
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, 0);
-+	lro->data_csum = csum_add(lro->data_csum, tcp_hdr_csum);
-+	th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
-+				      lro->tot_len - (iph->ihl << 2),
-+				      IPPROTO_TCP, lro->data_csum);
-+
-+	/* Update skb */
-+	skb->len = lro->tot_len;
-+	skb->data_len = lro->tot_len - headlen;
-+	skb->truesize = skb->len + sizeof(struct sk_buff);
-+	skb_shinfo(skb)->gso_size = lro->mss;
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+	mlx4_en_lro_validate(priv, lro);
-+#endif /* CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+	/* Push it up the stack */
-+	if (priv->vlgrp && lro->has_vlan)
-+		vlan_hwaccel_receive_skb(skb, priv->vlgrp,
-+					be16_to_cpu(lro->vlan_prio));
-+	else
-+		netif_receive_skb(skb);
-+	priv->dev->last_rx = jiffies;
-+
-+	/* Increment stats */
-+	priv->port_stats.lro_flushed++;
-+
-+	/* Move session back to the free list */
-+	hlist_del(&lro->node);
-+	hlist_del(&lro->flush_node);
-+	hlist_add_head(&lro->node, &ring->lro_free);
-+}
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_flush, flush_node) {
-+		if (all || time_after(jiffies, lro->expires))
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+	}
-+}
-+
-+static inline int mlx4_en_lro_append(struct mlx4_en_priv *priv,
-+				   struct mlx4_en_lro *lro,
-+				   struct mlx4_en_rx_desc *rx_desc,
-+				   struct skb_frag_struct *skb_frags,
-+				   struct mlx4_en_rx_alloc *page_alloc,
-+				   unsigned int data_len,
-+				   int hlen)
-+{
-+	struct sk_buff *skb = lro->skb_last;
-+	struct skb_shared_info *info;
-+	struct skb_frag_struct *frags_copy;
-+	int nr_frags;
-+
-+	if (skb_shinfo(skb)->nr_frags + priv->num_frags > MAX_SKB_FRAGS)
-+		return -ENOMEM;
-+
-+	info = skb_shinfo(skb);
-+
-+	/* Copy fragments from descriptor ring to skb */
-+	frags_copy = info->frags + info->nr_frags;
-+	nr_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
-+						frags_copy,
-+						page_alloc,
-+						data_len + hlen);
-+	if (!nr_frags) {
-+		mlx4_dbg(DRV, priv, "Failed completing rx desc during LRO append\n");
-+		return -ENOMEM;
-+	}
-+
-+	/* Skip over headers */
-+	frags_copy[0].page_offset += hlen;
-+
-+	if (nr_frags == 1)
-+		frags_copy[0].size = data_len;
-+	else {
-+		/* Adjust size of last fragment to match packet length.
-+		 * Note: if this fragment is also the first one, the
-+		 *       operation is completed in the next line */
-+		frags_copy[nr_frags - 1].size = hlen + data_len -
-+				priv->frag_info[nr_frags - 1].frag_prefix_size;
-+
-+		/* Adjust size of first fragment */
-+		frags_copy[0].size -= hlen;
-+	}
-+
-+	/* Update skb bookkeeping */
-+	skb->len += data_len;
-+	skb->data_len += data_len;
-+	info->nr_frags += nr_frags;
-+	return 0;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_find_session(struct mlx4_en_dev *mdev,
-+						       struct mlx4_en_rx_ring *ring,
-+						       struct iphdr *iph,
-+						       struct tcphdr *th)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node;
-+	int index = LRO_INDEX(th, mdev->profile.num_lro);
-+	struct hlist_head *list = &ring->lro_hash[index];
-+
-+	hlist_for_each_entry(lro, node, list, node) {
-+		if (lro->sport_dport == *((u32*) &th->source) &&
-+		    lro->saddr == iph->saddr &&
-+		    lro->daddr == iph->daddr)
-+			return lro;
-+	}
-+	return NULL;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_alloc_session(struct mlx4_en_priv *priv,
-+							struct mlx4_en_rx_ring *ring)
-+{
-+	return hlist_empty(&ring->lro_free) ? NULL :
-+		hlist_entry(ring->lro_free.first, struct mlx4_en_lro, node);
-+}
-+
-+static __wsum mlx4_en_lro_tcp_data_csum(struct iphdr *iph,
-+					struct tcphdr *th, int len)
-+{
-+	__wsum tcp_csum;
-+	__wsum tcp_hdr_csum;
-+	__wsum tcp_ps_hdr_csum;
-+
-+	tcp_csum = ~csum_unfold(th->check);
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, tcp_csum);
-+
-+	tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-+					     len + (th->doff << 2),
-+					     IPPROTO_TCP, 0);
-+
-+	return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
-+			tcp_ps_hdr_csum);
-+}
-+
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+					  struct mlx4_en_rx_desc *rx_desc,
-+					  struct skb_frag_struct *skb_frags,
-+					  unsigned int length,
-+					  struct mlx4_cqe *cqe)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct mlx4_en_lro *lro;
-+	struct sk_buff *skb;
-+	struct iphdr *iph;
-+	struct tcphdr *th;
-+	dma_addr_t dma;
-+	int tcp_hlen;
-+	int tcp_data_len;
-+	int hlen;
-+	u16 ip_len;
-+	void *va;
-+	u32 *ts;
-+	u32 seq;
-+	u32 tsval = (u32) ~0UL;
-+	u32 tsecr = 0;
-+	u32 ack_seq;
-+	u16 window;
-+
-+	/* This packet is eligible for LRO if it is:
-+	 * - DIX Ethernet (type interpretation)
-+	 * - TCP/IP (v4)
-+	 * - without IP options
-+	 * - not an IP fragment */
-+	if (!mlx4_en_can_lro(cqe->status))
-+			return -1;
-+
-+	/* Get pointer to TCP header. We already know that the packet is DIX Ethernet/IPv4/TCP
-+	 * with no VLAN (HW stripped it) and no IP options */
-+	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
-+	iph = va + ETH_HLEN;
-+	th = (struct tcphdr *)(iph + 1);
-+
-+	/* Synchronsize headers for processing */
-+	dma = be64_to_cpu(rx_desc->data[0].addr);
-+#define MAX_LRO_HEADER		(ETH_HLEN + \
-+				 sizeof(*iph) + \
-+				 sizeof(*th) + \
-+				 TCPOLEN_TSTAMP_ALIGNED)
-+	dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
-+				      MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+
-+	/* We only handle aligned timestamp options */
-+	tcp_hlen = (th->doff << 2);
-+	if (tcp_hlen == sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) {
-+		ts = (u32*) (th + 1);
-+		if (unlikely(*ts != htonl((TCPOPT_NOP << 24) |
-+					  (TCPOPT_NOP << 16) |
-+					  (TCPOPT_TIMESTAMP << 8) |
-+					  TCPOLEN_TIMESTAMP)))
-+			goto sync_device;
-+		tsval = ntohl(ts[1]);
-+		tsecr = ts[2];
-+	} else if (tcp_hlen != sizeof(*th))
-+		goto sync_device;
-+	
-+
-+	/* At this point we know we have a TCP packet that is likely to be
-+	 * eligible for LRO. Therefore, see now if we have an oustanding
-+	 * session that corresponds to this packet so we could flush it if
-+	 * something still prevents LRO */
-+	lro = mlx4_en_lro_find_session(mdev, ring, iph, th);
-+
-+	/* ensure no bits set besides ack or psh */
-+	if (th->fin || th->syn || th->rst || th->urg || th->ece ||
-+	    th->cwr || !th->ack) {
-+		if (lro) {
-+			/* First flush session to keep packets in-order */
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		}
-+		goto sync_device;
-+	}
-+
-+	/* Get ip length and verify that the frame is big enough */
-+	ip_len = ntohs(iph->tot_len);
-+	if (unlikely(length < ETH_HLEN + ip_len)) {
-+		mlx4_warn(mdev, "Cannot LRO - ip payload exceeds frame!\n");
-+		goto sync_device;
-+	}
-+
-+	/* Get TCP payload length */
-+	tcp_data_len = ip_len - tcp_hlen - sizeof(struct iphdr);
-+	seq = ntohl(th->seq);
-+	if (!tcp_data_len)
-+		goto flush_session;
-+
-+	if (lro) {
-+		/* Check VLAN tag */
-+		if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+			if (cqe->sl_vid != lro->vlan_prio || !lro->has_vlan) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else if (lro->has_vlan) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* Check sequence number */
-+		if (unlikely(seq != lro->next_seq)) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* If the cummulative IP length is over 64K, flush and start
-+		 * a new session */
-+		if (lro->tot_len + tcp_data_len > 0xffff) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto new_session;
-+		}
-+
-+		/* Check timestamps */
-+		if (tcp_hlen != sizeof(*th)) {
-+			if (unlikely(lro->tsval > tsval || !tsecr))
-+				goto sync_device;
-+		}
-+
-+		window = th->window;
-+		ack_seq = th->ack_seq;
-+		if (likely(tcp_data_len)) {
-+			/* Append the data! */
-+			hlen = ETH_HLEN + sizeof(struct iphdr) + tcp_hlen;
-+			if (mlx4_en_lro_append(priv, lro, rx_desc, skb_frags,
-+							ring->page_alloc,
-+							tcp_data_len, hlen)) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else {
-+			/* No data */
-+			dma_sync_single_range_for_device(&mdev->dev->pdev->dev, dma,
-+							 0, MAX_LRO_HEADER,
-+							 DMA_FROM_DEVICE);
-+		}
-+
-+		/* Update session */
-+		lro->psh |= th->psh;
-+		lro->next_seq += tcp_data_len;
-+		lro->data_csum = csum_block_add(lro->data_csum,
-+					mlx4_en_lro_tcp_data_csum(iph, th,
-+								  tcp_data_len),
-+					lro->tot_len);
-+		lro->tot_len += tcp_data_len;
-+		lro->tsval = tsval;
-+		lro->tsecr = tsecr;
-+		lro->ack_seq = ack_seq;
-+		lro->window = window;
-+		if (tcp_data_len > lro->mss)
-+			lro->mss = tcp_data_len;
-+		priv->port_stats.lro_aggregated++;
-+		if (th->psh)
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		return 0;
-+	}
-+
-+new_session:
-+	if (th->psh)
-+		goto sync_device;
-+	lro = mlx4_en_lro_alloc_session(priv, ring);
-+	if (lro) {
-+		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags, ring->page_alloc,
-+							     ETH_HLEN + ip_len);
-+		if (skb) {
-+			int index;
-+
-+			/* Add in the skb */
-+			lro->skb = skb;
-+			lro->skb_last = skb;
-+			skb->protocol = eth_type_trans(skb, priv->dev);
-+			skb->ip_summed = CHECKSUM_UNNECESSARY;
-+
-+			/* Initialize session */
-+			lro->saddr = iph->saddr;
-+			lro->daddr = iph->daddr;
-+			lro->sport_dport = *((u32*) &th->source);
-+
-+			lro->next_seq = seq + tcp_data_len;
-+			lro->tot_len = ip_len;
-+			lro->psh = th->psh;
-+			lro->ack_seq = th->ack_seq;
-+			lro->window = th->window;
-+			lro->mss = tcp_data_len;
-+			lro->data_csum = mlx4_en_lro_tcp_data_csum(iph, th,
-+						tcp_data_len);
-+
-+			/* Handle vlans */
-+			if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+				lro->vlan_prio = cqe->sl_vid;
-+				lro->has_vlan = 1;
-+			} else
-+				lro->has_vlan = 0;
-+
-+			/* Handle timestamps */
-+			if (tcp_hlen != sizeof(*th)) {
-+				lro->tsval = tsval;
-+				lro->tsecr = tsecr;
-+				lro->has_timestamp = 1;
-+			} else {
-+				lro->tsval = (u32) ~0UL;
-+				lro->has_timestamp = 0;
-+			}
-+
-+			/* Activate this session */
-+			lro->expires = jiffies + HZ / 25;
-+			hlist_del(&lro->node);
-+			index = LRO_INDEX(th, mdev->profile.num_lro);
-+
-+			hlist_add_head(&lro->node, &ring->lro_hash[index]);
-+			hlist_add_head(&lro->flush_node, &ring->lro_flush);
-+			priv->port_stats.lro_aggregated++;
-+			return 0;
-+		}
-+	} else {
-+		priv->port_stats.lro_no_desc++;
-+	}
-+
-+flush_session:
-+	if (lro)
-+		mlx4_en_lro_flush_single(priv, ring, lro);
-+sync_device:
-+	dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
-+					 MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+	return -1;
-+}
-+
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_free, node) {
-+		hlist_del(&lro->node);
-+		kfree(lro);
-+	}
-+	kfree(ring->lro_hash);
-+}
-+
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro)
-+{
-+	struct mlx4_en_lro *lro;
-+	int i;
-+
-+	INIT_HLIST_HEAD(&ring->lro_free);
-+	INIT_HLIST_HEAD(&ring->lro_flush);
-+	ring->lro_hash = kmalloc(sizeof(struct hlist_head) * num_lro,
-+				 GFP_KERNEL);
-+	if (!ring->lro_hash)
-+		return -ENOMEM;
-+
-+	for (i = 0; i < num_lro; i++) {
-+		INIT_HLIST_HEAD(&ring->lro_hash[i]);
-+		lro = kzalloc(sizeof(struct mlx4_en_lro), GFP_KERNEL);
-+		if (!lro) {
-+			mlx4_en_lro_destroy(ring);
-+			return -ENOMEM;
-+		}
-+		INIT_HLIST_NODE(&lro->node);
-+		INIT_HLIST_NODE(&lro->flush_node);
-+		hlist_add_head(&lro->node, &ring->lro_free);
-+	}
-+	return 0;
-+}
-+
-+
 diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
-index 6199f4c..92f91d2 100644
+index 2d57ae6..3b394d7 100644
 --- a/drivers/net/mlx4/en_netdev.c
 +++ b/drivers/net/mlx4/en_netdev.c
 @@ -333,7 +333,8 @@ void mlx4_en_netpoll(struct net_device *dev)
@@ -625,10 +57,10 @@
 -		napi_synchronize(&cq->napi);
 +		while (test_bit(__LINK_STATE_RX_SCHED, &cq->poll_dev->state))
 +			msleep(1);
- 		mlx4_en_process_rx_cq(dev, cq, 0);
- 		spin_unlock_irqrestore(&cq->lock, flags);
- 	}
-@@ -757,8 +758,6 @@ void mlx4_en_stop_port(struct net_device *dev)
+ 		if (priv->rx_ring[i].use_frags)
+ 			mlx4_en_process_rx_cq(dev, cq, 0);
+ 		else
+@@ -755,8 +756,6 @@ void mlx4_en_stop_port(struct net_device *dev)
  	/* Free RX Rings */
  	for (i = 0; i < priv->rx_ring_num; i++) {
  		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -637,226 +69,11 @@
  		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
  	}
  }
-diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
-index 6e96d0a..2b0ca84 100644
---- a/drivers/net/mlx4/en_params.c
-+++ b/drivers/net/mlx4/en_params.c
-@@ -142,21 +142,6 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
-  * Ethtool support
-  */
- 
--static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
--{
--	int i;
--
--	priv->port_stats.lro_aggregated = 0;
--	priv->port_stats.lro_flushed = 0;
--	priv->port_stats.lro_no_desc = 0;
--
--	for (i = 0; i < priv->rx_ring_num; i++) {
--		priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
--		priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
--		priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
--	}
--}
--
- static void
- mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
- {
-@@ -266,8 +251,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
- 
- 	spin_lock_bh(&priv->stats_lock);
- 
--	mlx4_en_update_lro_stats(priv);
--
- 	for (i = 0; i < NUM_MAIN_STATS; i++)
- 		data[index++] = ((unsigned long *) &priv->stats)[i];
- 	for (i = 0; i < NUM_PORT_STATS; i++)
-@@ -457,7 +440,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
- 	.get_rx_csum = mlx4_en_get_rx_csum,
- 	.set_rx_csum = mlx4_en_set_rx_csum,
- 	.get_tx_csum = ethtool_op_get_tx_csum,
--	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
-+	.set_tx_csum = ethtool_op_set_tx_csum,
- 	.get_strings = mlx4_en_get_strings,
- 	.get_sset_count = mlx4_en_get_sset_count,
- 	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
 diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
-index 5a95d74..16f07bc 100644
+index 9d144cd..14b5faf 100644
 --- a/drivers/net/mlx4/en_rx.c
 +++ b/drivers/net/mlx4/en_rx.c
-@@ -37,6 +37,7 @@
- #include <linux/if_ether.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/etherdevice.h>
- 
- #include "mlx4_en.h"
- 
-@@ -51,18 +52,6 @@ static void mlx4_en_srq_event(struct mlx
- 	return;
- }
- 
--static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
--				   void **ip_hdr, void **tcpudp_hdr,
--				   u64 *hdr_flags, void *priv)
--{
--	*mac_hdr = page_address(frags->page) + frags->page_offset;
--	*ip_hdr = *mac_hdr + ETH_HLEN;
--	*tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
--	*hdr_flags = LRO_IPV4 | LRO_TCP;
--
--	return 0;
--}
--
- static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
- 			      struct mlx4_en_rx_desc *rx_desc,
- 			      struct skb_frag_struct *skb_frags,
-@@ -372,23 +361,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- 	}
- 	ring->buf = ring->wqres.buf.direct.buf;
- 
--	/* Configure lro mngr */
--	memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
--	ring->lro.dev = priv->dev;
--	ring->lro.features = LRO_F_NAPI;
--	ring->lro.frag_align_pad = NET_IP_ALIGN;
--	ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
--	ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
--	ring->lro.max_desc = mdev->profile.num_lro;
--	ring->lro.max_aggr = MAX_SKB_FRAGS;
--	ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
--				    sizeof(struct net_lro_desc),
--				    GFP_KERNEL);
--	if (!ring->lro.lro_arr) {
--		mlx4_err(mdev, "Failed to allocate lro array\n");
-+	/* Allocate LRO sessions */
-+	if (mlx4_en_lro_init(ring, mdev->profile.num_lro)) {
-+		mlx4_err(mdev, "Failed allocating lro sessions\n");
- 		goto err_map;
- 	}
--	ring->lro.get_frag_header = mlx4_en_get_frag_header;
- 
- 	return 0;
- 
-@@ -501,7 +478,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 
--	kfree(ring->lro.lro_arr);
-+	mlx4_en_lro_destroy(ring);
- 	mlx4_en_unmap_buffer(&ring->wqres.buf);
- 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
- 	vfree(ring->rx_info);
-@@ -522,12 +499,12 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- 
- 
- /* Unmap a completed descriptor and free unused pages */
--static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
--				    struct mlx4_en_rx_desc *rx_desc,
--				    struct skb_frag_struct *skb_frags,
--				    struct skb_frag_struct *skb_frags_rx,
--				    struct mlx4_en_rx_alloc *page_alloc,
--				    int length)
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct mlx4_en_frag_info *frag_info;
-@@ -570,11 +547,11 @@ fail:
- }
- 
- 
--static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
--				      struct mlx4_en_rx_desc *rx_desc,
--				      struct skb_frag_struct *skb_frags,
--				      struct mlx4_en_rx_alloc *page_alloc,
--				      unsigned int length)
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct sk_buff *skb;
-@@ -663,11 +640,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 	struct mlx4_cqe *cqe;
- 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
- 	struct skb_frag_struct *skb_frags;
--	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
- 	struct mlx4_en_rx_desc *rx_desc;
- 	struct sk_buff *skb;
- 	int index;
--	int nr;
- 	unsigned int length;
- 	int polled = 0;
- 	int ip_summed;
-@@ -718,38 +693,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 			if ((cqe->status & MLX4_CQE_STATUS_IPOK) &&
- 			    (cqe->checksum == 0xffff)) {
- 				priv->port_stats.rx_chksum_good++;
--				/* This packet is eligible for LRO if it is:
--				 * - DIX Ethernet (type interpretation)
--				 * - TCP/IP (v4)
--				 * - without IP options
--				 * - not an IP fragment */
--				if (mlx4_en_can_lro(cqe->status) &&
--				    dev->features & NETIF_F_LRO) {
--
--					nr = mlx4_en_complete_rx_desc(
--						priv, rx_desc,
--						skb_frags, lro_frags,
--						ring->page_alloc, length);
--					if (!nr)
--						goto next;
--
--					if (priv->vlgrp && (cqe->vlan_my_qpn &
--						MLX4_CQE_VLAN_PRESENT_MASK)) {
--						lro_vlan_hwaccel_receive_frags(
--						       &ring->lro, lro_frags,
--						       length, length,
--						       priv->vlgrp,
--						       be16_to_cpu(cqe->sl_vid),
--						       NULL, 0);
--					} else
--						lro_receive_frags(&ring->lro,
--								  lro_frags,
--								  length,
--								  length,
--								  NULL, 0);
--
-+				if (mdev->profile.num_lro &&
-+				    !mlx4_en_lro_rx(priv, ring, rx_desc,
-+						    skb_frags, length, cqe))
- 					goto next;
--				}
- 
- 				/* LRO not possible, complete processing here */
- 				ip_summed = CHECKSUM_UNNECESSARY;
-@@ -790,13 +737,15 @@ next:
- 		if (++polled == budget) {
- 			/* We are here because we reached the NAPI budget -
- 			 * flush only pending LRO sessions */
--			lro_flush_all(&ring->lro);
-+			if (mdev->profile.num_lro)
-+				mlx4_en_lro_flush(priv, ring, 0);
- 			goto out;
- 		}
- 	}
- 
- 	/* If CQ is empty flush all LRO sessions unconditionally */
--	lro_flush_all(&ring->lro);
-+	if (mdev->profile.num_lro)
-+		mlx4_en_lro_flush(priv, ring, 1);
- 
- out:
- 	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
-@@ -820,30 +769,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
+@@ -1041,30 +1041,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
  	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
  
  	if (priv->port_up)
@@ -877,8 +94,8 @@
  	int done;
 +	int work = min(*budget, poll_dev->quota);
  
--	done = mlx4_en_process_rx_cq(dev, cq, budget);
-+	done = mlx4_en_process_rx_cq(dev, cq, work);
+-	done = cq->process_cq(dev, cq, budget);
++	done = cq->process_cq(dev, cq, work);
 +	dev->quota -= done;
 +	*budget -= done;
  
@@ -901,76 +118,11 @@
  }
  
  
-diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
-index 2297fec..59f8772 100644
---- a/drivers/net/mlx4/en_tx.c
-+++ b/drivers/net/mlx4/en_tx.c
-@@ -37,6 +37,7 @@
- #include <linux/skbuff.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/tcp.h>
- 
- #include "mlx4_en.h"
- 
 diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
-index 1011803..7c8c489 100644
+index ebaed67..456cd91 100644
 --- a/drivers/net/mlx4/mlx4_en.h
 +++ b/drivers/net/mlx4/mlx4_en.h
-@@ -38,7 +38,7 @@
- #include <linux/list.h>
- #include <linux/mutex.h>
- #include <linux/netdevice.h>
--#include <linux/inet_lro.h>
-+#include <net/checksum.h>
- 
- #include <linux/mlx4/device.h>
- #include <linux/mlx4/qp.h>
-@@ -268,11 +268,42 @@ struct mlx4_en_rx_desc {
- 	struct mlx4_wqe_data_seg data[0];
- };
- 
-+struct mlx4_en_lro {
-+	struct hlist_node node;
-+	struct hlist_node flush_node;
-+
-+	/* Id fields come first: */
-+	u32 saddr;
-+	u32 daddr;
-+	u32 sport_dport;
-+	u32 next_seq;
-+	u16 tot_len;
-+	u8 psh;
-+
-+	u32 tsval;
-+	u32 tsecr;
-+	u32 ack_seq;
-+	u16 window;
-+	__be16 vlan_prio;
-+	u16 has_vlan;
-+	u16 has_timestamp;
-+	u16 mss;
-+	__wsum  data_csum;
-+
-+	unsigned long expires;
-+	struct sk_buff *skb;
-+	struct sk_buff *skb_last;
-+};
-+
-+
- struct mlx4_en_rx_ring {
- 	struct mlx4_srq srq;
- 	struct mlx4_hwq_resources wqres;
- 	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
--	struct net_lro_mgr lro;
-+	struct mlx4_en_lro lro;
-+	struct hlist_head *lro_hash;
-+	struct hlist_head lro_free;
-+	struct hlist_head lro_flush;
- 	u32 size ;	/* number of Rx descs*/
- 	u32 actual_size;
- 	u32 size_mask;
-@@ -311,7 +342,7 @@ struct mlx4_en_cq {
+@@ -328,7 +328,7 @@ struct mlx4_en_cq {
  	int                     ring;
  	spinlock_t              lock;
  	struct net_device      *dev;
@@ -979,43 +131,12 @@
  	/* Per-core Tx cq processing support */
  	struct timer_list timer;
  	int size;
-@@ -541,7 +572,7 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- int mlx4_en_process_rx_cq(struct net_device *dev,
- 			  struct mlx4_en_cq *cq,
- 			  int budget);
+@@ -571,7 +571,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
+ int mlx4_en_process_rx_cq_skb(struct net_device *dev,
+ 			      struct mlx4_en_cq *cq,
+ 			      int budget);
 -int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
 +int mlx4_en_poll_rx_cq(struct net_device *poll_dev, int *budget);;
  void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
  			     int is_tx, int rss, int qpn, int cqn, int srqn,
  			     struct mlx4_qp_context *context);
-@@ -552,6 +583,27 @@ void mlx4_en_calc_rx_buf(struct net_device *dev);
- void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
- 				 struct mlx4_en_rss_map *rss_map,
- 				 int num_entries, int num_rings);
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all);
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+		   struct mlx4_en_rx_desc *rx_desc,
-+		   struct skb_frag_struct *skb_frags,
-+		   unsigned int length, struct mlx4_cqe *cqe);
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring);
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro);
-+
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv* priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length);
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv* priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length);
-+
- void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
- int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
- void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
--- 
-1.5.4
-

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/mlx4_en_0110_no_set_flags.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/mlx4_en_0110_no_set_flags.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/mlx4_en_0110_no_set_flags.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -7,9 +7,9 @@
 --- ofed_kernel.orig/drivers/net/mlx4/en_params.c
 +++ ofed_kernel/drivers/net/mlx4/en_params.c
 @@ -593,8 +593,6 @@ const struct ethtool_ops mlx4_en_ethtool
- 	.get_pauseparam = mlx4_en_get_pauseparam,
  	.set_pauseparam = mlx4_en_set_pauseparam,
  	.get_ringparam = mlx4_en_get_ringparam,
+ 	.set_ringparam = mlx4_en_set_ringparam,
 -	.get_flags = ethtool_op_get_flags,
 -	.set_flags = ethtool_op_set_flags,
  };

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/rnfs_fs.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/rnfs_fs.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.2/rnfs_fs.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,2808 @@
+diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
+index cc91227..262397b 100644
+--- a/fs/exportfs/expfs.c
++++ b/fs/exportfs/expfs.c
+@@ -361,11 +361,14 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
+ 	const struct export_operations *nop = mnt->mnt_sb->s_export_op;
+ 	struct dentry *result, *alias;
+ 	int err;
++	__u32 objp[2];
+ 
++	objp[0] = fid->i32.ino;
++	objp[1] = fid->i32.gen;
+ 	/*
+ 	 * Try to get any dentry for the given file handle from the filesystem.
+ 	 */
+-	result = nop->fh_to_dentry(mnt->mnt_sb, fid, fh_len, fileid_type);
++	result = nop->get_dentry(mnt->mnt_sb, &objp);
+ 	if (!result)
+ 		result = ERR_PTR(-ESTALE);
+ 	if (IS_ERR(result))
+@@ -417,11 +420,10 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
+ 		 * file handle.  If this fails we'll have to give up.
+ 		 */
+ 		err = -ESTALE;
+-		if (!nop->fh_to_parent)
++		if (!nop->get_parent)
+ 			goto err_result;
+ 
+-		target_dir = nop->fh_to_parent(mnt->mnt_sb, fid,
+-				fh_len, fileid_type);
++		target_dir = nop->get_parent(result);
+ 		if (!target_dir)
+ 			goto err_result;
+ 		err = PTR_ERR(target_dir);
+diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
+index 0b45fd3..2c45814 100644
+--- a/fs/lockd/clntlock.c
++++ b/fs/lockd/clntlock.c
+@@ -168,7 +168,7 @@ __be32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *lock
+ 			continue;
+ 		if (!nlm_cmp_addr(&block->b_host->h_addr, addr))
+ 			continue;
+-		if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_path.dentry->d_inode) ,fh) != 0)
++		if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_dentry->d_inode), fh) != 0)
+ 			continue;
+ 		/* Alright, we found a lock. Set the return status
+ 		 * and wake up the caller
+diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
+index 31668b6..8c72d30 100644
+--- a/fs/lockd/clntproc.c
++++ b/fs/lockd/clntproc.c
+@@ -128,12 +128,12 @@ static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
+ 
+ 	nlmclnt_next_cookie(&argp->cookie);
+ 	argp->state   = nsm_local_state;
+-	memcpy(&lock->fh, NFS_FH(fl->fl_file->f_path.dentry->d_inode), sizeof(struct nfs_fh));
+-	lock->caller  = utsname()->nodename;
++	memcpy(&lock->fh, NFS_FH(fl->fl_file->f_dentry->d_inode), sizeof(struct nfs_fh));
++	lock->caller  = system_utsname.nodename;
+ 	lock->oh.data = req->a_owner;
+ 	lock->oh.len  = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
+ 				(unsigned int)fl->fl_u.nfs_fl.owner->pid,
+-				utsname()->nodename);
++				system_utsname.nodename);
+ 	lock->svid = fl->fl_u.nfs_fl.owner->pid;
+ 	lock->fl.fl_start = fl->fl_start;
+ 	lock->fl.fl_end = fl->fl_end;
+diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
+index e4d5635..771edc1 100644
+--- a/fs/lockd/mon.c
++++ b/fs/lockd/mon.c
+@@ -194,7 +194,7 @@ static __be32 *xdr_encode_mon_name(__be32 *p, struct nsm_args *argp)
+  */
+ static __be32 *xdr_encode_my_id(__be32 *p, struct nsm_args *argp)
+ {
+-	p = xdr_encode_nsm_string(p, utsname()->nodename);
++	p = xdr_encode_nsm_string(p, system_utsname.nodename);
+ 	if (!p)
+ 		return ERR_PTR(-EIO);
+ 
+diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
+index cf0d5c2..a353cf5 100644
+--- a/fs/lockd/svclock.c
++++ b/fs/lockd/svclock.c
+@@ -304,7 +304,7 @@ static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
+ {
+ 	locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
+ 	memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
+-	call->a_args.lock.caller = utsname()->nodename;
++	call->a_args.lock.caller = system_utsname.nodename;
+ 	call->a_args.lock.oh.len = lock->oh.len;
+ 
+ 	/* set default data area */
+@@ -367,8 +367,8 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
+ 	__be32			ret;
+ 
+ 	dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
+-				file->f_file->f_path.dentry->d_inode->i_sb->s_id,
+-				file->f_file->f_path.dentry->d_inode->i_ino,
++				file->f_file->f_dentry->d_inode->i_sb->s_id,
++				file->f_file->f_dentry->d_inode->i_ino,
+ 				lock->fl.fl_type, lock->fl.fl_pid,
+ 				(long long)lock->fl.fl_start,
+ 				(long long)lock->fl.fl_end,
+@@ -417,11 +417,18 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
+ 			ret = nlm_granted;
+ 			goto out;
+ 		case -EAGAIN:
+-			ret = nlm_lck_denied;
+-			goto out;
++			if (wait) {
++				ret = nlm_lck_blocked;
++				break;
++			} else {
++				ret = nlm_lck_denied;
++				goto out;
++			}
+ 		case FILE_LOCK_DEFERRED:
+-			if (wait)
++			if (wait) {
++				ret = nlm_lck_blocked;
+ 				break;
++			}
+ 			/* Filesystem lock operation is in progress
+ 			   Add it to the queue waiting for callback */
+ 			ret = nlmsvc_defer_lock_rqst(rqstp, block);
+@@ -434,8 +441,6 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
+ 			goto out;
+ 	}
+ 
+-	ret = nlm_lck_blocked;
+-
+ 	/* Append to list of blocked */
+ 	nlmsvc_insert_block(block, NLM_NEVER);
+ out:
+@@ -458,8 +463,8 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
+ 	__be32			ret;
+ 
+ 	dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
+-				file->f_file->f_path.dentry->d_inode->i_sb->s_id,
+-				file->f_file->f_path.dentry->d_inode->i_ino,
++				file->f_file->f_dentry->d_inode->i_sb->s_id,
++				file->f_file->f_dentry->d_inode->i_ino,
+ 				lock->fl.fl_type,
+ 				(long long)lock->fl.fl_start,
+ 				(long long)lock->fl.fl_end);
+@@ -547,8 +552,8 @@ nlmsvc_unlock(struct nlm_file *file, struct nlm_lock *lock)
+ 	int	error;
+ 
+ 	dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
+-				file->f_file->f_path.dentry->d_inode->i_sb->s_id,
+-				file->f_file->f_path.dentry->d_inode->i_ino,
++				file->f_file->f_dentry->d_inode->i_sb->s_id,
++				file->f_file->f_dentry->d_inode->i_ino,
+ 				lock->fl.fl_pid,
+ 				(long long)lock->fl.fl_start,
+ 				(long long)lock->fl.fl_end);
+@@ -576,8 +581,8 @@ nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
+ 	int status = 0;
+ 
+ 	dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
+-				file->f_file->f_path.dentry->d_inode->i_sb->s_id,
+-				file->f_file->f_path.dentry->d_inode->i_ino,
++				file->f_file->f_dentry->d_inode->i_sb->s_id,
++				file->f_file->f_dentry->d_inode->i_ino,
+ 				lock->fl.fl_pid,
+ 				(long long)lock->fl.fl_start,
+ 				(long long)lock->fl.fl_end);
+diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
+index 198b4e5..2109091 100644
+--- a/fs/lockd/svcsubs.c
++++ b/fs/lockd/svcsubs.c
+@@ -45,7 +45,7 @@ static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f)
+ 
+ static inline void nlm_debug_print_file(char *msg, struct nlm_file *file)
+ {
+-	struct inode *inode = file->f_file->f_path.dentry->d_inode;
++	struct inode *inode = file->f_file->f_dentry->d_inode;
+ 
+ 	dprintk("lockd: %s %s/%ld\n",
+ 		msg, inode->i_sb->s_id, inode->i_ino);
+@@ -396,7 +396,7 @@ nlmsvc_match_sb(void *datap, struct nlm_file *file)
+ {
+ 	struct super_block *sb = datap;
+ 
+-	return sb == file->f_file->f_path.mnt->mnt_sb;
++	return sb == file->f_file->f_vfsmnt->mnt_sb;
+ }
+ 
+ /**
+diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
+index ac6170c..78eca38 100644
+--- a/fs/nfs/Makefile
++++ b/fs/nfs/Makefile
+@@ -6,7 +6,8 @@ obj-$(CONFIG_NFS_FS) += nfs.o
+ 
+ nfs-y 			:= client.o dir.o file.o getroot.o inode.o super.o nfs2xdr.o \
+ 			   direct.o pagelist.o proc.o read.o symlink.o unlink.o \
+-			   write.o namespace.o mount_clnt.o
++			   write.o namespace.o mount_clnt.o backport-namespace.o \
++			   backport-writeback.o
+ nfs-$(CONFIG_ROOT_NFS)	+= nfsroot.o
+ nfs-$(CONFIG_NFS_V3)	+= nfs3proc.o nfs3xdr.o
+ nfs-$(CONFIG_NFS_V3_ACL)	+= nfs3acl.o
+diff --git a/fs/nfs/backport-namespace.c b/fs/nfs/backport-namespace.c
+new file mode 100644
+index 0000000..de57f8b
+--- /dev/null
++++ b/fs/nfs/backport-namespace.c
+@@ -0,0 +1 @@
++#include "src/namespace.c"
+diff --git a/fs/nfs/backport-writeback.c b/fs/nfs/backport-writeback.c
+new file mode 100644
+index 0000000..b838ead
+--- /dev/null
++++ b/fs/nfs/backport-writeback.c
+@@ -0,0 +1 @@
++#include "src/writeback.c"
+diff --git a/fs/nfs/client.c b/fs/nfs/client.c
+index 5ee23e7..afbb834 100644
+--- a/fs/nfs/client.c
++++ b/fs/nfs/client.c
+@@ -248,6 +248,7 @@ static int nfs_sockaddr_match_ipaddr(const struct sockaddr *sa1,
+ 				(const struct sockaddr_in6 *)sa2);
+ 	}
+ 	BUG();
++	return -EINVAL;
+ }
+ 
+ /*
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 74f92b7..90d0a97 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -66,7 +66,7 @@ const struct file_operations nfs_dir_operations = {
+ 	.fsync		= nfs_fsync_dir,
+ };
+ 
+-const struct inode_operations nfs_dir_inode_operations = {
++struct inode_operations nfs_dir_inode_operations = {
+ 	.create		= nfs_create,
+ 	.lookup		= nfs_lookup,
+ 	.link		= nfs_link,
+@@ -82,7 +82,7 @@ const struct inode_operations nfs_dir_inode_operations = {
+ };
+ 
+ #ifdef CONFIG_NFS_V3
+-const struct inode_operations nfs3_dir_inode_operations = {
++struct inode_operations nfs3_dir_inode_operations = {
+ 	.create		= nfs_create,
+ 	.lookup		= nfs_lookup,
+ 	.link		= nfs_link,
+@@ -105,7 +105,7 @@ const struct inode_operations nfs3_dir_inode_operations = {
+ #ifdef CONFIG_NFS_V4
+ 
+ static struct dentry *nfs_atomic_lookup(struct inode *, struct dentry *, struct nameidata *);
+-const struct inode_operations nfs4_dir_inode_operations = {
++struct inode_operations nfs4_dir_inode_operations = {
+ 	.create		= nfs_create,
+ 	.lookup		= nfs_atomic_lookup,
+ 	.link		= nfs_link,
+@@ -134,8 +134,8 @@ nfs_opendir(struct inode *inode, struct file *filp)
+ 	int res;
+ 
+ 	dfprintk(FILE, "NFS: open dir(%s/%s)\n",
+-			filp->f_path.dentry->d_parent->d_name.name,
+-			filp->f_path.dentry->d_name.name);
++			filp->f_dentry->d_parent->d_name.name,
++			filp->f_dentry->d_name.name);
+ 
+ 	nfs_inc_stats(inode, NFSIOS_VFSOPEN);
+ 
+@@ -175,7 +175,7 @@ static
+ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page)
+ {
+ 	struct file	*file = desc->file;
+-	struct inode	*inode = file->f_path.dentry->d_inode;
++	struct inode	*inode = file->f_dentry->d_inode;
+ 	struct rpc_cred	*cred = nfs_file_cred(file);
+ 	unsigned long	timestamp;
+ 	int		error;
+@@ -186,7 +186,7 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page)
+ 
+  again:
+ 	timestamp = jiffies;
+-	error = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, desc->entry->cookie, page,
++	error = NFS_PROTO(inode)->readdir(file->f_dentry, cred, desc->entry->cookie, page,
+ 					  NFS_SERVER(inode)->dtsize, desc->plus);
+ 	if (error < 0) {
+ 		/* We requested READDIRPLUS, but the server doesn't grok it */
+@@ -311,7 +311,7 @@ int find_dirent_index(nfs_readdir_descriptor_t *desc)
+ static inline
+ int find_dirent_page(nfs_readdir_descriptor_t *desc)
+ {
+-	struct inode	*inode = desc->file->f_path.dentry->d_inode;
++	struct inode	*inode = desc->file->f_dentry->d_inode;
+ 	struct page	*page;
+ 	int		status;
+ 
+@@ -467,7 +467,7 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
+ 		     filldir_t filldir)
+ {
+ 	struct file	*file = desc->file;
+-	struct inode	*inode = file->f_path.dentry->d_inode;
++	struct inode	*inode = file->f_dentry->d_inode;
+ 	struct rpc_cred	*cred = nfs_file_cred(file);
+ 	struct page	*page = NULL;
+ 	int		status;
+@@ -482,7 +482,7 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
+ 		goto out;
+ 	}
+ 	timestamp = jiffies;
+-	status = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred,
++	status = NFS_PROTO(inode)->readdir(file->f_dentry, cred,
+ 						*desc->dir_cookie, page,
+ 						NFS_SERVER(inode)->dtsize,
+ 						desc->plus);
+@@ -520,7 +520,7 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
+  */
+ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+ {
+-	struct dentry	*dentry = filp->f_path.dentry;
++	struct dentry	*dentry = filp->f_dentry;
+ 	struct inode	*inode = dentry->d_inode;
+ 	nfs_readdir_descriptor_t my_desc,
+ 			*desc = &my_desc;
+@@ -601,7 +601,7 @@ out:
+ 
+ static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin)
+ {
+-	struct dentry *dentry = filp->f_path.dentry;
++	struct dentry *dentry = filp->f_dentry;
+ 	struct inode *inode = dentry->d_inode;
+ 
+ 	dfprintk(FILE, "NFS: llseek dir(%s/%s, %lld, %d)\n",
+@@ -973,7 +973,7 @@ static int is_atomic_open(struct inode *dir, struct nameidata *nd)
+ 	if (nd->flags & LOOKUP_DIRECTORY)
+ 		return 0;
+ 	/* Are we trying to write to a read only partition? */
+-	if (__mnt_is_readonly(nd->path.mnt) &&
++	if (__mnt_is_readonly(nd->mnt) &&
+ 	    (nd->intent.open.flags & (O_CREAT|O_TRUNC|FMODE_WRITE)))
+ 		return 0;
+ 	return 1;
+@@ -1083,7 +1083,7 @@ no_open:
+ 
+ static struct dentry *nfs_readdir_lookup(nfs_readdir_descriptor_t *desc)
+ {
+-	struct dentry *parent = desc->file->f_path.dentry;
++	struct dentry *parent = desc->file->f_dentry;
+ 	struct inode *dir = parent->d_inode;
+ 	struct nfs_entry *entry = desc->entry;
+ 	struct dentry *dentry, *alias;
+@@ -1907,7 +1907,7 @@ int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags)
+ 	return nfs_do_access(inode, cred, nfs_open_permission_mask(openflags));
+ }
+ 
+-int nfs_permission(struct inode *inode, int mask)
++int nfs_permission(struct inode *inode, int mask, struct nameidata *nd)
+ {
+ 	struct rpc_cred *cred;
+ 	int res = 0;
+@@ -1917,7 +1917,7 @@ int nfs_permission(struct inode *inode, int mask)
+ 	if ((mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
+ 		goto out;
+ 	/* Is this sys_access() ? */
+-	if (mask & MAY_ACCESS)
++	if (nd != NULL && (nd->flags & LOOKUP_ACCESS))
+ 		goto force_lookup;
+ 
+ 	switch (inode->i_mode & S_IFMT) {
+@@ -1926,7 +1926,8 @@ int nfs_permission(struct inode *inode, int mask)
+ 		case S_IFREG:
+ 			/* NFSv4 has atomic_open... */
+ 			if (nfs_server_capable(inode, NFS_CAP_ATOMIC_OPEN)
+-					&& (mask & MAY_OPEN))
++					&& nd != NULL
++					&& (nd->flags & LOOKUP_OPEN))
+ 				goto out;
+ 			break;
+ 		case S_IFDIR:
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index 08f6b04..91f5069 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -116,7 +116,7 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
+ ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
+ {
+ 	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
+-			iocb->ki_filp->f_path.dentry->d_name.name,
++			iocb->ki_filp->f_dentry->d_name.name,
+ 			(long long) pos, nr_segs);
+ 
+ 	return -EINVAL;
+@@ -891,8 +891,8 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
+ 	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
+ 
+ 	dfprintk(FILE, "NFS: direct read(%s/%s, %zd@%Ld)\n",
+-		file->f_path.dentry->d_parent->d_name.name,
+-		file->f_path.dentry->d_name.name,
++		file->f_dentry->d_parent->d_name.name,
++		file->f_dentry->d_name.name,
+ 		count, (long long) pos);
+ 
+ 	retval = 0;
+@@ -948,8 +948,8 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
+ 	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);
+ 
+ 	dfprintk(FILE, "NFS: direct write(%s/%s, %zd@%Ld)\n",
+-		file->f_path.dentry->d_parent->d_name.name,
+-		file->f_path.dentry->d_name.name,
++		file->f_dentry->d_parent->d_name.name,
++		file->f_dentry->d_name.name,
+ 		count, (long long) pos);
+ 
+ 	retval = generic_write_checks(file, &pos, &count, 0);
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index 7846065..affdd75 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -45,16 +45,13 @@ static int  nfs_file_mmap(struct file *, struct vm_area_struct *);
+ static ssize_t nfs_file_splice_read(struct file *filp, loff_t *ppos,
+ 					struct pipe_inode_info *pipe,
+ 					size_t count, unsigned int flags);
+-static ssize_t nfs_file_read(struct kiocb *, const struct iovec *iov,
+-				unsigned long nr_segs, loff_t pos);
+-static ssize_t nfs_file_write(struct kiocb *, const struct iovec *iov,
+-				unsigned long nr_segs, loff_t pos);
++static ssize_t nfs_file_read(struct kiocb *iocb, char __user * buf, size_t count, loff_t pos);
++static ssize_t nfs_file_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos);
+ static int  nfs_file_flush(struct file *, fl_owner_t id);
+ static int  nfs_file_fsync(struct file *, struct dentry *dentry, int datasync);
+ static int nfs_check_flags(int flags);
+ static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl);
+ static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl);
+-static int nfs_setlease(struct file *file, long arg, struct file_lock **fl);
+ 
+ static struct vm_operations_struct nfs_file_vm_ops;
+ 
+@@ -77,17 +74,16 @@ const struct file_operations nfs_file_operations = {
+ 	.flock		= nfs_flock,
+ 	.splice_read	= nfs_file_splice_read,
+ 	.check_flags	= nfs_check_flags,
+-	.setlease	= nfs_setlease,
+ };
+ 
+-const struct inode_operations nfs_file_inode_operations = {
++struct inode_operations nfs_file_inode_operations = {
+ 	.permission	= nfs_permission,
+ 	.getattr	= nfs_getattr,
+ 	.setattr	= nfs_setattr,
+ };
+ 
+ #ifdef CONFIG_NFS_V3
+-const struct inode_operations nfs3_file_inode_operations = {
++struct inode_operations nfs3_file_inode_operations = {
+ 	.permission	= nfs_permission,
+ 	.getattr	= nfs_getattr,
+ 	.setattr	= nfs_setattr,
+@@ -120,8 +116,8 @@ nfs_file_open(struct inode *inode, struct file *filp)
+ 	int res;
+ 
+ 	dprintk("NFS: open file(%s/%s)\n",
+-			filp->f_path.dentry->d_parent->d_name.name,
+-			filp->f_path.dentry->d_name.name);
++			filp->f_dentry->d_parent->d_name.name,
++			filp->f_dentry->d_name.name);
+ 
+ 	res = nfs_check_flags(filp->f_flags);
+ 	if (res)
+@@ -135,7 +131,7 @@ nfs_file_open(struct inode *inode, struct file *filp)
+ static int
+ nfs_file_release(struct inode *inode, struct file *filp)
+ {
+-	struct dentry *dentry = filp->f_path.dentry;
++	struct dentry *dentry = filp->f_dentry;
+ 
+ 	dprintk("NFS: release(%s/%s)\n",
+ 			dentry->d_parent->d_name.name,
+@@ -178,11 +174,9 @@ force_reval:
+ 
+ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
+ {
+-	loff_t loff;
+-
+ 	dprintk("NFS: llseek file(%s/%s, %lld, %d)\n",
+-			filp->f_path.dentry->d_parent->d_name.name,
+-			filp->f_path.dentry->d_name.name,
++			filp->f_dentry->d_parent->d_name.name,
++			filp->f_dentry->d_name.name,
+ 			offset, origin);
+ 
+ 	/* origin == SEEK_END => we must revalidate the cached file length */
+@@ -192,10 +186,7 @@ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
+ 		if (retval < 0)
+ 			return (loff_t)retval;
+ 	}
+-	lock_kernel();	/* BKL needed? */
+-	loff = generic_file_llseek_unlocked(filp, offset, origin);
+-	unlock_kernel();
+-	return loff;
++	return remote_llseek(filp, offset, origin);
+ }
+ 
+ /*
+@@ -230,7 +221,7 @@ static int
+ nfs_file_flush(struct file *file, fl_owner_t id)
+ {
+ 	struct nfs_open_context *ctx = nfs_file_open_context(file);
+-	struct dentry	*dentry = file->f_path.dentry;
++	struct dentry	*dentry = file->f_dentry;
+ 	struct inode	*inode = dentry->d_inode;
+ 	int		status;
+ 
+@@ -250,16 +241,15 @@ nfs_file_flush(struct file *file, fl_owner_t id)
+ }
+ 
+ static ssize_t
+-nfs_file_read(struct kiocb *iocb, const struct iovec *iov,
+-		unsigned long nr_segs, loff_t pos)
++nfs_file_read(struct kiocb *iocb, char __user * buf, size_t count, loff_t pos)
+ {
+-	struct dentry * dentry = iocb->ki_filp->f_path.dentry;
++	struct dentry * dentry = iocb->ki_filp->f_dentry;
+ 	struct inode * inode = dentry->d_inode;
+ 	ssize_t result;
+-	size_t count = iov_length(iov, nr_segs);
++	struct iovec local_iov = { .iov_base = buf, .iov_len = count };
+ 
+ 	if (iocb->ki_filp->f_flags & O_DIRECT)
+-		return nfs_file_direct_read(iocb, iov, nr_segs, pos);
++		return nfs_file_direct_read(iocb, &local_iov, 1, pos);
+ 
+ 	dprintk("NFS: read(%s/%s, %lu@%lu)\n",
+ 		dentry->d_parent->d_name.name, dentry->d_name.name,
+@@ -268,7 +258,7 @@ nfs_file_read(struct kiocb *iocb, const struct iovec *iov,
+ 	result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
+ 	nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, count);
+ 	if (!result)
+-		result = generic_file_aio_read(iocb, iov, nr_segs, pos);
++		result = generic_file_aio_read(iocb, buf, count, pos);
+ 	return result;
+ }
+ 
+@@ -277,7 +267,7 @@ nfs_file_splice_read(struct file *filp, loff_t *ppos,
+ 		     struct pipe_inode_info *pipe, size_t count,
+ 		     unsigned int flags)
+ {
+-	struct dentry *dentry = filp->f_path.dentry;
++	struct dentry *dentry = filp->f_dentry;
+ 	struct inode *inode = dentry->d_inode;
+ 	ssize_t res;
+ 
+@@ -294,7 +284,7 @@ nfs_file_splice_read(struct file *filp, loff_t *ppos,
+ static int
+ nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
+ {
+-	struct dentry *dentry = file->f_path.dentry;
++	struct dentry *dentry = file->f_dentry;
+ 	struct inode *inode = dentry->d_inode;
+ 	int	status;
+ 
+@@ -337,44 +327,15 @@ nfs_file_fsync(struct file *file, struct dentry *dentry, int datasync)
+  * If the writer ends up delaying the write, the writer needs to
+  * increment the page use counts until he is done with the page.
+  */
+-static int nfs_write_begin(struct file *file, struct address_space *mapping,
+-			loff_t pos, unsigned len, unsigned flags,
+-			struct page **pagep, void **fsdata)
++static int nfs_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
+ {
+-	int ret;
+-	pgoff_t index;
+-	struct page *page;
+-	index = pos >> PAGE_CACHE_SHIFT;
+-
+-	dfprintk(PAGECACHE, "NFS: write_begin(%s/%s(%ld), %u@%lld)\n",
+-		file->f_path.dentry->d_parent->d_name.name,
+-		file->f_path.dentry->d_name.name,
+-		mapping->host->i_ino, len, (long long) pos);
+-
+-	page = __grab_cache_page(mapping, index);
+-	if (!page)
+-		return -ENOMEM;
+-	*pagep = page;
+-
+-	ret = nfs_flush_incompatible(file, page);
+-	if (ret) {
+-		unlock_page(page);
+-		page_cache_release(page);
+-	}
+-	return ret;
++	return nfs_flush_incompatible(file, page);
+ }
+ 
+-static int nfs_write_end(struct file *file, struct address_space *mapping,
+-			loff_t pos, unsigned len, unsigned copied,
+-			struct page *page, void *fsdata)
++static int nfs_commit_write(struct file *file, struct page *page, unsigned offset, unsigned to)
+ {
+-	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
+ 	int status;
+-
+-	dfprintk(PAGECACHE, "NFS: write_end(%s/%s(%ld), %u@%lld)\n",
+-		file->f_path.dentry->d_parent->d_name.name,
+-		file->f_path.dentry->d_name.name,
+-		mapping->host->i_ino, len, (long long) pos);
++	unsigned copied = to - offset;
+ 
+ 	/*
+ 	 * Zero any uninitialised parts of the page, and then mark the page
+@@ -382,14 +343,13 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
+ 	 */
+ 	if (!PageUptodate(page)) {
+ 		unsigned pglen = nfs_page_length(page);
+-		unsigned end = offset + len;
+ 
+ 		if (pglen == 0) {
+ 			zero_user_segments(page, 0, offset,
+-					end, PAGE_CACHE_SIZE);
++					to, PAGE_CACHE_SIZE);
+ 			SetPageUptodate(page);
+-		} else if (end >= pglen) {
+-			zero_user_segment(page, end, PAGE_CACHE_SIZE);
++		} else if (to >= pglen) {
++			zero_user_segment(page, to, PAGE_CACHE_SIZE);
+ 			if (offset == 0)
+ 				SetPageUptodate(page);
+ 		} else
+@@ -398,9 +358,6 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
+ 
+ 	status = nfs_updatepage(file, page, offset, copied);
+ 
+-	unlock_page(page);
+-	page_cache_release(page);
+-
+ 	if (status < 0)
+ 		return status;
+ 	return copied;
+@@ -424,34 +381,23 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
+ 	return 0;
+ }
+ 
+-static int nfs_launder_page(struct page *page)
+-{
+-	struct inode *inode = page->mapping->host;
+-
+-	dfprintk(PAGECACHE, "NFS: launder_page(%ld, %llu)\n",
+-		inode->i_ino, (long long)page_offset(page));
+-
+-	return nfs_wb_page(inode, page);
+-}
+-
+ const struct address_space_operations nfs_file_aops = {
+ 	.readpage = nfs_readpage,
+ 	.readpages = nfs_readpages,
+ 	.set_page_dirty = __set_page_dirty_nobuffers,
+ 	.writepage = nfs_writepage,
+ 	.writepages = nfs_writepages,
+-	.write_begin = nfs_write_begin,
+-	.write_end = nfs_write_end,
++	.prepare_write = nfs_prepare_write,
++	.commit_write = nfs_commit_write,
+ 	.invalidatepage = nfs_invalidate_page,
+ 	.releasepage = nfs_release_page,
+ 	.direct_IO = nfs_direct_IO,
+-	.launder_page = nfs_launder_page,
+ };
+ 
+ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct page *page)
+ {
+ 	struct file *filp = vma->vm_file;
+-	struct dentry *dentry = filp->f_path.dentry;
++	struct dentry *dentry = filp->f_dentry;
+ 	unsigned pagelen;
+ 	int ret = -EINVAL;
+ 	struct address_space *mapping;
+@@ -484,7 +430,8 @@ out_unlock:
+ }
+ 
+ static struct vm_operations_struct nfs_file_vm_ops = {
+-	.fault = filemap_fault,
++	.nopage		= filemap_nopage,
++	.populate	= filemap_populate,
+ 	.page_mkwrite = nfs_vm_page_mkwrite,
+ };
+ 
+@@ -500,16 +447,16 @@ static int nfs_need_sync_write(struct file *filp, struct inode *inode)
+ 	return 0;
+ }
+ 
+-static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
+-				unsigned long nr_segs, loff_t pos)
++static ssize_t
++nfs_file_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
+ {
+-	struct dentry * dentry = iocb->ki_filp->f_path.dentry;
++	struct dentry * dentry = iocb->ki_filp->f_dentry;
+ 	struct inode * inode = dentry->d_inode;
+ 	ssize_t result;
+-	size_t count = iov_length(iov, nr_segs);
++	struct iovec local_iov = { .iov_base = (void __user *)buf, .iov_len = count };
+ 
+ 	if (iocb->ki_filp->f_flags & O_DIRECT)
+-		return nfs_file_direct_write(iocb, iov, nr_segs, pos);
++		return nfs_file_direct_write(iocb, &local_iov, 1, pos);
+ 
+ 	dprintk("NFS: write(%s/%s, %lu@%Ld)\n",
+ 		dentry->d_parent->d_name.name, dentry->d_name.name,
+@@ -532,7 +479,7 @@ static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
+ 		goto out;
+ 
+ 	nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count);
+-	result = generic_file_aio_write(iocb, iov, nr_segs, pos);
++	result = generic_file_aio_write(iocb, buf, count, pos);
+ 	/* Return error values for O_SYNC and IS_SYNC() */
+ 	if (result >= 0 && nfs_need_sync_write(iocb->ki_filp, inode)) {
+ 		int err = nfs_do_fsync(nfs_file_open_context(iocb->ki_filp), inode);
+@@ -549,14 +496,20 @@ out_swapfile:
+ 
+ static int do_getlk(struct file *filp, int cmd, struct file_lock *fl)
+ {
++	struct file_lock cfl;
+ 	struct inode *inode = filp->f_mapping->host;
+ 	int status = 0;
+ 
+ 	lock_kernel();
+ 	/* Try local locking first */
+-	posix_test_lock(filp, fl);
++	posix_test_lock(filp, fl, &cfl);
+ 	if (fl->fl_type != F_UNLCK) {
+ 		/* found a conflict */
++		fl->fl_start = cfl.fl_start;
++		fl->fl_end = cfl.fl_end;
++		fl->fl_type = cfl.fl_type;
++		fl->fl_pid = cfl.fl_pid;
++
+ 		goto out;
+ 	}
+ 
+@@ -662,8 +615,8 @@ static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
+ 	int ret = -ENOLCK;
+ 
+ 	dprintk("NFS: lock(%s/%s, t=%x, fl=%x, r=%lld:%lld)\n",
+-			filp->f_path.dentry->d_parent->d_name.name,
+-			filp->f_path.dentry->d_name.name,
++			filp->f_dentry->d_parent->d_name.name,
++			filp->f_dentry->d_name.name,
+ 			fl->fl_type, fl->fl_flags,
+ 			(long long)fl->fl_start, (long long)fl->fl_end);
+ 
+@@ -695,8 +648,8 @@ out_err:
+ static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
+ {
+ 	dprintk("NFS: flock(%s/%s, t=%x, fl=%x)\n",
+-			filp->f_path.dentry->d_parent->d_name.name,
+-			filp->f_path.dentry->d_name.name,
++			filp->f_dentry->d_parent->d_name.name,
++			filp->f_dentry->d_name.name,
+ 			fl->fl_type, fl->fl_flags);
+ 
+ 	/*
+@@ -718,16 +671,3 @@ static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
+ 		return do_unlk(filp, cmd, fl);
+ 	return do_setlk(filp, cmd, fl);
+ }
+-
+-/*
+- * There is no protocol support for leases, so we have no way to implement
+- * them correctly in the face of opens by other clients.
+- */
+-static int nfs_setlease(struct file *file, long arg, struct file_lock **fl)
+-{
+-	dprintk("NFS: setlease(%s/%s, arg=%ld)\n",
+-			file->f_path.dentry->d_parent->d_name.name,
+-			file->f_path.dentry->d_name.name, arg);
+-
+-	return -EINVAL;
+-}
+diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
+index fae9719..5bf9b3c 100644
+--- a/fs/nfs/getroot.c
++++ b/fs/nfs/getroot.c
+@@ -30,7 +30,6 @@
+ #include <linux/nfs_idmap.h>
+ #include <linux/vfs.h>
+ #include <linux/namei.h>
+-#include <linux/mnt_namespace.h>
+ #include <linux/security.h>
+ 
+ #include <asm/system.h>
+diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
+index 86147b0..148aebe 100644
+--- a/fs/nfs/idmap.c
++++ b/fs/nfs/idmap.c
+@@ -376,7 +376,7 @@ idmap_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
+ static ssize_t
+ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
+ {
+-	struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
++	struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode);
+ 	struct idmap *idmap = (struct idmap *)rpci->private;
+ 	struct idmap_msg im_in, *im = &idmap->idmap_im;
+ 	struct idmap_hashtable *h;
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 52daefa..8e2b88a 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -612,7 +612,7 @@ static void put_nfs_open_context_sync(struct nfs_open_context *ctx)
+  */
+ static void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx)
+ {
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 	struct nfs_inode *nfsi = NFS_I(inode);
+ 
+ 	filp->private_data = get_nfs_open_context(ctx);
+@@ -644,7 +644,7 @@ struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_c
+ 
+ static void nfs_file_clear_open_context(struct file *filp)
+ {
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 	struct nfs_open_context *ctx = nfs_file_open_context(filp);
+ 
+ 	if (ctx) {
+@@ -667,7 +667,7 @@ int nfs_open(struct inode *inode, struct file *filp)
+ 	cred = rpc_lookup_cred();
+ 	if (IS_ERR(cred))
+ 		return PTR_ERR(cred);
+-	ctx = alloc_nfs_open_context(filp->f_path.mnt, filp->f_path.dentry, cred);
++	ctx = alloc_nfs_open_context(filp->f_vfsmnt, filp->f_dentry, cred);
+ 	put_rpccred(cred);
+ 	if (ctx == NULL)
+ 		return -ENOMEM;
+@@ -1242,7 +1242,7 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi)
+ #endif
+ }
+ 
+-static void init_once(void *foo)
++static void init_once(void *foo, struct kmem_cache *cachep, unsigned long temp)
+ {
+ 	struct nfs_inode *nfsi = (struct nfs_inode *) foo;
+ 
+@@ -1314,6 +1314,10 @@ static int __init init_nfs_fs(void)
+ {
+ 	int err;
+ 
++	err = init_mnt_writers();
++	if (err)
++		goto out6;
++
+ 	err = nfsiod_start();
+ 	if (err)
+ 		goto out6;
+diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
+index 66df08d..1e11b1d 100644
+--- a/fs/nfs/namespace.c
++++ b/fs/nfs/namespace.c
+@@ -107,29 +107,29 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
+ 
+ 	BUG_ON(IS_ROOT(dentry));
+ 	dprintk("%s: enter\n", __func__);
+-	dput(nd->path.dentry);
+-	nd->path.dentry = dget(dentry);
++	dput(nd->dentry);
++	nd->dentry = dget(dentry);
+ 
+ 	/* Look it up again */
+-	parent = dget_parent(nd->path.dentry);
++	parent = dget_parent(nd->dentry);
+ 	err = server->nfs_client->rpc_ops->lookup(parent->d_inode,
+-						  &nd->path.dentry->d_name,
++						  &nd->dentry->d_name,
+ 						  &fh, &fattr);
+ 	dput(parent);
+ 	if (err != 0)
+ 		goto out_err;
+ 
+ 	if (fattr.valid & NFS_ATTR_FATTR_V4_REFERRAL)
+-		mnt = nfs_do_refmount(nd->path.mnt, nd->path.dentry);
++		mnt = nfs_do_refmount(nd->mnt, nd->dentry);
+ 	else
+-		mnt = nfs_do_submount(nd->path.mnt, nd->path.dentry, &fh,
++		mnt = nfs_do_submount(nd->mnt, nd->dentry, &fh,
+ 				      &fattr);
+ 	err = PTR_ERR(mnt);
+ 	if (IS_ERR(mnt))
+ 		goto out_err;
+ 
+ 	mntget(mnt);
+-	err = do_add_mount(mnt, &nd->path, nd->path.mnt->mnt_flags|MNT_SHRINKABLE,
++	err = do_add_mount(mnt, nd, nd->mnt->mnt_flags|MNT_SHRINKABLE,
+ 			   &nfs_automount_list);
+ 	if (err < 0) {
+ 		mntput(mnt);
+@@ -137,9 +137,9 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
+ 			goto out_follow;
+ 		goto out_err;
+ 	}
+-	path_put(&nd->path);
+-	nd->path.mnt = mnt;
+-	nd->path.dentry = dget(mnt->mnt_root);
++	backport_path_put(nd);
++	nd->mnt = mnt;
++	nd->dentry = dget(mnt->mnt_root);
+ 	schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout);
+ out:
+ 	dprintk("%s: done, returned %d\n", __func__, err);
+@@ -147,22 +147,22 @@ out:
+ 	dprintk("<-- nfs_follow_mountpoint() = %d\n", err);
+ 	return ERR_PTR(err);
+ out_err:
+-	path_put(&nd->path);
++	backport_path_put(nd);
+ 	goto out;
+ out_follow:
+-	while (d_mountpoint(nd->path.dentry) &&
+-	       follow_down(&nd->path.mnt, &nd->path.dentry))
++	while (d_mountpoint(nd->dentry) &&
++	       follow_down(&nd->mnt, &nd->dentry))
+ 		;
+ 	err = 0;
+ 	goto out;
+ }
+ 
+-const struct inode_operations nfs_mountpoint_inode_operations = {
++struct inode_operations nfs_mountpoint_inode_operations = {
+ 	.follow_link	= nfs_follow_mountpoint,
+ 	.getattr	= nfs_getattr,
+ };
+ 
+-const struct inode_operations nfs_referral_inode_operations = {
++struct inode_operations nfs_referral_inode_operations = {
+ 	.follow_link	= nfs_follow_mountpoint,
+ };
+ 
+diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
+index 1e750e4..bdeef69 100644
+--- a/fs/nfs/nfs3proc.c
++++ b/fs/nfs/nfs3proc.c
+@@ -779,7 +779,7 @@ static void nfs3_proc_commit_setup(struct nfs_write_data *data, struct rpc_messa
+ static int
+ nfs3_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
+ {
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 
+ 	return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl);
+ }
+diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
+index ea79064..7a8e6fa 100644
+--- a/fs/nfs/nfs4_fs.h
++++ b/fs/nfs/nfs4_fs.h
+@@ -165,7 +165,7 @@ struct nfs4_state_recovery_ops {
+ };
+ 
+ extern struct dentry_operations nfs4_dentry_operations;
+-extern const struct inode_operations nfs4_dir_inode_operations;
++extern struct inode_operations nfs4_dir_inode_operations;
+ 
+ /* inode.c */
+ extern ssize_t nfs4_getxattr(struct dentry *, const char *, void *, size_t);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index c910413..02f1156 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1384,7 +1384,7 @@ struct dentry *
+ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+ {
+ 	struct path path = {
+-		.mnt = nd->path.mnt,
++		.mnt = nd->mnt,
+ 		.dentry = dentry,
+ 	};
+ 	struct dentry *parent;
+@@ -1421,8 +1421,8 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+ 	}
+ 	res = d_add_unique(dentry, igrab(state->inode));
+ 	if (res != NULL)
+-		path.dentry = res;
+-	nfs_set_verifier(path.dentry, nfs_save_change_attribute(dir));
++		dentry = res;
++	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ 	nfs_unblock_sillyrename(parent);
+ 	nfs4_intent_set_file(nd, &path, state);
+ 	return res;
+@@ -1432,7 +1432,7 @@ int
+ nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, struct nameidata *nd)
+ {
+ 	struct path path = {
+-		.mnt = nd->path.mnt,
++		.mnt = nd->mnt,
+ 		.dentry = dentry,
+ 	};
+ 	struct rpc_cred *cred;
+@@ -1880,7 +1880,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
+                  int flags, struct nameidata *nd)
+ {
+ 	struct path path = {
+-		.mnt = nd->path.mnt,
++		.mnt = nd->mnt,
+ 		.dentry = dentry,
+ 	};
+ 	struct nfs4_state *state;
+@@ -3671,7 +3671,7 @@ struct nfs4_state_recovery_ops nfs4_network_partition_recovery_ops = {
+ 	.recover_lock	= nfs4_lock_expired,
+ };
+ 
+-static const struct inode_operations nfs4_file_inode_operations = {
++static struct inode_operations nfs4_file_inode_operations = {
+ 	.permission	= nfs_permission,
+ 	.getattr	= nfs_getattr,
+ 	.setattr	= nfs_setattr,
+diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
+index 4dbb84d..c351a41 100644
+--- a/fs/nfs/proc.c
++++ b/fs/nfs/proc.c
+@@ -595,7 +595,7 @@ nfs_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
+ static int
+ nfs_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
+ {
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 
+ 	return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl);
+ }
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index e9b2017..fdb1d7c 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -201,7 +201,7 @@ static match_table_t nfs_secflavor_tokens = {
+ };
+ 
+ 
+-static void nfs_umount_begin(struct super_block *);
++static void nfs_umount_begin(struct vfsmount *vfsmnt, int flags);
+ static int  nfs_statfs(struct dentry *, struct kstatfs *);
+ static int  nfs_show_options(struct seq_file *, struct vfsmount *);
+ static int  nfs_show_stats(struct seq_file *, struct vfsmount *);
+@@ -228,7 +228,7 @@ struct file_system_type nfs_xdev_fs_type = {
+ 	.fs_flags	= FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+ };
+ 
+-static const struct super_operations nfs_sops = {
++static struct super_operations nfs_sops = {
+ 	.alloc_inode	= nfs_alloc_inode,
+ 	.destroy_inode	= nfs_destroy_inode,
+ 	.write_inode	= nfs_write_inode,
+@@ -274,7 +274,7 @@ struct file_system_type nfs4_referral_fs_type = {
+ 	.fs_flags	= FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+ };
+ 
+-static const struct super_operations nfs4_sops = {
++static struct super_operations nfs4_sops = {
+ 	.alloc_inode	= nfs_alloc_inode,
+ 	.destroy_inode	= nfs_destroy_inode,
+ 	.write_inode	= nfs_write_inode,
+@@ -287,10 +287,7 @@ static const struct super_operations nfs4_sops = {
+ };
+ #endif
+ 
+-static struct shrinker acl_shrinker = {
+-	.shrink		= nfs_access_cache_shrinker,
+-	.seeks		= DEFAULT_SEEKS,
+-};
++static struct shrinker *acl_shrinker;
+ 
+ /*
+  * Register the NFS filesystems
+@@ -299,7 +296,7 @@ int __init register_nfs_fs(void)
+ {
+ 	int ret;
+ 
+-        ret = register_filesystem(&nfs_fs_type);
++	ret = register_filesystem(&nfs_fs_type);
+ 	if (ret < 0)
+ 		goto error_0;
+ 
+@@ -311,7 +308,7 @@ int __init register_nfs_fs(void)
+ 	if (ret < 0)
+ 		goto error_2;
+ #endif
+-	register_shrinker(&acl_shrinker);
++	acl_shrinker = set_shrinker(DEFAULT_SEEKS, nfs_access_cache_shrinker);
+ 	return 0;
+ 
+ #ifdef CONFIG_NFS_V4
+@@ -329,7 +326,8 @@ error_0:
+  */
+ void __exit unregister_nfs_fs(void)
+ {
+-	unregister_shrinker(&acl_shrinker);
++	if (acl_shrinker != NULL)
++		remove_shrinker(acl_shrinker);
+ #ifdef CONFIG_NFS_V4
+ 	unregister_filesystem(&nfs4_fs_type);
+ #endif
+@@ -649,11 +647,13 @@ static int nfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
+  * Begin unmount by attempting to remove all automounted mountpoints we added
+  * in response to xdev traversals and referrals
+  */
+-static void nfs_umount_begin(struct super_block *sb)
++static void nfs_umount_begin(struct vfsmount *vfsmnt, int flags)
+ {
+-	struct nfs_server *server = NFS_SB(sb);
++	struct nfs_server *server = NFS_SB(vfsmnt->mnt_sb);
+ 	struct rpc_clnt *rpc;
+ 
++	if (!(flags & MNT_FORCE))
++		return;
+ 	/* -EIO all pending I/O */
+ 	rpc = server->client_acl;
+ 	if (!IS_ERR(rpc))
+diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c
+index 412738d..b17f14a 100644
+--- a/fs/nfs/symlink.c
++++ b/fs/nfs/symlink.c
+@@ -70,7 +70,7 @@ read_failed:
+ /*
+  * symlinks can't do much...
+  */
+-const struct inode_operations nfs_symlink_inode_operations = {
++struct inode_operations nfs_symlink_inode_operations = {
+ 	.readlink	= generic_readlink,
+ 	.follow_link	= nfs_follow_link,
+ 	.put_link	= page_put_link,
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 3229e21..7c1970e 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -13,7 +13,7 @@
+ #include <linux/file.h>
+ #include <linux/writeback.h>
+ #include <linux/swap.h>
+-
++#include <linux/mpage.h>
+ #include <linux/sunrpc/clnt.h>
+ #include <linux/nfs_fs.h>
+ #include <linux/nfs_mount.h>
+@@ -726,8 +726,8 @@ int nfs_updatepage(struct file *file, struct page *page,
+ 	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
+ 
+ 	dprintk("NFS:       nfs_updatepage(%s/%s %d@%lld)\n",
+-		file->f_path.dentry->d_parent->d_name.name,
+-		file->f_path.dentry->d_name.name, count,
++		file->f_dentry->d_parent->d_name.name,
++		file->f_dentry->d_name.name, count,
+ 		(long long)(page_offset(page) + offset));
+ 
+ 	/* If we're not using byte range locks, and we know the page
+diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
+index 9dc036f..860d944 100644
+--- a/fs/nfsd/export.c
++++ b/fs/nfsd/export.c
+@@ -168,14 +168,15 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
+ 			goto out;
+ 
+ 		dprintk("Found the path %s\n", buf);
+-		key.ek_path = nd.path;
++		key.ek_path.dentry = nd.dentry;
++		key.ek_path.mnt = nd.mnt;
+ 
+ 		ek = svc_expkey_update(&key, ek);
+ 		if (ek)
+ 			cache_put(&ek->h, &svc_expkey_cache);
+ 		else
+ 			err = -ENOMEM;
+-		path_put(&nd.path);
++		backport_path_put(&nd);
+ 	}
+ 	cache_flush();
+  out:
+@@ -204,7 +205,7 @@ static int expkey_show(struct seq_file *m,
+ 	if (test_bit(CACHE_VALID, &h->flags) && 
+ 	    !test_bit(CACHE_NEGATIVE, &h->flags)) {
+ 		seq_printf(m, " ");
+-		seq_path(m, &ek->ek_path, "\\ \t\n");
++		seq_path(m, ek->ek_path.mnt, ek->ek_path.dentry, "\\ \t\n");
+ 	}
+ 	seq_printf(m, "\n");
+ 	return 0;
+@@ -346,7 +347,7 @@ static void svc_export_request(struct cache_detail *cd,
+ 	char *pth;
+ 
+ 	qword_add(bpp, blen, exp->ex_client->name);
+-	pth = d_path(&exp->ex_path, *bpp, *blen);
++	pth = d_path(exp->ex_path.dentry, exp->ex_path.mnt, *bpp, *blen);
+ 	if (IS_ERR(pth)) {
+ 		/* is this correct? */
+ 		(*bpp)[0] = '\n';
+@@ -385,7 +386,7 @@ static int check_export(struct inode *inode, int flags, unsigned char *uuid)
+ 	}
+ 
+ 	if (!inode->i_sb->s_export_op ||
+-	    !inode->i_sb->s_export_op->fh_to_dentry) {
++	    !inode->i_sb->s_export_op->get_dentry) {
+ 		dprintk("exp_export: export of invalid fs type.\n");
+ 		return -EINVAL;
+ 	}
+@@ -504,7 +505,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
+ 	struct svc_export exp, *expp;
+ 	int an_int;
+ 
+-	nd.path.dentry = NULL;
++	nd.dentry = NULL;
+ 	exp.ex_pathname = NULL;
+ 
+ 	/* fs locations */
+@@ -544,8 +545,8 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
+ 
+ 	exp.h.flags = 0;
+ 	exp.ex_client = dom;
+-	exp.ex_path.mnt = nd.path.mnt;
+-	exp.ex_path.dentry = nd.path.dentry;
++	exp.ex_path.mnt = nd.mnt;
++	exp.ex_path.dentry = nd.dentry;
+ 	exp.ex_pathname = kstrdup(buf, GFP_KERNEL);
+ 	err = -ENOMEM;
+ 	if (!exp.ex_pathname)
+@@ -607,7 +608,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
+ 				goto out;
+ 		}
+ 
+-		err = check_export(nd.path.dentry->d_inode, exp.ex_flags,
++		err = check_export(nd.dentry->d_inode, exp.ex_flags,
+ 				   exp.ex_uuid);
+ 		if (err) goto out;
+ 	}
+@@ -626,8 +627,8 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
+ 	nfsd4_fslocs_free(&exp.ex_fslocs);
+ 	kfree(exp.ex_uuid);
+ 	kfree(exp.ex_pathname);
+-	if (nd.path.dentry)
+-		path_put(&nd.path);
++	if (nd.dentry)
++		backport_path_put(&nd);
+  out_no_path:
+ 	if (dom)
+ 		auth_domain_put(dom);
+@@ -650,7 +651,7 @@ static int svc_export_show(struct seq_file *m,
+ 		return 0;
+ 	}
+ 	exp = container_of(h, struct svc_export, h);
+-	seq_path(m, &exp->ex_path, " \t\n\\");
++	seq_path(m, exp->ex_path.mnt, exp->ex_path.dentry, " \t\n\\");
+ 	seq_putc(m, '\t');
+ 	seq_escape(m, exp->ex_client->name, " \t\n\\");
+ 	seq_putc(m, '(');
+@@ -672,6 +673,7 @@ static int svc_export_show(struct seq_file *m,
+ 	seq_puts(m, ")\n");
+ 	return 0;
+ }
++
+ static int svc_export_match(struct cache_head *a, struct cache_head *b)
+ {
+ 	struct svc_export *orig = container_of(a, struct svc_export, h);
+@@ -1026,7 +1028,7 @@ exp_export(struct nfsctl_export *nxp)
+ 		goto out_put_clp;
+ 	err = -EINVAL;
+ 
+-	exp = exp_get_by_name(clp, nd.path.mnt, nd.path.dentry, NULL);
++	exp = exp_get_by_name(clp, nd.mnt, nd.dentry, NULL);
+ 
+ 	memset(&new, 0, sizeof(new));
+ 
+@@ -1034,8 +1036,8 @@ exp_export(struct nfsctl_export *nxp)
+ 	if ((nxp->ex_flags & NFSEXP_FSID) &&
+ 	    (!IS_ERR(fsid_key = exp_get_fsid_key(clp, nxp->ex_dev))) &&
+ 	    fsid_key->ek_path.mnt &&
+-	    (fsid_key->ek_path.mnt != nd.path.mnt ||
+-	     fsid_key->ek_path.dentry != nd.path.dentry))
++	    (fsid_key->ek_path.mnt != nd.mnt ||
++	     fsid_key->ek_path.dentry != nd.dentry))
+ 		goto finish;
+ 
+ 	if (!IS_ERR(exp)) {
+@@ -1051,7 +1053,7 @@ exp_export(struct nfsctl_export *nxp)
+ 		goto finish;
+ 	}
+ 
+-	err = check_export(nd.path.dentry->d_inode, nxp->ex_flags, NULL);
++	err = check_export(nd.dentry->d_inode, nxp->ex_flags, NULL);
+ 	if (err) goto finish;
+ 
+ 	err = -ENOMEM;
+@@ -1064,7 +1066,8 @@ exp_export(struct nfsctl_export *nxp)
+ 	if (!new.ex_pathname)
+ 		goto finish;
+ 	new.ex_client = clp;
+-	new.ex_path = nd.path;
++	new.ex_path.mnt = nd.mnt;
++	new.ex_path.dentry = nd.dentry;
+ 	new.ex_flags = nxp->ex_flags;
+ 	new.ex_anon_uid = nxp->ex_anon_uid;
+ 	new.ex_anon_gid = nxp->ex_anon_gid;
+@@ -1090,7 +1093,7 @@ finish:
+ 		exp_put(exp);
+ 	if (fsid_key && !IS_ERR(fsid_key))
+ 		cache_put(&fsid_key->h, &svc_expkey_cache);
+-	path_put(&nd.path);
++	backport_path_put(&nd);
+ out_put_clp:
+ 	auth_domain_put(clp);
+ out_unlock:
+@@ -1143,8 +1146,8 @@ exp_unexport(struct nfsctl_export *nxp)
+ 		goto out_domain;
+ 
+ 	err = -EINVAL;
+-	exp = exp_get_by_name(dom, nd.path.mnt, nd.path.dentry, NULL);
+-	path_put(&nd.path);
++	exp = exp_get_by_name(dom, nd.mnt, nd.dentry, NULL);
++	backport_path_put(&nd);
+ 	if (IS_ERR(exp))
+ 		goto out_domain;
+ 
+@@ -1180,12 +1183,12 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
+ 		printk("nfsd: exp_rootfh path not found %s", path);
+ 		return err;
+ 	}
+-	inode = nd.path.dentry->d_inode;
++	inode = nd.dentry->d_inode;
+ 
+ 	dprintk("nfsd: exp_rootfh(%s [%p] %s:%s/%ld)\n",
+-		 path, nd.path.dentry, clp->name,
++		 path, nd.dentry, clp->name,
+ 		 inode->i_sb->s_id, inode->i_ino);
+-	exp = exp_parent(clp, nd.path.mnt, nd.path.dentry, NULL);
++	exp = exp_parent(clp, nd.mnt, nd.dentry, NULL);
+ 	if (IS_ERR(exp)) {
+ 		err = PTR_ERR(exp);
+ 		goto out;
+@@ -1195,7 +1198,7 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
+ 	 * fh must be initialized before calling fh_compose
+ 	 */
+ 	fh_init(&fh, maxsize);
+-	if (fh_compose(&fh, exp, nd.path.dentry, NULL))
++	if (fh_compose(&fh, exp, nd.dentry, NULL))
+ 		err = -EINVAL;
+ 	else
+ 		err = 0;
+@@ -1203,7 +1206,7 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
+ 	fh_put(&fh);
+ 	exp_put(exp);
+ out:
+-	path_put(&nd.path);
++	backport_path_put(&nd);
+ 	return err;
+ }
+ 
+diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
+index 145b3c8..ad22c29 100644
+--- a/fs/nfsd/nfs4recover.c
++++ b/fs/nfsd/nfs4recover.c
+@@ -121,9 +121,9 @@ out_no_tfm:
+ static void
+ nfsd4_sync_rec_dir(void)
+ {
+-	mutex_lock(&rec_dir.path.dentry->d_inode->i_mutex);
+-	nfsd_sync_dir(rec_dir.path.dentry);
+-	mutex_unlock(&rec_dir.path.dentry->d_inode->i_mutex);
++	mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
++	nfsd_sync_dir(rec_dir.dentry);
++	mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
+ }
+ 
+ int
+@@ -143,9 +143,9 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
+ 	nfs4_save_user(&uid, &gid);
+ 
+ 	/* lock the parent */
+-	mutex_lock(&rec_dir.path.dentry->d_inode->i_mutex);
++	mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
+ 
+-	dentry = lookup_one_len(dname, rec_dir.path.dentry, HEXDIR_LEN-1);
++	dentry = lookup_one_len(dname, rec_dir.dentry, HEXDIR_LEN-1);
+ 	if (IS_ERR(dentry)) {
+ 		status = PTR_ERR(dentry);
+ 		goto out_unlock;
+@@ -155,15 +155,15 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
+ 		dprintk("NFSD: nfsd4_create_clid_dir: DIRECTORY EXISTS\n");
+ 		goto out_put;
+ 	}
+-	status = mnt_want_write(rec_dir.path.mnt);
++	status = mnt_want_write(rec_dir.mnt);
+ 	if (status)
+ 		goto out_put;
+-	status = vfs_mkdir(rec_dir.path.dentry->d_inode, dentry, S_IRWXU);
+-	mnt_drop_write(rec_dir.path.mnt);
++	status = vfs_mkdir(rec_dir.dentry->d_inode, dentry, S_IRWXU);
++	mnt_drop_write(rec_dir.mnt);
+ out_put:
+ 	dput(dentry);
+ out_unlock:
+-	mutex_unlock(&rec_dir.path.dentry->d_inode->i_mutex);
++	mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
+ 	if (status == 0) {
+ 		clp->cl_firststate = 1;
+ 		nfsd4_sync_rec_dir();
+@@ -226,7 +226,7 @@ nfsd4_list_rec_dir(struct dentry *dir, recdir_func *f)
+ 
+ 	nfs4_save_user(&uid, &gid);
+ 
+-	filp = dentry_open(dget(dir), mntget(rec_dir.path.mnt), O_RDONLY);
++	filp = dentry_open(dget(dir), mntget(rec_dir.mnt), O_RDONLY);
+ 	status = PTR_ERR(filp);
+ 	if (IS_ERR(filp))
+ 		goto out;
+@@ -291,9 +291,9 @@ nfsd4_unlink_clid_dir(char *name, int namlen)
+ 
+ 	dprintk("NFSD: nfsd4_unlink_clid_dir. name %.*s\n", namlen, name);
+ 
+-	mutex_lock(&rec_dir.path.dentry->d_inode->i_mutex);
+-	dentry = lookup_one_len(name, rec_dir.path.dentry, namlen);
+-	mutex_unlock(&rec_dir.path.dentry->d_inode->i_mutex);
++	mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
++	dentry = lookup_one_len(name, rec_dir.dentry, namlen);
++	mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
+ 	if (IS_ERR(dentry)) {
+ 		status = PTR_ERR(dentry);
+ 		return status;
+@@ -302,7 +302,7 @@ nfsd4_unlink_clid_dir(char *name, int namlen)
+ 	if (!dentry->d_inode)
+ 		goto out;
+ 
+-	status = nfsd4_clear_clid_dir(rec_dir.path.dentry, dentry);
++	status = nfsd4_clear_clid_dir(rec_dir.dentry, dentry);
+ out:
+ 	dput(dentry);
+ 	return status;
+@@ -318,7 +318,7 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp)
+ 	if (!rec_dir_init || !clp->cl_firststate)
+ 		return;
+ 
+-	status = mnt_want_write(rec_dir.path.mnt);
++	status = mnt_want_write(rec_dir.mnt);
+ 	if (status)
+ 		goto out;
+ 	clp->cl_firststate = 0;
+@@ -327,7 +327,7 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp)
+ 	nfs4_reset_user(uid, gid);
+ 	if (status == 0)
+ 		nfsd4_sync_rec_dir();
+-	mnt_drop_write(rec_dir.path.mnt);
++	mnt_drop_write(rec_dir.mnt);
+ out:
+ 	if (status)
+ 		printk("NFSD: Failed to remove expired client state directory"
+@@ -357,17 +357,17 @@ nfsd4_recdir_purge_old(void) {
+ 
+ 	if (!rec_dir_init)
+ 		return;
+-	status = mnt_want_write(rec_dir.path.mnt);
++	status = mnt_want_write(rec_dir.mnt);
+ 	if (status)
+ 		goto out;
+-	status = nfsd4_list_rec_dir(rec_dir.path.dentry, purge_old);
++	status = nfsd4_list_rec_dir(rec_dir.dentry, purge_old);
+ 	if (status == 0)
+ 		nfsd4_sync_rec_dir();
+-	mnt_drop_write(rec_dir.path.mnt);
++	mnt_drop_write(rec_dir.mnt);
+ out:
+ 	if (status)
+ 		printk("nfsd4: failed to purge old clients from recovery"
+-			" directory %s\n", rec_dir.path.dentry->d_name.name);
++			" directory %s\n", rec_dir.dentry->d_name.name);
+ }
+ 
+ static int
+@@ -387,10 +387,10 @@ int
+ nfsd4_recdir_load(void) {
+ 	int status;
+ 
+-	status = nfsd4_list_rec_dir(rec_dir.path.dentry, load_recdir);
++	status = nfsd4_list_rec_dir(rec_dir.dentry, load_recdir);
+ 	if (status)
+ 		printk("nfsd4: failed loading clients from recovery"
+-			" directory %s\n", rec_dir.path.dentry->d_name.name);
++			" directory %s\n", rec_dir.dentry->d_name.name);
+ 	return status;
+ }
+ 
+@@ -429,5 +429,5 @@ nfsd4_shutdown_recdir(void)
+ 	if (!rec_dir_init)
+ 		return;
+ 	rec_dir_init = 0;
+-	path_put(&rec_dir.path);
++	backport_path_put(&rec_dir);
+ }
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 1578d7a..1c6df07 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1576,7 +1576,7 @@ static __be32
+ nfs4_upgrade_open(struct svc_rqst *rqstp, struct svc_fh *cur_fh, struct nfs4_stateid *stp, struct nfsd4_open *open)
+ {
+ 	struct file *filp = stp->st_vfs_file;
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 	unsigned int share_access, new_writer;
+ 	__be32 status;
+ 
+@@ -1923,7 +1923,7 @@ search_close_lru(u32 st_id, int flags)
+ static inline int
+ nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stateid *stp)
+ {
+-	return fhp->fh_dentry->d_inode != stp->st_vfs_file->f_path.dentry->d_inode;
++	return fhp->fh_dentry->d_inode != stp->st_vfs_file->f_dentry->d_inode;
+ }
+ 
+ static int
+@@ -2838,7 +2838,7 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 	 * only the dentry:inode set.
+ 	 */
+ 	memset(&file, 0, sizeof (struct file));
+-	file.f_path.dentry = cstate->current_fh.fh_dentry;
++	file.f_dentry = cstate->current_fh.fh_dentry;
+ 
+ 	status = nfs_ok;
+ 	error = vfs_test_lock(&file, &file_lock);
+@@ -2934,7 +2934,7 @@ static int
+ check_for_locks(struct file *filp, struct nfs4_stateowner *lowner)
+ {
+ 	struct file_lock **flpp;
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 	int status = 0;
+ 
+ 	lock_kernel();
+@@ -3294,11 +3294,11 @@ nfs4_reset_recoverydir(char *recdir)
+ 	if (status)
+ 		return status;
+ 	status = -ENOTDIR;
+-	if (S_ISDIR(nd.path.dentry->d_inode->i_mode)) {
++	if (S_ISDIR(nd.dentry->d_inode->i_mode)) {
+ 		nfs4_set_recdir(recdir);
+ 		status = 0;
+ 	}
+-	path_put(&nd.path);
++	backport_path_put(&nd);
+ 	return status;
+ }
+ 
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index c53e65f..fc2871b 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -121,7 +121,7 @@ static ssize_t (*write_op[])(struct file *, char *, size_t) = {
+ 
+ static ssize_t nfsctl_transaction_write(struct file *file, const char __user *buf, size_t size, loff_t *pos)
+ {
+-	ino_t ino =  file->f_path.dentry->d_inode->i_ino;
++	ino_t ino = file->f_dentry->d_inode->i_ino;
+ 	char *data;
+ 	ssize_t rv;
+ 
+@@ -360,9 +360,9 @@ static ssize_t failover_unlock_fs(struct file *file, char *buf, size_t size)
+ 	if (error)
+ 		return error;
+ 
+-	error = nlmsvc_unlock_all_by_sb(nd.path.mnt->mnt_sb);
++	error = nlmsvc_unlock_all_by_sb(nd.mnt->mnt_sb);
+ 
+-	path_put(&nd.path);
++	backport_path_put(&nd);
+ 	return error;
+ }
+ 
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index 80292ff..47eb160 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -574,3 +574,5 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
+ 	nfsd_cache_update(rqstp, proc->pc_cachetype, statp + 1);
+ 	return 1;
+ }
++
++MODULE_LICENSE("Dual BSD/GPL");
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 18060be..ca19ab2 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -23,7 +23,6 @@
+ #include <linux/file.h>
+ #include <linux/mount.h>
+ #include <linux/major.h>
+-#include <linux/splice.h>
+ #include <linux/proc_fs.h>
+ #include <linux/stat.h>
+ #include <linux/fcntl.h>
+@@ -97,7 +96,7 @@ static struct raparm_hbucket	raparm_hash[RAPARM_HASH_SIZE];
+  */
+ int
+ nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp, 
+-		        struct svc_export **expp)
++			struct svc_export **expp)
+ {
+ 	struct svc_export *exp = *expp, *exp2 = NULL;
+ 	struct dentry *dentry = *dpp;
+@@ -765,11 +764,11 @@ static inline int nfsd_dosync(struct file *filp, struct dentry *dp,
+ static int
+ nfsd_sync(struct file *filp)
+ {
+-        int err;
+-	struct inode *inode = filp->f_path.dentry->d_inode;
+-	dprintk("nfsd: sync file %s\n", filp->f_path.dentry->d_name.name);
++	int err;
++	struct inode *inode = filp->f_dentry->d_inode;
++	dprintk("nfsd: sync file %s\n", filp->f_dentry->d_name.name);
+ 	mutex_lock(&inode->i_mutex);
+-	err=nfsd_dosync(filp, filp->f_path.dentry, filp->f_op);
++	err=nfsd_dosync(filp, filp->f_dentry, filp->f_op);
+ 	mutex_unlock(&inode->i_mutex);
+ 
+ 	return err;
+@@ -828,53 +827,39 @@ found:
+ 	return ra;
+ }
+ 
+-/*
+- * Grab and keep cached pages associated with a file in the svc_rqst
+- * so that they can be passed to the network sendmsg/sendpage routines
+- * directly. They will be released after the sending has completed.
+- */
+ static int
+-nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
+-		  struct splice_desc *sd)
++nfsd_read_actor(read_descriptor_t *desc, struct page *page, unsigned long offset , unsigned long size)
+ {
+-	struct svc_rqst *rqstp = sd->u.data;
++	unsigned long count = desc->count;
++	struct svc_rqst *rqstp = desc->arg.data;
+ 	struct page **pp = rqstp->rq_respages + rqstp->rq_resused;
+-	struct page *page = buf->page;
+-	size_t size;
+-	int ret;
+ 
+-	ret = buf->ops->confirm(pipe, buf);
+-	if (unlikely(ret))
+-		return ret;
+-
+-	size = sd->len;
++	if (size > count)
++		size = count;
+ 
+ 	if (rqstp->rq_res.page_len == 0) {
+ 		get_page(page);
+-		put_page(*pp);
+-		*pp = page;
+-		rqstp->rq_resused++;
+-		rqstp->rq_res.page_base = buf->offset;
++		if (*pp)
++			put_page(*pp);
++		rqstp->rq_respages[rqstp->rq_resused++] = page;
++		rqstp->rq_res.page_base = offset;
+ 		rqstp->rq_res.page_len = size;
+-	} else if (page != pp[-1]) {
++	} else if (page != rqstp->rq_respages[rqstp->rq_resused-1]) {
+ 		get_page(page);
+ 		if (*pp)
+ 			put_page(*pp);
+ 		*pp = page;
+-		rqstp->rq_resused++;
++		rqstp->rq_respages[rqstp->rq_resused++] = page;
+ 		rqstp->rq_res.page_len += size;
+-	} else
++	} else {
+ 		rqstp->rq_res.page_len += size;
++	}
+ 
++	desc->count = count - size;
++	desc->written += size;
+ 	return size;
+ }
+ 
+-static int nfsd_direct_splice_actor(struct pipe_inode_info *pipe,
+-				    struct splice_desc *sd)
+-{
+-	return __splice_from_pipe(pipe, sd, nfsd_splice_actor);
+-}
+-
+ static inline int svc_msnfs(struct svc_fh *ffhp)
+ {
+ #ifdef MSNFS
+@@ -886,7 +871,7 @@ static inline int svc_msnfs(struct svc_fh *ffhp)
+ 
+ static __be32
+ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+-              loff_t offset, struct kvec *vec, int vlen, unsigned long *count)
++	      loff_t offset, struct kvec *vec, int vlen, unsigned long *count)
+ {
+ 	struct inode *inode;
+ 	struct raparms	*ra;
+@@ -895,7 +880,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+ 	int		host_err;
+ 
+ 	err = nfserr_perm;
+-	inode = file->f_path.dentry->d_inode;
++	inode = file->f_dentry->d_inode;
+ 
+ 	if (svc_msnfs(fhp) && !lock_may_read(inode, offset, *count))
+ 		goto out;
+@@ -906,16 +891,9 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+ 	if (ra && ra->p_set)
+ 		file->f_ra = ra->p_ra;
+ 
+-	if (file->f_op->splice_read && rqstp->rq_splice_ok) {
+-		struct splice_desc sd = {
+-			.len		= 0,
+-			.total_len	= *count,
+-			.pos		= offset,
+-			.u.data		= rqstp,
+-		};
+-
++	if (file->f_op->sendfile && rqstp->rq_sendfile_ok) {
+ 		rqstp->rq_resused = 1;
+-		host_err = splice_direct_to_actor(file, &sd, nfsd_direct_splice_actor);
++		host_err = file->f_op->sendfile(file, &offset, *count, nfsd_read_actor, rqstp);
+ 	} else {
+ 		oldfs = get_fs();
+ 		set_fs(KERNEL_DS);
+@@ -937,7 +915,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+ 		nfsdstats.io_read += host_err;
+ 		*count = host_err;
+ 		err = 0;
+-		fsnotify_access(file->f_path.dentry);
++		fsnotify_access(file->f_dentry);
+ 	} else 
+ 		err = nfserrno(host_err);
+ out:
+@@ -971,11 +949,11 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+ 	err = nfserr_perm;
+ 
+ 	if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
+-		(!lock_may_write(file->f_path.dentry->d_inode, offset, cnt)))
++		(!lock_may_write(file->f_dentry->d_inode, offset, cnt)))
+ 		goto out;
+ #endif
+ 
+-	dentry = file->f_path.dentry;
++	dentry = file->f_dentry;
+ 	inode = dentry->d_inode;
+ 	exp   = fhp->fh_export;
+ 
+@@ -1004,7 +982,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+ 	set_fs(oldfs);
+ 	if (host_err >= 0) {
+ 		nfsdstats.io_write += cnt;
+-		fsnotify_modify(file->f_path.dentry);
++		fsnotify_modify(file->f_dentry);
+ 	}
+ 
+ 	/* clear setuid/setgid flag after write */
+@@ -1129,7 +1107,7 @@ out:
+  */
+ __be32
+ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
+-               loff_t offset, unsigned long count)
++	       loff_t offset, unsigned long count)
+ {
+ 	struct file	*file;
+ 	__be32		err;
+@@ -1316,7 +1294,7 @@ __be32
+ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 		char *fname, int flen, struct iattr *iap,
+ 		struct svc_fh *resfhp, int createmode, u32 *verifier,
+-	        int *truncp, int *created)
++		int *truncp, int *created)
+ {
+ 	struct dentry	*dentry, *dchild = NULL;
+ 	struct inode	*dirp;
+diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
+index 27e772c..d932fb1 100644
+--- a/include/linux/exportfs.h
++++ b/include/linux/exportfs.h
+@@ -89,85 +89,9 @@ struct fid {
+ 	};
+ };
+ 
+-/**
+- * struct export_operations - for nfsd to communicate with file systems
+- * @encode_fh:      encode a file handle fragment from a dentry
+- * @fh_to_dentry:   find the implied object and get a dentry for it
+- * @fh_to_parent:   find the implied object's parent and get a dentry for it
+- * @get_name:       find the name for a given inode in a given directory
+- * @get_parent:     find the parent of a given directory
+- *
+- * See Documentation/filesystems/Exporting for details on how to use
+- * this interface correctly.
+- *
+- * encode_fh:
+- *    @encode_fh should store in the file handle fragment @fh (using at most
+- *    @max_len bytes) information that can be used by @decode_fh to recover the
+- *    file refered to by the &struct dentry @de.  If the @connectable flag is
+- *    set, the encode_fh() should store sufficient information so that a good
+- *    attempt can be made to find not only the file but also it's place in the
+- *    filesystem.   This typically means storing a reference to de->d_parent in
+- *    the filehandle fragment.  encode_fh() should return the number of bytes
+- *    stored or a negative error code such as %-ENOSPC
+- *
+- * fh_to_dentry:
+- *    @fh_to_dentry is given a &struct super_block (@sb) and a file handle
+- *    fragment (@fh, @fh_len). It should return a &struct dentry which refers
+- *    to the same file that the file handle fragment refers to.  If it cannot,
+- *    it should return a %NULL pointer if the file was found but no acceptable
+- *    &dentries were available, or an %ERR_PTR error code indicating why it
+- *    couldn't be found (e.g. %ENOENT or %ENOMEM).  Any suitable dentry can be
+- *    returned including, if necessary, a new dentry created with d_alloc_root.
+- *    The caller can then find any other extant dentries by following the
+- *    d_alias links.
+- *
+- * fh_to_parent:
+- *    Same as @fh_to_dentry, except that it returns a pointer to the parent
+- *    dentry if it was encoded into the filehandle fragment by @encode_fh.
+- *
+- * get_name:
+- *    @get_name should find a name for the given @child in the given @parent
+- *    directory.  The name should be stored in the @name (with the
+- *    understanding that it is already pointing to a a %NAME_MAX+1 sized
+- *    buffer.   get_name() should return %0 on success, a negative error code
+- *    or error.  @get_name will be called without @parent->i_mutex held.
+- *
+- * get_parent:
+- *    @get_parent should find the parent directory for the given @child which
+- *    is also a directory.  In the event that it cannot be found, or storage
+- *    space cannot be allocated, a %ERR_PTR should be returned.
+- *
+- * Locking rules:
+- *    get_parent is called with child->d_inode->i_mutex down
+- *    get_name is not (which is possibly inconsistent)
+- */
+-
+-struct export_operations {
+-	int (*encode_fh)(struct dentry *de, __u32 *fh, int *max_len,
+-			int connectable);
+-	struct dentry * (*fh_to_dentry)(struct super_block *sb, struct fid *fid,
+-			int fh_len, int fh_type);
+-	struct dentry * (*fh_to_parent)(struct super_block *sb, struct fid *fid,
+-			int fh_len, int fh_type);
+-	int (*get_name)(struct dentry *parent, char *name,
+-			struct dentry *child);
+-	struct dentry * (*get_parent)(struct dentry *child);
+-};
+-
+ extern int exportfs_encode_fh(struct dentry *dentry, struct fid *fid,
+ 	int *max_len, int connectable);
+ extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
+ 	int fh_len, int fileid_type, int (*acceptable)(void *, struct dentry *),
+ 	void *context);
+-
+-/*
+- * Generic helpers for filesystems.
+- */
+-extern struct dentry *generic_fh_to_dentry(struct super_block *sb,
+-	struct fid *fid, int fh_len, int fh_type,
+-	struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
+-extern struct dentry *generic_fh_to_parent(struct super_block *sb,
+-	struct fid *fid, int fh_len, int fh_type,
+-	struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
+-
+ #endif /* LINUX_EXPORTFS_H */
+diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
+index dbb87ab..9236e80 100644
+--- a/include/linux/lockd/lockd.h
++++ b/include/linux/lockd/lockd.h
+@@ -230,7 +230,7 @@ int           nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr);
+ 
+ static inline struct inode *nlmsvc_file_inode(struct nlm_file *file)
+ {
+-	return file->f_file->f_path.dentry->d_inode;
++	return file->f_file->f_dentry->d_inode;
+ }
+ 
+ /*
+diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
+index 78a5922..e59d828 100644
+--- a/include/linux/nfs_fs.h
++++ b/include/linux/nfs_fs.h
+@@ -9,6 +9,7 @@
+ #ifndef _LINUX_NFS_FS_H
+ #define _LINUX_NFS_FS_H
+ 
++#include <linux/path.h>
+ #include <linux/magic.h>
+ 
+ /* Default timeout values */
+@@ -331,7 +332,7 @@ extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *);
+ extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr);
+ extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr);
+ extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
+-extern int nfs_permission(struct inode *, int);
++extern int nfs_permission(struct inode *, int, struct nameidata *);
+ extern int nfs_open(struct inode *, struct file *);
+ extern int nfs_release(struct inode *, struct file *);
+ extern int nfs_attribute_timeout(struct inode *inode);
+@@ -358,9 +359,9 @@ static inline void nfs_fattr_init(struct nfs_fattr *fattr)
+ /*
+  * linux/fs/nfs/file.c
+  */
+-extern const struct inode_operations nfs_file_inode_operations;
++extern struct inode_operations nfs_file_inode_operations;
+ #ifdef CONFIG_NFS_V3
+-extern const struct inode_operations nfs3_file_inode_operations;
++extern struct inode_operations nfs3_file_inode_operations;
+ #endif /* CONFIG_NFS_V3 */
+ extern const struct file_operations nfs_file_operations;
+ extern const struct address_space_operations nfs_file_aops;
+@@ -408,9 +409,9 @@ extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
+ /*
+  * linux/fs/nfs/dir.c
+  */
+-extern const struct inode_operations nfs_dir_inode_operations;
++extern struct inode_operations nfs_dir_inode_operations;
+ #ifdef CONFIG_NFS_V3
+-extern const struct inode_operations nfs3_dir_inode_operations;
++extern struct inode_operations nfs3_dir_inode_operations;
+ #endif /* CONFIG_NFS_V3 */
+ extern const struct file_operations nfs_dir_operations;
+ extern struct dentry_operations nfs_dentry_operations;
+@@ -423,7 +424,7 @@ extern void nfs_access_zap_cache(struct inode *inode);
+ /*
+  * linux/fs/nfs/symlink.c
+  */
+-extern const struct inode_operations nfs_symlink_inode_operations;
++extern struct inode_operations nfs_symlink_inode_operations;
+ 
+ /*
+  * linux/fs/nfs/sysctl.c
+@@ -439,8 +440,8 @@ extern void nfs_unregister_sysctl(void);
+ /*
+  * linux/fs/nfs/namespace.c
+  */
+-extern const struct inode_operations nfs_mountpoint_inode_operations;
+-extern const struct inode_operations nfs_referral_inode_operations;
++extern struct inode_operations nfs_mountpoint_inode_operations;
++extern struct inode_operations nfs_referral_inode_operations;
+ extern int nfs_mountpoint_expiry_timeout;
+ extern void nfs_release_automount_timer(void);
+ 
+diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
+index 8c77c11..d9007dc 100644
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -782,8 +782,8 @@ struct nfs_access_entry;
+ struct nfs_rpc_ops {
+ 	u32	version;		/* Protocol version */
+ 	struct dentry_operations *dentry_ops;
+-	const struct inode_operations *dir_inode_ops;
+-	const struct inode_operations *file_inode_ops;
++	struct inode_operations *dir_inode_ops;
++	struct inode_operations *file_inode_ops;
+ 
+ 	int	(*getroot) (struct nfs_server *, struct nfs_fh *,
+ 			    struct nfs_fsinfo *);
+diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
+index 5431512..3753e4b 100644
+--- a/include/linux/nfsd/export.h
++++ b/include/linux/nfsd/export.h
+@@ -15,6 +15,7 @@
+ # include <linux/types.h>
+ # include <linux/in.h>
+ #endif
++#include <linux/path.h>
+ 
+ /*
+  * Important limits for the exports stuff.
+diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
+deleted file mode 100644
+index 8e41202..0000000
+--- a/include/linux/pipe_fs_i.h
++++ /dev/null
+@@ -1,151 +0,0 @@
+-#ifndef _LINUX_PIPE_FS_I_H
+-#define _LINUX_PIPE_FS_I_H
+-
+-#define PIPEFS_MAGIC 0x50495045
+-
+-#define PIPE_BUFFERS (16)
+-
+-#define PIPE_BUF_FLAG_LRU	0x01	/* page is on the LRU */
+-#define PIPE_BUF_FLAG_ATOMIC	0x02	/* was atomically mapped */
+-#define PIPE_BUF_FLAG_GIFT	0x04	/* page is a gift */
+-
+-/**
+- *	struct pipe_buffer - a linux kernel pipe buffer
+- *	@page: the page containing the data for the pipe buffer
+- *	@offset: offset of data inside the @page
+- *	@len: length of data inside the @page
+- *	@ops: operations associated with this buffer. See @pipe_buf_operations.
+- *	@flags: pipe buffer flags. See above.
+- *	@private: private data owned by the ops.
+- **/
+-struct pipe_buffer {
+-	struct page *page;
+-	unsigned int offset, len;
+-	const struct pipe_buf_operations *ops;
+-	unsigned int flags;
+-	unsigned long private;
+-};
+-
+-/**
+- *	struct pipe_inode_info - a linux kernel pipe
+- *	@wait: reader/writer wait point in case of empty/full pipe
+- *	@nrbufs: the number of non-empty pipe buffers in this pipe
+- *	@curbuf: the current pipe buffer entry
+- *	@tmp_page: cached released page
+- *	@readers: number of current readers of this pipe
+- *	@writers: number of current writers of this pipe
+- *	@waiting_writers: number of writers blocked waiting for room
+- *	@r_counter: reader counter
+- *	@w_counter: writer counter
+- *	@fasync_readers: reader side fasync
+- *	@fasync_writers: writer side fasync
+- *	@inode: inode this pipe is attached to
+- *	@bufs: the circular array of pipe buffers
+- **/
+-struct pipe_inode_info {
+-	wait_queue_head_t wait;
+-	unsigned int nrbufs, curbuf;
+-	struct page *tmp_page;
+-	unsigned int readers;
+-	unsigned int writers;
+-	unsigned int waiting_writers;
+-	unsigned int r_counter;
+-	unsigned int w_counter;
+-	struct fasync_struct *fasync_readers;
+-	struct fasync_struct *fasync_writers;
+-	struct inode *inode;
+-	struct pipe_buffer bufs[PIPE_BUFFERS];
+-};
+-
+-/*
+- * Note on the nesting of these functions:
+- *
+- * ->confirm()
+- *	->steal()
+- *	...
+- *	->map()
+- *	...
+- *	->unmap()
+- *
+- * That is, ->map() must be called on a confirmed buffer,
+- * same goes for ->steal(). See below for the meaning of each
+- * operation. Also see kerneldoc in fs/pipe.c for the pipe
+- * and generic variants of these hooks.
+- */
+-struct pipe_buf_operations {
+-	/*
+-	 * This is set to 1, if the generic pipe read/write may coalesce
+-	 * data into an existing buffer. If this is set to 0, a new pipe
+-	 * page segment is always used for new data.
+-	 */
+-	int can_merge;
+-
+-	/*
+-	 * ->map() returns a virtual address mapping of the pipe buffer.
+-	 * The last integer flag reflects whether this should be an atomic
+-	 * mapping or not. The atomic map is faster, however you can't take
+-	 * page faults before calling ->unmap() again. So if you need to eg
+-	 * access user data through copy_to/from_user(), then you must get
+-	 * a non-atomic map. ->map() uses the KM_USER0 atomic slot for
+-	 * atomic maps, so you can't map more than one pipe_buffer at once
+-	 * and you have to be careful if mapping another page as source
+-	 * or destination for a copy (IOW, it has to use something else
+-	 * than KM_USER0).
+-	 */
+-	void * (*map)(struct pipe_inode_info *, struct pipe_buffer *, int);
+-
+-	/*
+-	 * Undoes ->map(), finishes the virtual mapping of the pipe buffer.
+-	 */
+-	void (*unmap)(struct pipe_inode_info *, struct pipe_buffer *, void *);
+-
+-	/*
+-	 * ->confirm() verifies that the data in the pipe buffer is there
+-	 * and that the contents are good. If the pages in the pipe belong
+-	 * to a file system, we may need to wait for IO completion in this
+-	 * hook. Returns 0 for good, or a negative error value in case of
+-	 * error.
+-	 */
+-	int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *);
+-
+-	/*
+-	 * When the contents of this pipe buffer has been completely
+-	 * consumed by a reader, ->release() is called.
+-	 */
+-	void (*release)(struct pipe_inode_info *, struct pipe_buffer *);
+-
+-	/*
+-	 * Attempt to take ownership of the pipe buffer and its contents.
+-	 * ->steal() returns 0 for success, in which case the contents
+-	 * of the pipe (the buf->page) is locked and now completely owned
+-	 * by the caller. The page may then be transferred to a different
+-	 * mapping, the most often used case is insertion into different
+-	 * file address space cache.
+-	 */
+-	int (*steal)(struct pipe_inode_info *, struct pipe_buffer *);
+-
+-	/*
+-	 * Get a reference to the pipe buffer.
+-	 */
+-	void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
+-};
+-
+-/* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual
+-   memory allocation, whereas PIPE_BUF makes atomicity guarantees.  */
+-#define PIPE_SIZE		PAGE_SIZE
+-
+-/* Drop the inode semaphore and wait for a pipe event, atomically */
+-void pipe_wait(struct pipe_inode_info *pipe);
+-
+-struct pipe_inode_info * alloc_pipe_info(struct inode * inode);
+-void free_pipe_info(struct inode * inode);
+-void __free_pipe_info(struct pipe_inode_info *);
+-
+-/* Generic pipe buffer ops functions */
+-void *generic_pipe_buf_map(struct pipe_inode_info *, struct pipe_buffer *, int);
+-void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void *);
+-void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
+-int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
+-int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+-
+-#endif
+diff --git a/include/linux/splice.h b/include/linux/splice.h
+deleted file mode 100644
+index 528dcb9..0000000
+--- a/include/linux/splice.h
++++ /dev/null
+@@ -1,74 +0,0 @@
+-/*
+- * Function declerations and data structures related to the splice
+- * implementation.
+- *
+- * Copyright (C) 2007 Jens Axboe <jens.axboe at oracle.com>
+- *
+- */
+-#ifndef SPLICE_H
+-#define SPLICE_H
+-
+-#include <linux/pipe_fs_i.h>
+-
+-/*
+- * splice is tied to pipes as a transport (at least for now), so we'll just
+- * add the splice flags here.
+- */
+-#define SPLICE_F_MOVE	(0x01)	/* move pages instead of copying */
+-#define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */
+-				 /* we may still block on the fd we splice */
+-				 /* from/to, of course */
+-#define SPLICE_F_MORE	(0x04)	/* expect more data */
+-#define SPLICE_F_GIFT	(0x08)	/* pages passed in are a gift */
+-
+-/*
+- * Passed to the actors
+- */
+-struct splice_desc {
+-	unsigned int len, total_len;	/* current and remaining length */
+-	unsigned int flags;		/* splice flags */
+-	/*
+-	 * actor() private data
+-	 */
+-	union {
+-		void __user *userptr;	/* memory to write to */
+-		struct file *file;	/* file to read/write */
+-		void *data;		/* cookie */
+-	} u;
+-	loff_t pos;			/* file position */
+-};
+-
+-struct partial_page {
+-	unsigned int offset;
+-	unsigned int len;
+-	unsigned long private;
+-};
+-
+-/*
+- * Passed to splice_to_pipe
+- */
+-struct splice_pipe_desc {
+-	struct page **pages;		/* page map */
+-	struct partial_page *partial;	/* pages[] may not be contig */
+-	int nr_pages;			/* number of pages in map */
+-	unsigned int flags;		/* splice flags */
+-	const struct pipe_buf_operations *ops;/* ops associated with output pipe */
+-	void (*spd_release)(struct splice_pipe_desc *, unsigned int);
+-};
+-
+-typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
+-			   struct splice_desc *);
+-typedef int (splice_direct_actor)(struct pipe_inode_info *,
+-				  struct splice_desc *);
+-
+-extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *,
+-				loff_t *, size_t, unsigned int,
+-				splice_actor *);
+-extern ssize_t __splice_from_pipe(struct pipe_inode_info *,
+-				  struct splice_desc *, splice_actor *);
+-extern ssize_t splice_to_pipe(struct pipe_inode_info *,
+-			      struct splice_pipe_desc *);
+-extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
+-				      splice_direct_actor *);
+-
+-#endif
+diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h
+index 10709cb..9bbadbd 100644
+--- a/include/linux/sunrpc/debug.h
++++ b/include/linux/sunrpc/debug.h
+@@ -88,6 +88,7 @@ enum {
+ 	CTL_SLOTTABLE_TCP,
+ 	CTL_MIN_RESVPORT,
+ 	CTL_MAX_RESVPORT,
++	CTL_TRANSPORT,
+ };
+ 
+ #endif /* _LINUX_SUNRPC_DEBUG_H_ */
+diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
+index dc69068..3a0f48f 100644
+--- a/include/linux/sunrpc/svc.h
++++ b/include/linux/sunrpc/svc.h
+@@ -255,7 +255,7 @@ struct svc_rqst {
+ 						 * determine what device number
+ 						 * to report (real or virtual)
+ 						 */
+-	int			rq_splice_ok;   /* turned off in gss privacy
++	int			rq_sendfile_ok;   /* turned off in gss privacy
+ 						 * to prevent encrypting page
+ 						 * cache pages */
+ 	wait_queue_head_t	rq_wait;	/* synchronization */
+diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
+index 6bfea9e..f0a110d 100644
+--- a/net/sunrpc/auth.c
++++ b/net/sunrpc/auth.c
+@@ -566,19 +566,16 @@ rpcauth_uptodatecred(struct rpc_task *task)
+ 		test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0;
+ }
+ 
+-static struct shrinker rpc_cred_shrinker = {
+-	.shrink = rpcauth_cache_shrinker,
+-	.seeks = DEFAULT_SEEKS,
+-};
++static struct shrinker *rpc_cred_shrinker;
+ 
+ void __init rpcauth_init_module(void)
+ {
+ 	rpc_init_authunix();
+ 	rpc_init_generic_auth();
+-	register_shrinker(&rpc_cred_shrinker);
++	rpc_cred_shrinker = set_shrinker(DEFAULT_SEEKS, rpcauth_cache_shrinker);
+ }
+ 
+ void __exit rpcauth_remove_module(void)
+ {
+-	unregister_shrinker(&rpc_cred_shrinker);
++	remove_shrinker(rpc_cred_shrinker);
+ }
+diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
+index 853a414..71ba862 100644
+--- a/net/sunrpc/auth_gss/auth_gss.c
++++ b/net/sunrpc/auth_gss/auth_gss.c
+@@ -481,7 +481,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
+ 	const void *p, *end;
+ 	void *buf;
+ 	struct gss_upcall_msg *gss_msg;
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 	struct gss_cl_ctx *ctx;
+ 	uid_t uid;
+ 	ssize_t err = -EFBIG;
+diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
+index ef45eba..423251a 100644
+--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
++++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
+@@ -99,6 +99,7 @@ get_key(const void *p, const void *end, struct crypto_blkcipher **res)
+ 			printk("gss_kerberos_mech: unsupported algorithm %d\n", alg);
+ 			goto out_err_free_key;
+ 	}
++
+ 	*res = crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC);
+ 	if (IS_ERR(*res)) {
+ 		printk("gss_kerberos_mech: unable to initialize crypto algorithm %s\n", alg_name);
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index 81ae3d6..acfb1d1 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -859,7 +859,7 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
+ 	u32 priv_len, maj_stat;
+ 	int pad, saved_len, remaining_len, offset;
+ 
+-	rqstp->rq_splice_ok = 0;
++	rqstp->rq_sendfile_ok = 0;
+ 
+ 	priv_len = svc_getnl(&buf->head[0]);
+ 	if (rqstp->rq_deferred) {
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index c996671..58e606e 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -696,7 +696,7 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
+ {
+ 	struct cache_reader *rp = filp->private_data;
+ 	struct cache_request *rq;
+-	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
++	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
+ 	int err;
+ 
+ 	if (count == 0)
+@@ -773,7 +773,7 @@ cache_write(struct file *filp, const char __user *buf, size_t count,
+ 	    loff_t *ppos)
+ {
+ 	int err;
+-	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
++	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
+ 
+ 	if (count == 0)
+ 		return 0;
+@@ -804,7 +804,7 @@ cache_poll(struct file *filp, poll_table *wait)
+ 	unsigned int mask;
+ 	struct cache_reader *rp = filp->private_data;
+ 	struct cache_queue *cq;
+-	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
++	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
+ 
+ 	poll_wait(filp, &queue_wait, wait);
+ 
+@@ -1239,7 +1239,7 @@ static int c_show(struct seq_file *m, void *p)
+ 	return cd->cache_show(m, cd, cp);
+ }
+ 
+-static const struct seq_operations cache_content_op = {
++static struct seq_operations cache_content_op = {
+ 	.start	= c_start,
+ 	.next	= c_next,
+ 	.stop	= c_stop,
+@@ -1269,7 +1269,7 @@ static const struct file_operations content_file_operations = {
+ static ssize_t read_flush(struct file *file, char __user *buf,
+ 			    size_t count, loff_t *ppos)
+ {
+-	struct cache_detail *cd = PDE(file->f_path.dentry->d_inode)->data;
++	struct cache_detail *cd = PDE(file->f_dentry->d_inode)->data;
+ 	char tbuf[20];
+ 	unsigned long p = *ppos;
+ 	size_t len;
+@@ -1290,7 +1290,7 @@ static ssize_t read_flush(struct file *file, char __user *buf,
+ static ssize_t write_flush(struct file * file, const char __user * buf,
+ 			     size_t count, loff_t *ppos)
+ {
+-	struct cache_detail *cd = PDE(file->f_path.dentry->d_inode)->data;
++	struct cache_detail *cd = PDE(file->f_dentry->d_inode)->data;
+ 	char tbuf[20];
+ 	char *ep;
+ 	long flushtime;
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 76739e9..11bfb52 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -213,10 +213,10 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
+ 	}
+ 
+ 	/* save the nodename */
+-	clnt->cl_nodelen = strlen(utsname()->nodename);
++	clnt->cl_nodelen = strlen(system_utsname.nodename);
+ 	if (clnt->cl_nodelen > UNX_MAXNODENAME)
+ 		clnt->cl_nodelen = UNX_MAXNODENAME;
+-	memcpy(clnt->cl_nodename, utsname()->nodename, clnt->cl_nodelen);
++	memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen);
+ 	rpc_register_client(clnt);
+ 	return clnt;
+ 
+diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
+index 23a2b8f..003a6ec 100644
+--- a/net/sunrpc/rpc_pipe.c
++++ b/net/sunrpc/rpc_pipe.c
+@@ -26,6 +26,7 @@
+ #include <linux/sunrpc/clnt.h>
+ #include <linux/workqueue.h>
+ #include <linux/sunrpc/rpc_pipe_fs.h>
++#include <linux/path.h>
+ 
+ static struct vfsmount *rpc_mount __read_mostly;
+ static int rpc_mount_count;
+@@ -224,7 +225,7 @@ out:
+ static ssize_t
+ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
+ {
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 	struct rpc_inode *rpci = RPC_I(inode);
+ 	struct rpc_pipe_msg *msg;
+ 	int res = 0;
+@@ -267,7 +268,7 @@ out_unlock:
+ static ssize_t
+ rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset)
+ {
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 	struct rpc_inode *rpci = RPC_I(inode);
+ 	int res;
+ 
+@@ -285,7 +286,7 @@ rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
+ 	struct rpc_inode *rpci;
+ 	unsigned int mask = 0;
+ 
+-	rpci = RPC_I(filp->f_path.dentry->d_inode);
++	rpci = RPC_I(filp->f_dentry->d_inode);
+ 	poll_wait(filp, &rpci->waitq, wait);
+ 
+ 	mask = POLLOUT | POLLWRNORM;
+@@ -300,7 +301,7 @@ static int
+ rpc_pipe_ioctl(struct inode *ino, struct file *filp,
+ 		unsigned int cmd, unsigned long arg)
+ {
+-	struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
++	struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode);
+ 	int len;
+ 
+ 	switch (cmd) {
+@@ -495,7 +496,7 @@ rpc_lookup_parent(char *path, struct nameidata *nd)
+ static void
+ rpc_release_path(struct nameidata *nd)
+ {
+-	path_put(&nd->path);
++	backport_path_put(nd);
+ 	rpc_put_mount();
+ }
+ 
+@@ -668,7 +669,7 @@ rpc_lookup_negative(char *path, struct nameidata *nd)
+ 
+ 	if ((error = rpc_lookup_parent(path, nd)) != 0)
+ 		return ERR_PTR(error);
+-	dentry = rpc_lookup_create(nd->path.dentry, nd->last.name, nd->last.len,
++	dentry = rpc_lookup_create(nd->dentry, nd->last.name, nd->last.len,
+ 				   1);
+ 	if (IS_ERR(dentry))
+ 		rpc_release_path(nd);
+@@ -696,7 +697,7 @@ rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
+ 	dentry = rpc_lookup_negative(path, &nd);
+ 	if (IS_ERR(dentry))
+ 		return dentry;
+-	dir = nd.path.dentry->d_inode;
++	dir = nd.dentry->d_inode;
+ 	if ((error = __rpc_mkdir(dir, dentry)) != 0)
+ 		goto err_dput;
+ 	RPC_I(dentry->d_inode)->private = rpc_client;
+@@ -897,7 +898,7 @@ static struct file_system_type rpc_pipe_fs_type = {
+ };
+ 
+ static void
+-init_once(void *foo)
++init_once(void *foo, struct kmem_cache *cachep, unsigned long temp)
+ {
+ 	struct rpc_inode *rpci = (struct rpc_inode *) foo;
+ 
+diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
+index 24db2b4..0f6f1ea 100644
+--- a/net/sunrpc/rpcb_clnt.c
++++ b/net/sunrpc/rpcb_clnt.c
+@@ -117,18 +117,6 @@ static void rpcb_map_release(void *data)
+ 	kfree(map);
+ }
+ 
+-static const struct sockaddr_in rpcb_inaddr_loopback = {
+-	.sin_family		= AF_INET,
+-	.sin_addr.s_addr	= htonl(INADDR_LOOPBACK),
+-	.sin_port		= htons(RPCBIND_PORT),
+-};
+-
+-static const struct sockaddr_in6 rpcb_in6addr_loopback = {
+-	.sin6_family		= AF_INET6,
+-	.sin6_addr		= IN6ADDR_LOOPBACK_INIT,
+-	.sin6_port		= htons(RPCBIND_PORT),
+-};
+-
+ static struct rpc_clnt *rpcb_create_local(struct sockaddr *addr,
+ 					  size_t addrlen, u32 version)
+ {
+@@ -248,6 +236,11 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay)
+ 		.rpc_argp	= &map,
+ 		.rpc_resp	= okay,
+ 	};
++	struct sockaddr_in rpcb_inaddr_loopback = {
++		.sin_family		= AF_INET,
++		.sin_addr.s_addr	= htonl(INADDR_LOOPBACK),
++		.sin_port		= htons(RPCBIND_PORT),
++	};
+ 
+ 	dprintk("RPC:       %sregistering (%u, %u, %d, %u) with local "
+ 			"rpcbind\n", (port ? "" : "un"),
+@@ -272,6 +265,12 @@ static int rpcb_register_netid4(struct sockaddr_in *address_to_register,
+ 	unsigned short port = ntohs(address_to_register->sin_port);
+ 	char buf[32];
+ 
++	struct sockaddr_in rpcb_inaddr_loopback = {
++		.sin_family		= AF_INET,
++		.sin_addr.s_addr	= htonl(INADDR_LOOPBACK),
++		.sin_port		= htons(RPCBIND_PORT),
++	};
++
+ 	/* Construct AF_INET universal address */
+ 	snprintf(buf, sizeof(buf),
+ 			NIPQUAD_FMT".%u.%u",
+@@ -303,6 +302,12 @@ static int rpcb_register_netid6(struct sockaddr_in6 *address_to_register,
+ 	unsigned short port = ntohs(address_to_register->sin6_port);
+ 	char buf[64];
+ 
++	struct sockaddr_in6 rpcb_in6addr_loopback = {
++		.sin6_family		= AF_INET6,
++		.sin6_addr		= IN6ADDR_LOOPBACK_INIT,
++		.sin6_port		= htons(RPCBIND_PORT),
++	};
++
+ 	/* Construct AF_INET6 universal address */
+ 	snprintf(buf, sizeof(buf),
+ 			NIP6_FMT".%u.%u",
+diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
+index 50b049c..5053a5f 100644
+--- a/net/sunrpc/stats.c
++++ b/net/sunrpc/stats.c
+@@ -264,7 +264,7 @@ rpc_proc_init(void)
+ 	dprintk("RPC:       registering /proc/net/rpc\n");
+ 	if (!proc_net_rpc) {
+ 		struct proc_dir_entry *ent;
+-		ent = proc_mkdir("rpc", init_net.proc_net);
++		ent = proc_mkdir("rpc", proc_net);
+ 		if (ent) {
+ 			ent->owner = THIS_MODULE;
+ 			proc_net_rpc = ent;
+@@ -278,7 +278,7 @@ rpc_proc_exit(void)
+ 	dprintk("RPC:       unregistering /proc/net/rpc\n");
+ 	if (proc_net_rpc) {
+ 		proc_net_rpc = NULL;
+-		remove_proc_entry("rpc", init_net.proc_net);
++		remove_proc_entry("rpc", proc_net);
+ 	}
+ }
+ 
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index 5a32cb7..e0e87c6 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -174,7 +174,7 @@ fail:
+ static int
+ svc_pool_map_init_percpu(struct svc_pool_map *m)
+ {
+-	unsigned int maxpools = nr_cpu_ids;
++	unsigned int maxpools = highest_possible_processor_id() + 1;
+ 	unsigned int pidx = 0;
+ 	unsigned int cpu;
+ 	int err;
+@@ -202,7 +202,7 @@ svc_pool_map_init_percpu(struct svc_pool_map *m)
+ static int
+ svc_pool_map_init_pernode(struct svc_pool_map *m)
+ {
+-	unsigned int maxpools = nr_node_ids;
++	unsigned int maxpools = highest_possible_processor_id() + 1;
+ 	unsigned int pidx = 0;
+ 	unsigned int node;
+ 	int err;
+@@ -310,13 +310,12 @@ svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
+ 	switch (m->mode) {
+ 	case SVC_POOL_PERCPU:
+ 	{
+-		set_cpus_allowed_ptr(task, &cpumask_of_cpu(node));
++		set_cpus_allowed(task, cpumask_of_cpu(node));
+ 		break;
+ 	}
+ 	case SVC_POOL_PERNODE:
+ 	{
+-		node_to_cpumask_ptr(nodecpumask, node);
+-		set_cpus_allowed_ptr(task, nodecpumask);
++		set_cpus_allowed(task, node_to_cpumask(node));
+ 		break;
+ 	}
+ 	}
+@@ -831,7 +830,7 @@ svc_process(struct svc_rqst *rqstp)
+ 	rqstp->rq_res.tail[0].iov_base = NULL;
+ 	rqstp->rq_res.tail[0].iov_len = 0;
+ 	/* Will be turned off only in gss privacy case: */
+-	rqstp->rq_splice_ok = 1;
++	rqstp->rq_sendfile_ok = 1;
+ 
+ 	/* Setup reply header */
+ 	rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
+diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
+index f24800f..b30d725 100644
+--- a/net/sunrpc/svcauth_unix.c
++++ b/net/sunrpc/svcauth_unix.c
+@@ -678,7 +678,7 @@ int
+ svcauth_unix_set_client(struct svc_rqst *rqstp)
+ {
+ 	struct sockaddr_in *sin;
+-	struct sockaddr_in6 *sin6, sin6_storage;
++	struct sockaddr_in6 *sin6 = NULL, sin6_storage;
+ 	struct ip_map *ipm;
+ 
+ 	switch (rqstp->rq_addr.ss_family) {
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 3e65719..cbb47a6 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -472,12 +472,16 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
+ 	if (len < 0)
+ 		return len;
+ 	rqstp->rq_addrlen = len;
+-	if (skb->tstamp.tv64 == 0) {
+-		skb->tstamp = ktime_get_real();
++	if (skb->tstamp.off_sec == 0) {
++		struct timeval tv;
++
++		tv.tv_sec = xtime.tv_sec;
++		tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
++		skb_set_timestamp(skb, &tv);
+ 		/* Don't enable netstamp, sunrpc doesn't
+ 		   need that much accuracy */
+ 	}
+-	svsk->sk_sk->sk_stamp = skb->tstamp;
++	skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp);
+ 	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
+ 
+ 	/*
+diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c
+index 5231f7a..1482e34 100644
+--- a/net/sunrpc/sysctl.c
++++ b/net/sunrpc/sysctl.c
+@@ -135,6 +135,7 @@ done:
+ 
+ static ctl_table debug_table[] = {
+ 	{
++		.ctl_name	= CTL_RPCDEBUG, 
+ 		.procname	= "rpc_debug",
+ 		.data		= &rpc_debug,
+ 		.maxlen		= sizeof(int),
+@@ -142,6 +143,7 @@ static ctl_table debug_table[] = {
+ 		.proc_handler	= &proc_dodebug
+ 	},
+ 	{
++		.ctl_name	= CTL_NFSDEBUG,
+ 		.procname	= "nfs_debug",
+ 		.data		= &nfs_debug,
+ 		.maxlen		= sizeof(int),
+@@ -149,6 +151,7 @@ static ctl_table debug_table[] = {
+ 		.proc_handler	= &proc_dodebug
+ 	},
+ 	{
++		.ctl_name	= CTL_NFSDDEBUG,
+ 		.procname	= "nfsd_debug",
+ 		.data		= &nfsd_debug,
+ 		.maxlen		= sizeof(int),
+@@ -156,6 +159,7 @@ static ctl_table debug_table[] = {
+ 		.proc_handler	= &proc_dodebug
+ 	},
+ 	{
++		.ctl_name	= CTL_NLMDEBUG,
+ 		.procname	= "nlm_debug",
+ 		.data		= &nlm_debug,
+ 		.maxlen		= sizeof(int),
+@@ -163,6 +167,7 @@ static ctl_table debug_table[] = {
+ 		.proc_handler	= &proc_dodebug
+ 	},
+ 	{
++		.ctl_name	= CTL_TRANSPORT,
+ 		.procname	= "transports",
+ 		.maxlen		= 256,
+ 		.mode		= 0444,
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+index 6fb493c..761ad29 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -247,10 +247,6 @@ static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
+ 	struct svcxprt_rdma *xprt = cq_context;
+ 	unsigned long flags;
+ 
+-	/* Guard against unconditional flush call for destroyed QP */
+-	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount)==0)
+-		return;
+-
+ 	/*
+ 	 * Set the bit regardless of whether or not it's on the list
+ 	 * because it may be on the list already due to an SQ
+@@ -411,10 +407,6 @@ static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
+ 	struct svcxprt_rdma *xprt = cq_context;
+ 	unsigned long flags;
+ 
+-	/* Guard against unconditional flush call for destroyed QP */
+-	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount)==0)
+-		return;
+-
+ 	/*
+ 	 * Set the bit regardless of whether or not it's on the list
+ 	 * because it may be on the list already due to an RQ
+@@ -1116,9 +1108,6 @@ static void __svc_rdma_free(struct work_struct *work)
+ 		container_of(work, struct svcxprt_rdma, sc_work);
+ 	dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);
+ 
+-	/* We should only be called from kref_put */
+-	BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);
+-
+ 	/*
+ 	 * Destroy queued, but not processed read completions. Note
+ 	 * that this cleanup has to be done before destroying the
+diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
+index 8710117..ce94fa4 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma.c
++++ b/net/sunrpc/xprtrdma/svc_rdma.c
+@@ -116,6 +116,7 @@ static int read_reset_stat(ctl_table *table, int write,
+ static struct ctl_table_header *svcrdma_table_header;
+ static ctl_table svcrdma_parm_table[] = {
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "max_requests",
+ 		.data		= &svcrdma_max_requests,
+ 		.maxlen		= sizeof(unsigned int),
+@@ -126,6 +127,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.extra2		= &max_max_requests
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "max_req_size",
+ 		.data		= &svcrdma_max_req_size,
+ 		.maxlen		= sizeof(unsigned int),
+@@ -136,6 +138,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.extra2		= &max_max_inline
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "max_outbound_read_requests",
+ 		.data		= &svcrdma_ord,
+ 		.maxlen		= sizeof(unsigned int),
+@@ -147,6 +150,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 	},
+ 
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "rdma_stat_read",
+ 		.data		= &rdma_stat_read,
+ 		.maxlen		= sizeof(atomic_t),
+@@ -154,6 +158,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.proc_handler	= &read_reset_stat,
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "rdma_stat_recv",
+ 		.data		= &rdma_stat_recv,
+ 		.maxlen		= sizeof(atomic_t),
+@@ -161,6 +166,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.proc_handler	= &read_reset_stat,
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "rdma_stat_write",
+ 		.data		= &rdma_stat_write,
+ 		.maxlen		= sizeof(atomic_t),
+@@ -168,6 +174,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.proc_handler	= &read_reset_stat,
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "rdma_stat_sq_starve",
+ 		.data		= &rdma_stat_sq_starve,
+ 		.maxlen		= sizeof(atomic_t),
+@@ -175,6 +182,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.proc_handler	= &read_reset_stat,
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "rdma_stat_rq_starve",
+ 		.data		= &rdma_stat_rq_starve,
+ 		.maxlen		= sizeof(atomic_t),
+@@ -182,6 +190,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.proc_handler	= &read_reset_stat,
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "rdma_stat_rq_poll",
+ 		.data		= &rdma_stat_rq_poll,
+ 		.maxlen		= sizeof(atomic_t),
+@@ -189,6 +198,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.proc_handler	= &read_reset_stat,
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "rdma_stat_rq_prod",
+ 		.data		= &rdma_stat_rq_prod,
+ 		.maxlen		= sizeof(atomic_t),
+@@ -196,6 +206,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.proc_handler	= &read_reset_stat,
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "rdma_stat_sq_poll",
+ 		.data		= &rdma_stat_sq_poll,
+ 		.maxlen		= sizeof(atomic_t),
+@@ -203,6 +214,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.proc_handler	= &read_reset_stat,
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "rdma_stat_sq_prod",
+ 		.data		= &rdma_stat_sq_prod,
+ 		.maxlen		= sizeof(atomic_t),
+@@ -216,6 +228,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 
+ static ctl_table svcrdma_table[] = {
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "svc_rdma",
+ 		.mode		= 0555,
+ 		.child		= svcrdma_parm_table

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/2_misc_device_to_2_6_19.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/2_misc_device_to_2_6_19.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/2_misc_device_to_2_6_19.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,56 @@
+>Post a replacement to 2_misc_device_to_2_6_19.patch, we'll test.
+
+I did not test this patch, but you can try replacing the contents of
+the 2_misc_device_to_2_6_19.patch with the changes below.  (It's
+possible that this may lead to some conflict further down in the patch
+chain...)  The function prototype for show_abi_version changed between
+2.6.20 to 2.6.19; this was the missing piece in the original backport
+patch.  I would have expected a build warning for this.
+
+Signed-off-by: Sean Hefty <sean.hefty at intel.com>
+
+---
+---
+ drivers/infiniband/core/ucma.c |   12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/core/ucma.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/ucma.c
++++ ofed_kernel/drivers/infiniband/core/ucma.c
+@@ -1207,13 +1207,11 @@ static struct miscdevice ucma_misc = {
+ 	.fops	= &ucma_fops,
+ };
+ 
+-static ssize_t show_abi_version(struct device *dev,
+-				struct device_attribute *attr,
+-				char *buf)
++static ssize_t show_abi_version(struct class_device *class_dev, char *buf)
+ {
+ 	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
+ }
+-static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
++static CLASS_DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
+ 
+ static int __init ucma_init(void)
+ {
+@@ -1223,7 +1221,8 @@ static int __init ucma_init(void)
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
++	ret = class_device_create_file(ucma_misc.class,
++				       &class_device_attr_abi_version);
+ 	if (ret) {
+ 		printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
+ 		goto err;
+@@ -1236,7 +1235,8 @@ err:
+ 
+ static void __exit ucma_cleanup(void)
+ {
+-	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
++	class_device_remove_file(ucma_misc.class,
++				 &class_device_attr_abi_version);
+ 	misc_deregister(&ucma_misc);
+ 	idr_destroy(&ctx_idr);
+ }

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cma_to_2_6_23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cma_to_2_6_23.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cma_to_2_6_23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,23 @@
+---
+ drivers/infiniband/core/cma.c |    5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/core/cma.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/cma.c
++++ ofed_kernel/drivers/infiniband/core/cma.c
+@@ -2835,13 +2835,10 @@ static int cma_netdev_callback(struct no
+ 	struct rdma_id_private *id_priv;
+ 	int ret = NOTIFY_DONE;
+ 
+-	if (dev_net(ndev) != &init_net)
+-		return NOTIFY_DONE;
+-
+ 	if (event != NETDEV_BONDING_FAILOVER)
+ 		return NOTIFY_DONE;
+ 
+-	if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
++	if (!(ndev->flags & IFF_MASTER))
+ 		return NOTIFY_DONE;
+ 
+ 	mutex_lock(&lock);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_0_sysfs_to_2_6_25.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_0_sysfs_to_2_6_25.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_0_sysfs_to_2_6_25.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,81 @@
+---
+ drivers/infiniband/core/cm.c          |    4 ++--
+ drivers/infiniband/core/user_mad.c    |   14 ++++++++------
+ drivers/infiniband/core/uverbs_main.c |   11 +++++------
+ 3 files changed, 15 insertions(+), 14 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/core/cm.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/cm.c
++++ ofed_kernel/drivers/infiniband/core/cm.c
+@@ -3738,8 +3738,8 @@ static void cm_add_one(struct ib_device 
+ 	cm_dev->ib_device = ib_device;
+ 	cm_get_ack_delay(cm_dev);
+ 
+-	cm_dev->device = device_create_drvdata(&cm_class, &ib_device->dev,
+-					       MKDEV(0, 0), NULL,
++	cm_dev->device = device_create(&cm_class, &ib_device->dev,
++					       MKDEV(0, 0),
+ 					       "%s", ib_device->name);
+ 	if (!cm_dev->device) {
+ 		kfree(cm_dev);
+Index: ofed_kernel/drivers/infiniband/core/user_mad.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/user_mad.c
++++ ofed_kernel/drivers/infiniband/core/user_mad.c
+@@ -1016,9 +1016,8 @@ static int ib_umad_init_port(struct ib_d
+ 	if (cdev_add(port->cdev, base_dev + port->dev_num, 1))
+ 		goto err_cdev;
+ 
+-	port->dev = device_create_drvdata(umad_class, device->dma_device,
+-					  port->cdev->dev, port,
+-					  "umad%d", port->dev_num);
++	port->dev = device_create(umad_class, device->dma_device,
++				  port->cdev->dev, "umad%d", port->dev_num);
+ 	if (IS_ERR(port->dev))
+ 		goto err_cdev;
+ 
+@@ -1036,12 +1035,15 @@ static int ib_umad_init_port(struct ib_d
+ 	if (cdev_add(port->sm_cdev, base_dev + port->dev_num + IB_UMAD_MAX_PORTS, 1))
+ 		goto err_sm_cdev;
+ 
+-	port->sm_dev = device_create_drvdata(umad_class, device->dma_device,
+-					     port->sm_cdev->dev, port,
+-					     "issm%d", port->dev_num);
++	port->sm_dev = device_create(umad_class, device->dma_device,
++				     port->sm_cdev->dev,
++				     "issm%d", port->dev_num);
+ 	if (IS_ERR(port->sm_dev))
+ 		goto err_sm_cdev;
+ 
++	dev_set_drvdata(port->dev,    port);
++	dev_set_drvdata(port->sm_dev, port);
++
+ 	if (device_create_file(port->sm_dev, &dev_attr_ibdev))
+ 		goto err_sm_dev;
+ 	if (device_create_file(port->sm_dev, &dev_attr_port))
+Index: ofed_kernel/drivers/infiniband/core/uverbs_main.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/uverbs_main.c
++++ ofed_kernel/drivers/infiniband/core/uverbs_main.c
+@@ -802,15 +802,14 @@ static void ib_uverbs_add_one(struct ib_
+ 	if (cdev_add(uverbs_dev->cdev, IB_UVERBS_BASE_DEV + uverbs_dev->devnum, 1))
+ 		goto err_cdev;
+ 
+-	uverbs_dev->dev = device_create_drvdata(uverbs_class,
+-						device->dma_device,
+-						uverbs_dev->cdev->dev,
+-						uverbs_dev,
+-						"uverbs%d",
+-						uverbs_dev->devnum);
++	uverbs_dev->dev = device_create(uverbs_class, device->dma_device,
++					uverbs_dev->cdev->dev,
++					"uverbs%d", uverbs_dev->devnum);
+ 	if (IS_ERR(uverbs_dev->dev))
+ 		goto err_cdev;
+ 
++	dev_set_drvdata(uverbs_dev->dev, uverbs_dev);
++
+ 	if (device_create_file(uverbs_dev->dev, &dev_attr_ibdev))
+ 		goto err_class;
+ 	if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version))

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_1_kobject_backport.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_1_kobject_backport.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_1_kobject_backport.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,191 @@
+---
+ drivers/infiniband/core/Makefile           |    4 
+ drivers/infiniband/core/kobject_backport.c |  160 +++++++++++++++++++++++++++++
+ 2 files changed, 162 insertions(+), 2 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/core/Makefile
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/Makefile
++++ ofed_kernel/drivers/infiniband/core/Makefile
+@@ -8,14 +8,14 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) +=	
+ 					$(user_access-y)
+ 
+ ib_core-y :=			packer.o ud_header.o verbs.o sysfs.o \
+-				device.o fmr_pool.o cache.o
++				device.o fmr_pool.o cache.o kobject_backport.o
+ ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
+ 
+ ib_mad-y :=			mad.o smi.o agent.o mad_rmpp.o
+ 
+ ib_sa-y :=			sa_query.o multicast.o notice.o local_sa.o
+ 
+-ib_cm-y :=			cm.o
++ib_cm-y :=			cm.o kobject_backport.o
+ 
+ iw_cm-y :=			iwcm.o
+ 
+Index: ofed_kernel/drivers/infiniband/core/kobject_backport.c
+===================================================================
+--- /dev/null
++++ ofed_kernel/drivers/infiniband/core/kobject_backport.c
+@@ -0,0 +1,160 @@
++#include <linux/slab.h>
++#include <linux/kobject.h>
++
++struct kobj_attribute {
++	struct attribute attr;
++	ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
++			char *buf);
++	ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
++			 const char *buf, size_t count);
++};
++
++/* default kobject attribute operations */
++static ssize_t kobj_attr_show(struct kobject *kobj, struct attribute *attr,
++			      char *buf)
++{
++	struct kobj_attribute *kattr;
++	ssize_t ret = -EIO;
++
++	kattr = container_of(attr, struct kobj_attribute, attr);
++	if (kattr->show)
++		ret = kattr->show(kobj, kattr, buf);
++	return ret;
++}
++
++static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
++			       const char *buf, size_t count)
++{
++	struct kobj_attribute *kattr;
++	ssize_t ret = -EIO;
++
++	kattr = container_of(attr, struct kobj_attribute, attr);
++	if (kattr->store)
++		ret = kattr->store(kobj, kattr, buf, count);
++	return ret;
++}
++
++static struct sysfs_ops kobj_sysfs_ops = {
++	.show   = kobj_attr_show,
++	.store  = kobj_attr_store,
++};
++
++static void dynamic_kobj_release(struct kobject *kobj)
++{
++	pr_debug("kobject: (%p): %s\n", kobj, __FUNCTION__);
++	kfree(kobj);
++}
++
++static struct kobj_type dynamic_kobj_ktype = {
++	.release        = dynamic_kobj_release,
++	.sysfs_ops      = &kobj_sysfs_ops,
++};
++
++/**
++ * kobject_create_and_add - create a struct kobject dynamically and register it with sysfs
++ *
++ * @name: the name for the kset
++ * @parent: the parent kobject of this kobject, if any.
++ *
++ * This function creates a kobject structure dynamically and registers it
++ * with sysfs.  When you are finished with this structure, call
++ * kobject_put() and the structure will be dynamically freed when
++ * it is no longer being used.
++ *
++ * If the kobject was not able to be created, NULL will be returned.
++ */
++struct kobject *kobject_create_and_add(const char *name, struct kobject *parent)
++{
++	struct kobject *kobj;
++	int retval;
++
++	kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
++	if (!kobj)
++		return NULL;
++
++	kobject_init(kobj);
++	kobj->ktype = &dynamic_kobj_ktype;
++	kobj->parent = parent;
++
++	retval = kobject_set_name(kobj, "%s", name);
++	if (retval) {
++		printk(KERN_WARNING "%s: kobject_set_name error: %d\n",
++			__FUNCTION__, retval);
++		goto err;
++	}
++
++	retval = kobject_add(kobj);
++	if (retval) {
++		printk(KERN_WARNING "%s: kobject_add error: %d\n",
++			__FUNCTION__, retval);
++		goto err;
++	}
++
++	return kobj;
++
++err:
++	kobject_put(kobj);
++	return NULL;
++}
++
++/**
++ * kobject_init_and_add - initialize a kobject structure and add it to the kobject hierarchy
++ * @kobj: pointer to the kobject to initialize
++ * @ktype: pointer to the ktype for this kobject.
++ * @parent: pointer to the parent of this kobject.
++ * @fmt: the name of the kobject.
++ *
++ * This function combines the call to kobject_init() and
++ * kobject_add().  The same type of error handling after a call to
++ * kobject_add() and kobject lifetime rules are the same here.
++ */
++int kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype,
++                         struct kobject *parent, const char *fmt, ...)
++{
++	int retval;
++	int limit;
++	int need;
++	va_list args;
++	char *name;
++
++	/* find out how big a buffer we need */
++	name = kmalloc(1024, GFP_KERNEL);
++	if (!name) {
++		retval = -ENOMEM;
++		goto out;
++	}
++	va_start(args, fmt);
++	need = vsnprintf(name, 1024, fmt, args);
++	va_end(args);
++	kfree(name);
++
++	/* Allocate the new space and copy the string in */
++	limit = need + 1;
++	name = kmalloc(limit, GFP_KERNEL);
++	if (!name) {
++		retval = -ENOMEM;
++		goto out;
++	}
++
++	va_start(args, fmt);
++	need = vsnprintf(name, limit, fmt, args);
++	va_end(args);
++
++	kobject_init(kobj);
++
++	kobj->ktype = ktype;
++	kobj->parent = parent;
++
++	retval = kobject_set_name(kobj, name);
++	kfree(name);
++	if (retval)
++		goto out;
++
++	retval = kobject_add(kobj);
++	if (retval)
++		goto out;
++
++out:
++	return retval;
++}
++

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_2_kobject_unregister_to_2_6_24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_2_kobject_unregister_to_2_6_24.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_2_kobject_unregister_to_2_6_24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,64 @@
+---
+ drivers/infiniband/core/cm.c    |    8 ++++----
+ drivers/infiniband/core/sysfs.c |    8 +++++---
+ 2 files changed, 9 insertions(+), 7 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/core/cm.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/cm.c
++++ ofed_kernel/drivers/infiniband/core/cm.c
+@@ -3696,8 +3696,8 @@ static int cm_create_port_fs(struct cm_p
+ 
+ error:
+ 	while (i--)
+-		kobject_put(&port->counter_group[i].obj);
+-	kobject_put(&port->port_obj);
++		kobject_unregister(&port->counter_group[i].obj);
++	kobject_unregister(&port->port_obj);
+ 	return ret;
+ 
+ }
+@@ -3707,9 +3707,9 @@ static void cm_remove_port_fs(struct cm_
+ 	int i;
+ 
+ 	for (i = 0; i < CM_COUNTER_GROUPS; i++)
+-		kobject_put(&port->counter_group[i].obj);
++		kobject_unregister(&port->counter_group[i].obj);
+ 
+-	kobject_put(&port->port_obj);
++	kobject_unregister(&port->port_obj);
+ }
+ 
+ static void cm_add_one(struct ib_device *ib_device)
+Index: ofed_kernel/drivers/infiniband/core/sysfs.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/sysfs.c
++++ ofed_kernel/drivers/infiniband/core/sysfs.c
+@@ -838,11 +838,11 @@ err_put:
+ 			sysfs_remove_group(p, &pma_group);
+ 			sysfs_remove_group(p, &port->pkey_group);
+ 			sysfs_remove_group(p, &port->gid_group);
+-			kobject_put(p);
++			kobject_unregister(p);
+ 		}
+ 	}
+ 
+-	kobject_put(&class_dev->kobj);
++	kobject_unregister(&class_dev->kobj);
+ 
+ err_unregister:
+ 	device_unregister(class_dev);
+@@ -862,10 +862,12 @@ void ib_device_unregister_sysfs(struct i
+ 		sysfs_remove_group(p, &pma_group);
+ 		sysfs_remove_group(p, &port->pkey_group);
+ 		sysfs_remove_group(p, &port->gid_group);
+-		kobject_put(p);
++		kobject_unregister(p);
+ 	}
+ 
+ 	kobject_put(device->ports_parent);
++	/* WA for memory leak */
++	kfree(device->ports_parent);
+ 	device_unregister(&device->dev);
+ }
+ 

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_2_z010_sysfs_to_2.6.18.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_2_z010_sysfs_to_2.6.18.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_2_z010_sysfs_to_2.6.18.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,32 @@
+--- a/drivers/infiniband/core/device.c	2009-02-24 14:00:50.387541000 +0200
++++ b/drivers/infiniband/core/device.c	2009-02-24 14:00:54.891362000 +0200
+@@ -199,7 +199,7 @@ void ib_dealloc_device(struct ib_device 
+ 
+ 	BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);
+ 
+-	kobject_put(&device->dev.kobj);
++	ib_device_unregister_sysfs(device);
+ }
+ EXPORT_SYMBOL(ib_dealloc_device);
+ 
+@@ -356,8 +356,6 @@ void ib_unregister_device(struct ib_devi
+ 
+ 	mutex_unlock(&device_mutex);
+ 
+-	ib_device_unregister_sysfs(device);
+-
+ 	spin_lock_irqsave(&device->client_data_lock, flags);
+ 	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ 		kfree(context);
+--- a/drivers/infiniband/core/sysfs.c	2009-02-24 13:58:42.509754000 +0200
++++ b/drivers/infiniband/core/sysfs.c	2009-02-24 14:00:54.894368000 +0200
+@@ -863,9 +863,6 @@ void ib_device_unregister_sysfs(struct i
+ 	struct kobject *p, *t;
+ 	struct ib_port *port;
+ 
+-	/* Hold kobject until ib_dealloc_device() */
+-	kobject_get(&device->dev.kobj);
+-
+ 	list_for_each_entry_safe(p, t, &device->port_list, entry) {
+ 		list_del(&p->entry);
+ 		port = container_of(p, struct ib_port, kobj);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_3_sysfs_to_2_6_18.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_3_sysfs_to_2_6_18.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_3_sysfs_to_2_6_18.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,761 @@
+---
+ drivers/infiniband/core/cm.c          |    2 
+ drivers/infiniband/core/sysfs.c       |  172 ++++++++++++++++------------------
+ drivers/infiniband/core/ucm.c         |   62 ++++++------
+ drivers/infiniband/core/user_mad.c    |  109 ++++++++++-----------
+ drivers/infiniband/core/uverbs.h      |    4 
+ drivers/infiniband/core/uverbs_main.c |   51 ++++------
+ include/rdma/ib_verbs.h               |    2 
+ 7 files changed, 199 insertions(+), 203 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/core/cm.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/cm.c
++++ ofed_kernel/drivers/infiniband/core/cm.c
+@@ -3738,7 +3738,7 @@ static void cm_add_one(struct ib_device 
+ 	cm_dev->ib_device = ib_device;
+ 	cm_get_ack_delay(cm_dev);
+ 
+-	cm_dev->device = device_create(&cm_class, &ib_device->dev,
++	cm_dev->device = device_create(&cm_class, ib_device->class_dev.dev,
+ 					       MKDEV(0, 0),
+ 					       "%s", ib_device->name);
+ 	if (!cm_dev->device) {
+Index: ofed_kernel/drivers/infiniband/core/sysfs.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/sysfs.c
++++ ofed_kernel/drivers/infiniband/core/sysfs.c
+@@ -425,25 +425,28 @@ static struct kobj_type port_type = {
+ 	.default_attrs = port_default_attrs
+ };
+ 
+-static void ib_device_release(struct device *device)
++static void ib_device_release(struct class_device *cdev)
+ {
+-	struct ib_device *dev = container_of(device, struct ib_device, dev);
++	struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);
+ 
+ 	kfree(dev);
+ }
+ 
+-static int ib_device_uevent(struct device *device,
+-			    struct kobj_uevent_env *env)
++static int ib_device_uevent(struct class_device *cdev, char **envp,
++			    int num_envp, char *buf, int size)
+ {
+-	struct ib_device *dev = container_of(device, struct ib_device, dev);
++	struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);
++	int i = 0, len = 0;
+ 
+-	if (add_uevent_var(env, "NAME=%s", dev->name))
++	if (add_uevent_var(envp, num_envp, &i, buf, size, &len,
++			   "NAME=%s", dev->name))
+ 		return -ENOMEM;
+ 
+ 	/*
+ 	 * It would be nice to pass the node GUID with the event...
+ 	 */
+ 
++	envp[i] = NULL;
+ 	return 0;
+ }
+ 
+@@ -565,10 +568,9 @@ err_put:
+ 	return ret;
+ }
+ 
+-static ssize_t show_node_type(struct device *device,
+-			      struct device_attribute *attr, char *buf)
++static ssize_t show_node_type(struct class_device *cdev, char *buf)
+ {
+-	struct ib_device *dev = container_of(device, struct ib_device, dev);
++	struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);
+ 
+ 	if (!ibdev_is_alive(dev))
+ 		return -ENODEV;
+@@ -582,10 +584,9 @@ static ssize_t show_node_type(struct dev
+ 	}
+ }
+ 
+-static ssize_t show_sys_image_guid(struct device *device,
+-				   struct device_attribute *dev_attr, char *buf)
++static ssize_t show_sys_image_guid(struct class_device *cdev, char *buf)
+ {
+-	struct ib_device *dev = container_of(device, struct ib_device, dev);
++	struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);
+ 	struct ib_device_attr attr;
+ 	ssize_t ret;
+ 
+@@ -603,10 +604,9 @@ static ssize_t show_sys_image_guid(struc
+ 		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
+ }
+ 
+-static ssize_t show_node_guid(struct device *device,
+-			      struct device_attribute *attr, char *buf)
++static ssize_t show_node_guid(struct class_device *cdev, char *buf)
+ {
+-	struct ib_device *dev = container_of(device, struct ib_device, dev);
++	struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);
+ 
+ 	if (!ibdev_is_alive(dev))
+ 		return -ENODEV;
+@@ -618,19 +618,17 @@ static ssize_t show_node_guid(struct dev
+ 		       be16_to_cpu(((__be16 *) &dev->node_guid)[3]));
+ }
+ 
+-static ssize_t show_node_desc(struct device *device,
+-			      struct device_attribute *attr, char *buf)
++static ssize_t show_node_desc(struct class_device *cdev, char *buf)
+ {
+-	struct ib_device *dev = container_of(device, struct ib_device, dev);
++	struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);
+ 
+ 	return sprintf(buf, "%.64s\n", dev->node_desc);
+ }
+ 
+-static ssize_t set_node_desc(struct device *device,
+-			     struct device_attribute *attr,
+-			     const char *buf, size_t count)
++static ssize_t set_node_desc(struct class_device *cdev, const char *buf,
++			      size_t count)
+ {
+-	struct ib_device *dev = container_of(device, struct ib_device, dev);
++	struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);
+ 	struct ib_device_modify desc = {};
+ 	int ret;
+ 
+@@ -645,30 +643,30 @@ static ssize_t set_node_desc(struct devi
+ 	return count;
+ }
+ 
+-static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
+-static DEVICE_ATTR(sys_image_guid, S_IRUGO, show_sys_image_guid, NULL);
+-static DEVICE_ATTR(node_guid, S_IRUGO, show_node_guid, NULL);
+-static DEVICE_ATTR(node_desc, S_IRUGO | S_IWUSR, show_node_desc, set_node_desc);
+-
+-static struct device_attribute *ib_class_attributes[] = {
+-	&dev_attr_node_type,
+-	&dev_attr_sys_image_guid,
+-	&dev_attr_node_guid,
+-	&dev_attr_node_desc
++static CLASS_DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
++static CLASS_DEVICE_ATTR(sys_image_guid, S_IRUGO, show_sys_image_guid, NULL);
++static CLASS_DEVICE_ATTR(node_guid, S_IRUGO, show_node_guid, NULL);
++static CLASS_DEVICE_ATTR(node_desc, S_IRUGO | S_IWUSR, show_node_desc, set_node_desc);
++
++static struct class_device_attribute *ib_class_attributes[] = {
++	&class_device_attr_node_type,
++	&class_device_attr_sys_image_guid,
++	&class_device_attr_node_guid,
++	&class_device_attr_node_desc
+ };
+ 
+ static struct class ib_class = {
+ 	.name    = "infiniband",
+-	.dev_release = ib_device_release,
+-	.dev_uevent = ib_device_uevent,
++	.release = ib_device_release,
++	.uevent = ib_device_uevent,
+ };
+ 
+ /* Show a given an attribute in the statistics group */
+-static ssize_t show_protocol_stat(const struct device *device,
+-			    struct device_attribute *attr, char *buf,
++static ssize_t show_protocol_stat(struct class_device *cdev,
++			    char *buf,
+ 			    unsigned offset)
+ {
+-	struct ib_device *dev = container_of(device, struct ib_device, dev);
++	struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);
+ 	union rdma_protocol_stats stats;
+ 	ssize_t ret;
+ 
+@@ -682,14 +680,14 @@ static ssize_t show_protocol_stat(const 
+ 
+ /* generate a read-only iwarp statistics attribute */
+ #define IW_STATS_ENTRY(name)						\
+-static ssize_t show_##name(struct device *device,			\
+-			   struct device_attribute *attr, char *buf)	\
++static ssize_t show_##name(struct class_device *cdev,			\
++			   char *buf)					\
+ {									\
+-	return show_protocol_stat(device, attr, buf,			\
++	return show_protocol_stat(cdev, buf,			\
+ 				  offsetof(struct iw_protocol_stats, name) / \
+ 				  sizeof (u64));			\
+ }									\
+-static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
++static CLASS_DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
+ 
+ IW_STATS_ENTRY(ipInReceives);
+ IW_STATS_ENTRY(ipInHdrErrors);
+@@ -731,44 +729,44 @@ IW_STATS_ENTRY(tcpInErrs);
+ IW_STATS_ENTRY(tcpOutRsts);
+ 
+ static struct attribute *iw_proto_stats_attrs[] = {
+-	&dev_attr_ipInReceives.attr,
+-	&dev_attr_ipInHdrErrors.attr,
+-	&dev_attr_ipInTooBigErrors.attr,
+-	&dev_attr_ipInNoRoutes.attr,
+-	&dev_attr_ipInAddrErrors.attr,
+-	&dev_attr_ipInUnknownProtos.attr,
+-	&dev_attr_ipInTruncatedPkts.attr,
+-	&dev_attr_ipInDiscards.attr,
+-	&dev_attr_ipInDelivers.attr,
+-	&dev_attr_ipOutForwDatagrams.attr,
+-	&dev_attr_ipOutRequests.attr,
+-	&dev_attr_ipOutDiscards.attr,
+-	&dev_attr_ipOutNoRoutes.attr,
+-	&dev_attr_ipReasmTimeout.attr,
+-	&dev_attr_ipReasmReqds.attr,
+-	&dev_attr_ipReasmOKs.attr,
+-	&dev_attr_ipReasmFails.attr,
+-	&dev_attr_ipFragOKs.attr,
+-	&dev_attr_ipFragFails.attr,
+-	&dev_attr_ipFragCreates.attr,
+-	&dev_attr_ipInMcastPkts.attr,
+-	&dev_attr_ipOutMcastPkts.attr,
+-	&dev_attr_ipInBcastPkts.attr,
+-	&dev_attr_ipOutBcastPkts.attr,
+-	&dev_attr_tcpRtoAlgorithm.attr,
+-	&dev_attr_tcpRtoMin.attr,
+-	&dev_attr_tcpRtoMax.attr,
+-	&dev_attr_tcpMaxConn.attr,
+-	&dev_attr_tcpActiveOpens.attr,
+-	&dev_attr_tcpPassiveOpens.attr,
+-	&dev_attr_tcpAttemptFails.attr,
+-	&dev_attr_tcpEstabResets.attr,
+-	&dev_attr_tcpCurrEstab.attr,
+-	&dev_attr_tcpInSegs.attr,
+-	&dev_attr_tcpOutSegs.attr,
+-	&dev_attr_tcpRetransSegs.attr,
+-	&dev_attr_tcpInErrs.attr,
+-	&dev_attr_tcpOutRsts.attr,
++	&class_device_attr_ipInReceives.attr,
++	&class_device_attr_ipInHdrErrors.attr,
++	&class_device_attr_ipInTooBigErrors.attr,
++	&class_device_attr_ipInNoRoutes.attr,
++	&class_device_attr_ipInAddrErrors.attr,
++	&class_device_attr_ipInUnknownProtos.attr,
++	&class_device_attr_ipInTruncatedPkts.attr,
++	&class_device_attr_ipInDiscards.attr,
++	&class_device_attr_ipInDelivers.attr,
++	&class_device_attr_ipOutForwDatagrams.attr,
++	&class_device_attr_ipOutRequests.attr,
++	&class_device_attr_ipOutDiscards.attr,
++	&class_device_attr_ipOutNoRoutes.attr,
++	&class_device_attr_ipReasmTimeout.attr,
++	&class_device_attr_ipReasmReqds.attr,
++	&class_device_attr_ipReasmOKs.attr,
++	&class_device_attr_ipReasmFails.attr,
++	&class_device_attr_ipFragOKs.attr,
++	&class_device_attr_ipFragFails.attr,
++	&class_device_attr_ipFragCreates.attr,
++	&class_device_attr_ipInMcastPkts.attr,
++	&class_device_attr_ipOutMcastPkts.attr,
++	&class_device_attr_ipInBcastPkts.attr,
++	&class_device_attr_ipOutBcastPkts.attr,
++	&class_device_attr_tcpRtoAlgorithm.attr,
++	&class_device_attr_tcpRtoMin.attr,
++	&class_device_attr_tcpRtoMax.attr,
++	&class_device_attr_tcpMaxConn.attr,
++	&class_device_attr_tcpActiveOpens.attr,
++	&class_device_attr_tcpPassiveOpens.attr,
++	&class_device_attr_tcpAttemptFails.attr,
++	&class_device_attr_tcpEstabResets.attr,
++	&class_device_attr_tcpCurrEstab.attr,
++	&class_device_attr_tcpInSegs.attr,
++	&class_device_attr_tcpOutSegs.attr,
++	&class_device_attr_tcpRetransSegs.attr,
++	&class_device_attr_tcpInErrs.attr,
++	&class_device_attr_tcpOutRsts.attr,
+ 	NULL
+ };
+ 
+@@ -779,23 +777,23 @@ static struct attribute_group iw_stats_g
+ 
+ int ib_device_register_sysfs(struct ib_device *device)
+ {
+-	struct device *class_dev = &device->dev;
++	struct class_device *class_dev = &device->class_dev;
+ 	int ret;
+ 	int i;
+ 
+ 	class_dev->class      = &ib_class;
+-	class_dev->driver_data = device;
+-	class_dev->parent     = device->dma_device;
+-	strlcpy(class_dev->bus_id, device->name, BUS_ID_SIZE);
++	class_dev->class_data = device;
++	class_dev->dev	      = device->dma_device;
++	strlcpy(class_dev->class_id, device->name, BUS_ID_SIZE);
+ 
+ 	INIT_LIST_HEAD(&device->port_list);
+ 
+-	ret = device_register(class_dev);
++	ret = class_device_register(class_dev);
+ 	if (ret)
+ 		goto err;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i) {
+-		ret = device_create_file(class_dev, ib_class_attributes[i]);
++		ret = class_device_create_file(class_dev, ib_class_attributes[i]);
+ 		if (ret)
+ 			goto err_unregister;
+ 	}
+@@ -845,7 +843,7 @@ err_put:
+ 	kobject_unregister(&class_dev->kobj);
+ 
+ err_unregister:
+-	device_unregister(class_dev);
++	class_device_unregister(class_dev);
+ 
+ err:
+ 	return ret;
+@@ -868,7 +866,7 @@ void ib_device_unregister_sysfs(struct i
+ 	kobject_put(device->ports_parent);
+ 	/* WA for memory leak */
+ 	kfree(device->ports_parent);
+-	device_unregister(&device->dev);
++	class_device_unregister(&device->class_dev);
+ }
+ 
+ int ib_sysfs_setup(void)
+Index: ofed_kernel/drivers/infiniband/core/ucm.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/ucm.c
++++ ofed_kernel/drivers/infiniband/core/ucm.c
+@@ -56,8 +56,8 @@ MODULE_LICENSE("Dual BSD/GPL");
+ 
+ struct ib_ucm_device {
+ 	int			devnum;
+-	struct cdev		cdev;
+-	struct device		dev;
++	struct cdev		dev;
++	struct class_device	class_dev;
+ 	struct ib_device	*ib_dev;
+ };
+ 
+@@ -1177,7 +1177,7 @@ static int ib_ucm_open(struct inode *ino
+ 
+ 	filp->private_data = file;
+ 	file->filp = filp;
+-	file->device = container_of(inode->i_cdev, struct ib_ucm_device, cdev);
++	file->device = container_of(inode->i_cdev, struct ib_ucm_device, dev);
+ 
+ 	return 0;
+ }
+@@ -1208,14 +1208,14 @@ static int ib_ucm_close(struct inode *in
+ 	return 0;
+ }
+ 
+-static void ib_ucm_release_dev(struct device *dev)
++static void ucm_release_class_dev(struct class_device *class_dev)
+ {
+-	struct ib_ucm_device *ucm_dev;
++	struct ib_ucm_device *dev;
+ 
+-	ucm_dev = container_of(dev, struct ib_ucm_device, dev);
+-	cdev_del(&ucm_dev->cdev);
+-	clear_bit(ucm_dev->devnum, dev_map);
+-	kfree(ucm_dev);
++	dev = container_of(class_dev, struct ib_ucm_device, class_dev);
++	cdev_del(&dev->dev);
++	clear_bit(dev->devnum, dev_map);
++	kfree(dev);
+ }
+ 
+ static const struct file_operations ucm_fops = {
+@@ -1226,15 +1226,14 @@ static const struct file_operations ucm_
+ 	.poll    = ib_ucm_poll,
+ };
+ 
+-static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
+-			  char *buf)
++static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
+ {
+-	struct ib_ucm_device *ucm_dev;
++	struct ib_ucm_device *dev;
+ 
+-	ucm_dev = container_of(dev, struct ib_ucm_device, dev);
+-	return sprintf(buf, "%s\n", ucm_dev->ib_dev->name);
++	dev = container_of(class_dev, struct ib_ucm_device, class_dev);
++	return sprintf(buf, "%s\n", dev->ib_dev->name);
+ }
+-static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
++static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
+ 
+ static void ib_ucm_add_one(struct ib_device *device)
+ {
+@@ -1256,31 +1255,32 @@ static void ib_ucm_add_one(struct ib_dev
+ 
+ 	set_bit(ucm_dev->devnum, dev_map);
+ 
+-	cdev_init(&ucm_dev->cdev, &ucm_fops);
+-	ucm_dev->cdev.owner = THIS_MODULE;
+-	kobject_set_name(&ucm_dev->cdev.kobj, "ucm%d", ucm_dev->devnum);
+-	if (cdev_add(&ucm_dev->cdev, IB_UCM_BASE_DEV + ucm_dev->devnum, 1))
++	cdev_init(&ucm_dev->dev, &ucm_fops);
++	ucm_dev->dev.owner = THIS_MODULE;
++	kobject_set_name(&ucm_dev->dev.kobj, "ucm%d", ucm_dev->devnum);
++	if (cdev_add(&ucm_dev->dev, IB_UCM_BASE_DEV + ucm_dev->devnum, 1))
+ 		goto err;
+ 
+-	ucm_dev->dev.class = &cm_class;
+-	ucm_dev->dev.parent = device->dma_device;
+-	ucm_dev->dev.devt = ucm_dev->cdev.dev;
+-	ucm_dev->dev.release = ib_ucm_release_dev;
+-	snprintf(ucm_dev->dev.bus_id, BUS_ID_SIZE, "ucm%d",
++	ucm_dev->class_dev.class = &cm_class;
++	ucm_dev->class_dev.dev = device->dma_device;
++	ucm_dev->class_dev.devt = ucm_dev->dev.dev;
++	ucm_dev->class_dev.release = ucm_release_class_dev;
++	snprintf(ucm_dev->class_dev.class_id, BUS_ID_SIZE, "ucm%d",
+ 		 ucm_dev->devnum);
+-	if (device_register(&ucm_dev->dev))
++	if (class_device_register(&ucm_dev->class_dev))
+ 		goto err_cdev;
+ 
+-	if (device_create_file(&ucm_dev->dev, &dev_attr_ibdev))
+-		goto err_dev;
++	if (class_device_create_file(&ucm_dev->class_dev,
++				     &class_device_attr_ibdev))
++		goto err_class;
+ 
+ 	ib_set_client_data(device, &ucm_client, ucm_dev);
+ 	return;
+ 
+-err_dev:
+-	device_unregister(&ucm_dev->dev);
++err_class:
++	class_device_unregister(&ucm_dev->class_dev);
+ err_cdev:
+-	cdev_del(&ucm_dev->cdev);
++	cdev_del(&ucm_dev->dev);
+ 	clear_bit(ucm_dev->devnum, dev_map);
+ err:
+ 	kfree(ucm_dev);
+@@ -1294,7 +1294,7 @@ static void ib_ucm_remove_one(struct ib_
+ 	if (!ucm_dev)
+ 		return;
+ 
+-	device_unregister(&ucm_dev->dev);
++	class_device_unregister(&ucm_dev->class_dev);
+ }
+ 
+ static ssize_t show_abi_version(struct class *class, char *buf)
+Index: ofed_kernel/drivers/infiniband/core/user_mad.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/user_mad.c
++++ ofed_kernel/drivers/infiniband/core/user_mad.c
+@@ -86,11 +86,11 @@ enum {
+  */
+ 
+ struct ib_umad_port {
+-	struct cdev           *cdev;
+-	struct device	      *dev;
++	struct cdev           *dev;
++	struct class_device   *class_dev;
+ 
+-	struct cdev           *sm_cdev;
+-	struct device	      *sm_dev;
++	struct cdev           *sm_dev;
++	struct class_device   *sm_class_dev;
+ 	struct semaphore       sm_sem;
+ 
+ 	struct mutex	       file_mutex;
+@@ -959,29 +959,27 @@ static struct ib_client umad_client = {
+ 	.remove = ib_umad_remove_one
+ };
+ 
+-static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
+-			  char *buf)
++static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
+ {
+-	struct ib_umad_port *port = dev_get_drvdata(dev);
++	struct ib_umad_port *port = class_get_devdata(class_dev);
+ 
+ 	if (!port)
+ 		return -ENODEV;
+ 
+ 	return sprintf(buf, "%s\n", port->ib_dev->name);
+ }
+-static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
++static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
+ 
+-static ssize_t show_port(struct device *dev, struct device_attribute *attr,
+-			 char *buf)
++static ssize_t show_port(struct class_device *class_dev, char *buf)
+ {
+-	struct ib_umad_port *port = dev_get_drvdata(dev);
++	struct ib_umad_port *port = class_get_devdata(class_dev);
+ 
+ 	if (!port)
+ 		return -ENODEV;
+ 
+ 	return sprintf(buf, "%d\n", port->port_num);
+ }
+-static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
++static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
+ 
+ static ssize_t show_abi_version(struct class *class, char *buf)
+ {
+@@ -1007,47 +1005,48 @@ static int ib_umad_init_port(struct ib_d
+ 	mutex_init(&port->file_mutex);
+ 	INIT_LIST_HEAD(&port->file_list);
+ 
+-	port->cdev = cdev_alloc();
+-	if (!port->cdev)
++	port->dev = cdev_alloc();
++	if (!port->dev)
+ 		return -1;
+-	port->cdev->owner = THIS_MODULE;
+-	port->cdev->ops   = &umad_fops;
+-	kobject_set_name(&port->cdev->kobj, "umad%d", port->dev_num);
+-	if (cdev_add(port->cdev, base_dev + port->dev_num, 1))
++	port->dev->owner = THIS_MODULE;
++	port->dev->ops   = &umad_fops;
++	kobject_set_name(&port->dev->kobj, "umad%d", port->dev_num);
++	if (cdev_add(port->dev, base_dev + port->dev_num, 1))
+ 		goto err_cdev;
+ 
+-	port->dev = device_create(umad_class, device->dma_device,
+-				  port->cdev->dev, "umad%d", port->dev_num);
+-	if (IS_ERR(port->dev))
++	port->class_dev = class_device_create(umad_class, NULL, port->dev->dev,
++					      device->dma_device,
++					      "umad%d", port->dev_num);
++	if (IS_ERR(port->class_dev))
+ 		goto err_cdev;
+ 
+-	if (device_create_file(port->dev, &dev_attr_ibdev))
+-		goto err_dev;
+-	if (device_create_file(port->dev, &dev_attr_port))
+-		goto err_dev;
+-
+-	port->sm_cdev = cdev_alloc();
+-	if (!port->sm_cdev)
+-		goto err_dev;
+-	port->sm_cdev->owner = THIS_MODULE;
+-	port->sm_cdev->ops   = &umad_sm_fops;
+-	kobject_set_name(&port->sm_cdev->kobj, "issm%d", port->dev_num);
+-	if (cdev_add(port->sm_cdev, base_dev + port->dev_num + IB_UMAD_MAX_PORTS, 1))
++	if (class_device_create_file(port->class_dev, &class_device_attr_ibdev))
++		goto err_class;
++	if (class_device_create_file(port->class_dev, &class_device_attr_port))
++		goto err_class;
++
++	port->sm_dev = cdev_alloc();
++	if (!port->sm_dev)
++		goto err_class;
++	port->sm_dev->owner = THIS_MODULE;
++	port->sm_dev->ops   = &umad_sm_fops;
++	kobject_set_name(&port->sm_dev->kobj, "issm%d", port->dev_num);
++	if (cdev_add(port->sm_dev, base_dev + port->dev_num + IB_UMAD_MAX_PORTS, 1))
+ 		goto err_sm_cdev;
+ 
+-	port->sm_dev = device_create(umad_class, device->dma_device,
+-				     port->sm_cdev->dev,
+-				     "issm%d", port->dev_num);
+-	if (IS_ERR(port->sm_dev))
++	port->sm_class_dev = class_device_create(umad_class, NULL, port->sm_dev->dev,
++						 device->dma_device,
++						 "issm%d", port->dev_num);
++	if (IS_ERR(port->sm_class_dev))
+ 		goto err_sm_cdev;
+ 
+-	dev_set_drvdata(port->dev,    port);
+-	dev_set_drvdata(port->sm_dev, port);
++	class_set_devdata(port->class_dev,    port);
++	class_set_devdata(port->sm_class_dev, port);
+ 
+-	if (device_create_file(port->sm_dev, &dev_attr_ibdev))
+-		goto err_sm_dev;
+-	if (device_create_file(port->sm_dev, &dev_attr_port))
+-		goto err_sm_dev;
++	if (class_device_create_file(port->sm_class_dev, &class_device_attr_ibdev))
++		goto err_sm_class;
++	if (class_device_create_file(port->sm_class_dev, &class_device_attr_port))
++		goto err_sm_class;
+ 
+ 	spin_lock(&port_lock);
+ 	umad_port[port->dev_num] = port;
+@@ -1055,17 +1054,17 @@ static int ib_umad_init_port(struct ib_d
+ 
+ 	return 0;
+ 
+-err_sm_dev:
+-	device_destroy(umad_class, port->sm_cdev->dev);
++err_sm_class:
++	class_device_destroy(umad_class, port->sm_dev->dev);
+ 
+ err_sm_cdev:
+-	cdev_del(port->sm_cdev);
++	cdev_del(port->sm_dev);
+ 
+-err_dev:
+-	device_destroy(umad_class, port->cdev->dev);
++err_class:
++	class_device_destroy(umad_class, port->dev->dev);
+ 
+ err_cdev:
+-	cdev_del(port->cdev);
++	cdev_del(port->dev);
+ 	clear_bit(port->dev_num, dev_map);
+ 
+ 	return -1;
+@@ -1077,14 +1076,14 @@ static void ib_umad_kill_port(struct ib_
+ 	int already_dead;
+ 	int id;
+ 
+-	dev_set_drvdata(port->dev,    NULL);
+-	dev_set_drvdata(port->sm_dev, NULL);
++	class_set_devdata(port->class_dev,    NULL);
++	class_set_devdata(port->sm_class_dev, NULL);
+ 
+-	device_destroy(umad_class, port->cdev->dev);
+-	device_destroy(umad_class, port->sm_cdev->dev);
++	class_device_destroy(umad_class, port->dev->dev);
++	class_device_destroy(umad_class, port->sm_dev->dev);
+ 
+-	cdev_del(port->cdev);
+-	cdev_del(port->sm_cdev);
++	cdev_del(port->dev);
++	cdev_del(port->sm_dev);
+ 
+ 	spin_lock(&port_lock);
+ 	umad_port[port->dev_num] = NULL;
+Index: ofed_kernel/drivers/infiniband/core/uverbs.h
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/uverbs.h
++++ ofed_kernel/drivers/infiniband/core/uverbs.h
+@@ -71,8 +71,8 @@ struct ib_uverbs_device {
+ 	struct kref				ref;
+ 	struct completion			comp;
+ 	int					devnum;
+-	struct cdev			       *cdev;
+-	struct device			       *dev;
++	struct cdev			       *dev;
++	struct class_device		       *class_dev;
+ 	struct ib_device		       *ib_dev;
+ 	int					num_comp_vectors;
+ };
+Index: ofed_kernel/drivers/infiniband/core/uverbs_main.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/uverbs_main.c
++++ ofed_kernel/drivers/infiniband/core/uverbs_main.c
+@@ -737,29 +737,27 @@ static struct ib_client uverbs_client = 
+ 	.remove = ib_uverbs_remove_one
+ };
+ 
+-static ssize_t show_ibdev(struct device *device, struct device_attribute *attr,
+-			  char *buf)
++static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
+ {
+-	struct ib_uverbs_device *dev = dev_get_drvdata(device);
++	struct ib_uverbs_device *dev = class_get_devdata(class_dev);
+ 
+ 	if (!dev)
+ 		return -ENODEV;
+ 
+ 	return sprintf(buf, "%s\n", dev->ib_dev->name);
+ }
+-static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
++static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
+ 
+-static ssize_t show_dev_abi_version(struct device *device,
+-				    struct device_attribute *attr, char *buf)
++static ssize_t show_dev_abi_version(struct class_device *class_dev, char *buf)
+ {
+-	struct ib_uverbs_device *dev = dev_get_drvdata(device);
++	struct ib_uverbs_device *dev = class_get_devdata(class_dev);
+ 
+ 	if (!dev)
+ 		return -ENODEV;
+ 
+ 	return sprintf(buf, "%d\n", dev->ib_dev->uverbs_abi_ver);
+ }
+-static DEVICE_ATTR(abi_version, S_IRUGO, show_dev_abi_version, NULL);
++static CLASS_DEVICE_ATTR(abi_version, S_IRUGO, show_dev_abi_version, NULL);
+ 
+ static ssize_t show_abi_version(struct class *class, char *buf)
+ {
+@@ -793,26 +791,27 @@ static void ib_uverbs_add_one(struct ib_
+ 	uverbs_dev->ib_dev           = device;
+ 	uverbs_dev->num_comp_vectors = device->num_comp_vectors;
+ 
+-	uverbs_dev->cdev = cdev_alloc();
+-	if (!uverbs_dev->cdev)
++	uverbs_dev->dev = cdev_alloc();
++	if (!uverbs_dev->dev)
+ 		goto err;
+-	uverbs_dev->cdev->owner = THIS_MODULE;
+-	uverbs_dev->cdev->ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
+-	kobject_set_name(&uverbs_dev->cdev->kobj, "uverbs%d", uverbs_dev->devnum);
+-	if (cdev_add(uverbs_dev->cdev, IB_UVERBS_BASE_DEV + uverbs_dev->devnum, 1))
++	uverbs_dev->dev->owner = THIS_MODULE;
++	uverbs_dev->dev->ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
++	kobject_set_name(&uverbs_dev->dev->kobj, "uverbs%d", uverbs_dev->devnum);
++	if (cdev_add(uverbs_dev->dev, IB_UVERBS_BASE_DEV + uverbs_dev->devnum, 1))
+ 		goto err_cdev;
+ 
+-	uverbs_dev->dev = device_create(uverbs_class, device->dma_device,
+-					uverbs_dev->cdev->dev,
+-					"uverbs%d", uverbs_dev->devnum);
+-	if (IS_ERR(uverbs_dev->dev))
++	uverbs_dev->class_dev = class_device_create(uverbs_class, NULL,
++						    uverbs_dev->dev->dev,
++						    device->dma_device,
++						    "uverbs%d", uverbs_dev->devnum);
++	if (IS_ERR(uverbs_dev->class_dev))
+ 		goto err_cdev;
+ 
+-	dev_set_drvdata(uverbs_dev->dev, uverbs_dev);
++	class_set_devdata(uverbs_dev->class_dev, uverbs_dev);
+ 
+-	if (device_create_file(uverbs_dev->dev, &dev_attr_ibdev))
++	if (class_device_create_file(uverbs_dev->class_dev, &class_device_attr_ibdev))
+ 		goto err_class;
+-	if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version))
++	if (class_device_create_file(uverbs_dev->class_dev, &class_device_attr_abi_version))
+ 		goto err_class;
+ 
+ 	spin_lock(&map_lock);
+@@ -824,10 +823,10 @@ static void ib_uverbs_add_one(struct ib_
+ 	return;
+ 
+ err_class:
+-	device_destroy(uverbs_class, uverbs_dev->cdev->dev);
++	class_device_destroy(uverbs_class, uverbs_dev->dev->dev);
+ 
+ err_cdev:
+-	cdev_del(uverbs_dev->cdev);
++	cdev_del(uverbs_dev->dev);
+ 	clear_bit(uverbs_dev->devnum, dev_map);
+ 
+ err:
+@@ -844,9 +843,9 @@ static void ib_uverbs_remove_one(struct 
+ 	if (!uverbs_dev)
+ 		return;
+ 
+-	dev_set_drvdata(uverbs_dev->dev, NULL);
+-	device_destroy(uverbs_class, uverbs_dev->cdev->dev);
+-	cdev_del(uverbs_dev->cdev);
++	class_set_devdata(uverbs_dev->class_dev, NULL);
++	class_device_destroy(uverbs_class, uverbs_dev->dev->dev);
++	cdev_del(uverbs_dev->dev);
+ 
+ 	spin_lock(&map_lock);
+ 	dev_table[uverbs_dev->devnum] = NULL;
+Index: ofed_kernel/include/rdma/ib_verbs.h
+===================================================================
+--- ofed_kernel.orig/include/rdma/ib_verbs.h
++++ ofed_kernel/include/rdma/ib_verbs.h
+@@ -1196,7 +1196,7 @@ struct ib_device {
+ 	struct ib_dma_mapping_ops   *dma_ops;
+ 
+ 	struct module               *owner;
+-	struct device                dev;
++	struct class_device          class_dev;
+ 	struct kobject               *ports_parent;
+ 	struct list_head             port_list;
+ 

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_undo_weak_ordering.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_undo_weak_ordering.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_undo_weak_ordering.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,83 @@
+From 91f5e350089e023d485e42e6f30a7fcf28ea394c Mon Sep 17 00:00:00 2001
+From: Eli Cohen <eli at mellanox.co.il>
+Date: Tue, 28 Oct 2008 10:19:24 +0200
+Subject: [PATCH] Revert "ib_core: Use weak ordering for data registered memory"
+
+This reverts commit 4beb8b521a750990346adf47f549c7db5fd50893.
+
+Doing this for backports since the original patch requires an API
+available in kernel 2.6.27 and newer.
+
+Signed-off-by: Eli Cohen <eli at mellanox.co.il>
+---
+ drivers/infiniband/core/umem.c |   12 ++----------
+ include/rdma/ib_umem.h         |    2 --
+ 2 files changed, 2 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index da5e247..6f7c096 100644
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -40,10 +40,6 @@
+ 
+ #include "uverbs.h"
+ 
+-static int allow_weak_ordering;
+-module_param(allow_weak_ordering, bool, 0444);
+-MODULE_PARM_DESC(allow_weak_ordering,  "Allow weak ordering for data registered memory");
+-
+ #define IB_UMEM_MAX_PAGE_CHUNK						\
+ 	((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) /	\
+ 	 ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] -	\
+@@ -55,8 +51,8 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
+ 	int i;
+ 
+ 	list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
+-		ib_dma_unmap_sg_attrs(dev, chunk->page_list,
+-				      chunk->nents, DMA_BIDIRECTIONAL, &chunk->attrs);
++		ib_dma_unmap_sg(dev, chunk->page_list,
++				chunk->nents, DMA_BIDIRECTIONAL);
+ 		for (i = 0; i < chunk->nents; ++i) {
+ 			struct page *page = sg_page(&chunk->page_list[i]);
+ 
+@@ -95,9 +91,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+ 
+ 	if (dmasync)
+ 		dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
+-	else if (allow_weak_ordering)
+-		dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);
+-
+ 
+ 	if (!can_do_mlock())
+ 		return ERR_PTR(-EPERM);
+@@ -176,7 +169,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+ 				goto out;
+ 			}
+ 
+-			chunk->attrs = attrs;
+ 			chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
+ 			sg_init_table(chunk->page_list, chunk->nents);
+ 			for (i = 0; i < chunk->nents; ++i) {
+diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
+index 90f3712..9ee0d2e 100644
+--- a/include/rdma/ib_umem.h
++++ b/include/rdma/ib_umem.h
+@@ -36,7 +36,6 @@
+ #include <linux/list.h>
+ #include <linux/scatterlist.h>
+ #include <linux/workqueue.h>
+-#include <linux/dma-attrs.h>
+ 
+ struct ib_ucontext;
+ 
+@@ -57,7 +56,6 @@ struct ib_umem_chunk {
+ 	struct list_head	list;
+ 	int                     nents;
+ 	int                     nmap;
+-	struct dma_attrs	attrs;
+ 	struct scatterlist      page_list[0];
+ };
+ 
+-- 
+1.6.0.2
+

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_z0010_sysfs_race.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_z0010_sysfs_race.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/core_z0010_sysfs_race.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,496 @@
+ib_core: avoid race condition between sysfs access and low-level module unload. (V2)
+
+In newer kernels, although a low-level module will not be unloaded (code)
+while its sysfs interface is being accessed, it is possible for the module to
+free all its resources (data) during such access.  This almost always causes
+a kernel Oops.
+
+To avoid this, we protect the device reg_state with a mutex, and perform
+all sysfs operations (show, store) atomically within this mutex.
+
+V2: fix thinko bug in sysfs_state_show changes (found by Ralph Campbell).
+
+Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
+Signed-off-by: Ralph Campbell <ralph.campbell at qlogic.com>
+
+Index: ofed_kernel/drivers/infiniband/core/device.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/device.c	2009-02-22 15:36:34.531430000 +0200
++++ ofed_kernel/drivers/infiniband/core/device.c	2009-02-22 15:38:28.650322000 +0200
+@@ -178,9 +178,14 @@ static int end_port(struct ib_device *de
+  */
+ struct ib_device *ib_alloc_device(size_t size)
+ {
++	struct ib_device *ibdev;
++
+ 	BUG_ON(size < sizeof (struct ib_device));
+ 
+-	return kzalloc(size, GFP_KERNEL);
++	ibdev = kzalloc(size, GFP_KERNEL);
++	if (ibdev)
++		mutex_init(&ibdev->sysfs_mutex);
++	return ibdev;
+ }
+ EXPORT_SYMBOL(ib_alloc_device);
+ 
+@@ -313,9 +318,10 @@ int ib_register_device(struct ib_device 
+ 		goto out;
+ 	}
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	list_add_tail(&device->core_list, &device_list);
+-
+ 	device->reg_state = IB_DEV_REGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ 
+ 	{
+ 		struct ib_client *client;
+@@ -361,7 +367,9 @@ void ib_unregister_device(struct ib_devi
+ 		kfree(context);
+ 	spin_unlock_irqrestore(&device->client_data_lock, flags);
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	device->reg_state = IB_DEV_UNREGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ }
+ EXPORT_SYMBOL(ib_unregister_device);
+ 
+Index: ofed_kernel/drivers/infiniband/core/sysfs.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/sysfs.c	2009-02-22 15:36:35.914354000 +0200
++++ ofed_kernel/drivers/infiniband/core/sysfs.c	2009-02-22 16:12:37.485629000 +0200
+@@ -94,7 +94,7 @@ static ssize_t state_show(struct ib_port
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+ 	static const char *state_name[] = {
+ 		[IB_PORT_NOP]		= "NOP",
+@@ -105,26 +105,33 @@ static ssize_t state_show(struct ib_port
+ 		[IB_PORT_ACTIVE_DEFER]	= "ACTIVE_DEFER"
+ 	};
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%d: %s\n", attr.state,
+-		       attr.state >= 0 && attr.state < ARRAY_SIZE(state_name) ?
+-		       state_name[attr.state] : "UNKNOWN");
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d: %s\n", attr.state,
++				      attr.state >= 0 &&
++				      attr.state < ARRAY_SIZE(state_name) ?
++				      state_name[attr.state] : "UNKNOWN");
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "0x%x\n", attr.lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_mask_count_show(struct ib_port *p,
+@@ -132,52 +139,64 @@ static ssize_t lid_mask_count_show(struc
+ 				   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.lmc);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.lmc);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%x\n", attr.sm_lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.sm_lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused,
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.sm_sl);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.sm_sl);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused,
+ 			     char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
+@@ -186,24 +205,33 @@ static ssize_t rate_show(struct ib_port 
+ 	struct ib_port_attr attr;
+ 	char *speed = "";
+ 	int rate;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	switch (attr.active_speed) {
+-	case 2: speed = " DDR"; break;
+-	case 4: speed = " QDR"; break;
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.active_speed) {
++			case 2: speed = " DDR"; break;
++			case 4: speed = " QDR"; break;
++			}
++
++			rate = 25 * ib_width_enum_to_int(attr.active_width) *
++				attr.active_speed;
++			if (rate < 0) {
++				ret = -EINVAL;
++				goto out;
++			}
++
++			ret = sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
++				      rate / 10, rate % 10 ? ".5" : "",
++				      ib_width_enum_to_int(attr.active_width),
++				      speed);
++		}
+ 	}
+-
+-	rate = 25 * ib_width_enum_to_int(attr.active_width) * attr.active_speed;
+-	if (rate < 0)
+-		return -EINVAL;
+-
+-	return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
+-		       rate / 10, rate % 10 ? ".5" : "",
+-		       ib_width_enum_to_int(attr.active_width), speed);
++out:
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
+@@ -211,22 +239,42 @@ static ssize_t phys_state_show(struct ib
+ {
+ 	struct ib_port_attr attr;
+ 
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	switch (attr.phys_state) {
+-	case 1:  return sprintf(buf, "1: Sleep\n");
+-	case 2:  return sprintf(buf, "2: Polling\n");
+-	case 3:  return sprintf(buf, "3: Disabled\n");
+-	case 4:  return sprintf(buf, "4: PortConfigurationTraining\n");
+-	case 5:  return sprintf(buf, "5: LinkUp\n");
+-	case 6:  return sprintf(buf, "6: LinkErrorRecovery\n");
+-	case 7:  return sprintf(buf, "7: Phy Test\n");
+-	default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.phys_state) {
++			case 1:
++				ret = sprintf(buf, "1: Sleep\n");
++				break;
++			case 2:
++				ret = sprintf(buf, "2: Polling\n");
++				break;
++			case 3:
++				ret = sprintf(buf, "3: Disabled\n");
++				break;
++			case 4:
++				ret = sprintf(buf, "4: PortConfigurationTraining\n");
++				break;
++			case 5:
++				ret = sprintf(buf, "5: LinkUp\n");
++				break;
++			case 6:
++				ret = sprintf(buf, "6: LinkErrorRecovery\n");
++				break;
++			case 7:
++				ret = sprintf(buf, "7: Phy Test\n");
++				break;
++			default:
++				ret = sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++				break;
++			}
++		}
+ 	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static PORT_ATTR_RO(state);
+@@ -256,21 +304,24 @@ static ssize_t show_port_gid(struct ib_p
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	union ib_gid gid;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) gid.raw)[0]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[1]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[2]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[3]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[4]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[5]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[6]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[7]));
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) gid.raw)[0]),
++				      be16_to_cpu(((__be16 *) gid.raw)[1]),
++				      be16_to_cpu(((__be16 *) gid.raw)[2]),
++				      be16_to_cpu(((__be16 *) gid.raw)[3]),
++				      be16_to_cpu(((__be16 *) gid.raw)[4]),
++				      be16_to_cpu(((__be16 *) gid.raw)[5]),
++				      be16_to_cpu(((__be16 *) gid.raw)[6]),
++				      be16_to_cpu(((__be16 *) gid.raw)[7]));
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
+@@ -279,13 +330,16 @@ static ssize_t show_port_pkey(struct ib_
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	u16 pkey;
+-	ssize_t ret;
+-
+-	ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%04x\n", pkey);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
++		if (!ret)
++			ret = sprintf(buf, "0x%04x\n", pkey);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ #define PORT_PMA_ATTR(_name, _counter, _width, _offset)			\
+@@ -308,6 +346,12 @@ static ssize_t show_pma_counter(struct i
+ 	if (!p->ibdev->process_mad)
+ 		return sprintf(buf, "N/A (no PMA)\n");
+ 
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (!ibdev_is_alive(p->ibdev)) {
++		ret = -ENODEV;
++		goto out;
++	}
++
+ 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
+ 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ 	if (!in_mad || !out_mad) {
+@@ -354,7 +414,7 @@ static ssize_t show_pma_counter(struct i
+ out:
+ 	kfree(in_mad);
+ 	kfree(out_mad);
+-
++	mutex_unlock(&p->ibdev->sysfs_mutex);
+ 	return ret;
+ }
+ 
+@@ -594,20 +654,20 @@ static ssize_t show_sys_image_guid(struc
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_attr attr;
+-	ssize_t ret;
+-
+-	if (!ibdev_is_alive(dev))
+-		return -ENODEV;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_device(dev, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_query_device(dev, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_node_guid(struct device *device,
+@@ -639,17 +699,20 @@ static ssize_t set_node_desc(struct devi
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_modify desc = {};
+-	int ret;
++	int ret = -ENODEV;
+ 
+ 	if (!dev->modify_device)
+ 		return -EIO;
+ 
+ 	memcpy(desc.node_desc, buf, min_t(int, count, 64));
+-	ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
+-	if (ret)
+-		return ret;
+-
+-	return count;
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
++		if (!ret)
++			ret = count;
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
+@@ -677,14 +740,18 @@ static ssize_t show_protocol_stat(const 
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	union rdma_protocol_stats stats;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = dev->get_protocol_stats(dev, &stats);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%llu\n",
+-		       (unsigned long long) ((u64 *) &stats)[offset]);
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = dev->get_protocol_stats(dev, &stats);
++		if (!ret)
++			ret = sprintf(buf, "%llu\n",
++				      (unsigned long long)
++				      ((u64 *) &stats)[offset]);
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ /* generate a read-only iwarp statistics attribute */
+Index: ofed_kernel/include/rdma/ib_verbs.h
+===================================================================
+--- ofed_kernel.orig/include/rdma/ib_verbs.h	2009-02-22 15:36:40.252210000 +0200
++++ ofed_kernel/include/rdma/ib_verbs.h	2009-02-22 15:38:28.678322000 +0200
+@@ -1205,6 +1205,7 @@ struct ib_device {
+ 		IB_DEV_REGISTERED,
+ 		IB_DEV_UNREGISTERED
+ 	}                            reg_state;
++	struct mutex		     sysfs_mutex;
+ 
+ 	u64			     uverbs_cmd_mask;
+ 	int			     uverbs_abi_ver;
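
A minimal sketch of the locking pattern this patch applies to the sysfs handlers, distilled from the hunks above (the handler shown mirrors the patched lid_show; sysfs_mutex is the field the patch adds to struct ib_device):

	static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused,
				char *buf)
	{
		struct ib_port_attr attr;
		ssize_t ret = -ENODEV;

		/* Serialize against ib_unregister_device() changing reg_state
		 * while the low-level driver frees its data. */
		mutex_lock(&p->ibdev->sysfs_mutex);
		if (ibdev_is_alive(p->ibdev)) {
			ret = ib_query_port(p->ibdev, p->port_num, &attr);
			if (!ret)
				ret = sprintf(buf, "0x%x\n", attr.lid);
		}
		mutex_unlock(&p->ibdev->sysfs_mutex);
		return ret;
	}

Each handler the patch touches takes ibdev->sysfs_mutex, re-checks ibdev_is_alive(), and only then queries the device, so a concurrent unregister can no longer race with a sysfs read.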

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0002_undo_250.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0002_undo_250.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0002_undo_250.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,158 @@
+diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
+index d5e9bf7..dc2c79d 100644
+--- a/drivers/net/cxgb3/common.h
++++ b/drivers/net/cxgb3/common.h
+@@ -726,7 +726,6 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
+ void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
+ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
+ 		    int reset);
+-int t3_replay_prep_adapter(struct adapter *adapter);
+ void t3_led_ready(struct adapter *adapter);
+ void t3_fatal_err(struct adapter *adapter);
+ void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 8de820e..48fbda6 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -2485,6 +2485,9 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
+ 	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
+ 		offload_close(&adapter->tdev);
+ 
++	/* Free sge resources */
++	t3_free_sge_resources(adapter);
++
+ 	adapter->flags &= ~FULL_INIT_DONE;
+ 
+ 	pci_disable_device(pdev);
+@@ -2509,12 +2512,8 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
+ 		goto err;
+ 	}
+ 	pci_set_master(pdev);
+-	pci_restore_state(pdev);
+-
+-	/* Free sge resources */
+-	t3_free_sge_resources(adapter);
+ 
+-	if (t3_replay_prep_adapter(adapter))
++	if (t3_prep_adapter(adapter, adapter->params.info, 1))
+ 		goto err;
+ 
+ 	return PCI_ERS_RESULT_RECOVERED;
+@@ -2666,7 +2665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
+ 	}
+ 
+ 	pci_set_master(pdev);
+-	pci_save_state(pdev);
+ 
+ 	mmio_start = pci_resource_start(pdev, 0);
+ 	mmio_len = pci_resource_len(pdev, 0);
+diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
+index a035d5c..086cd02 100644
+--- a/drivers/net/cxgb3/regs.h
++++ b/drivers/net/cxgb3/regs.h
+@@ -444,14 +444,6 @@
+ 
+ #define A_PCIE_CFG 0x88
+ 
+-#define S_ENABLELINKDWNDRST    21
+-#define V_ENABLELINKDWNDRST(x) ((x) << S_ENABLELINKDWNDRST)
+-#define F_ENABLELINKDWNDRST    V_ENABLELINKDWNDRST(1U)
+-
+-#define S_ENABLELINKDOWNRST    20
+-#define V_ENABLELINKDOWNRST(x) ((x) << S_ENABLELINKDOWNRST)
+-#define F_ENABLELINKDOWNRST    V_ENABLELINKDOWNRST(1U)
+-
+ #define S_PCIE_CLIDECEN    16
+ #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
+ #define F_PCIE_CLIDECEN    V_PCIE_CLIDECEN(1U)
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index 1b0861d..912f816 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
+ }
+ 
+ /**
+- *	t3_reset_qset - reset a sge qset
+- *	@q: the queue set
+- *
+- *	Reset the qset structure.
+- *	the NAPI structure is preserved in the event of
+- *	the qset's reincarnation, for example during EEH recovery.
+- */
+-static void t3_reset_qset(struct sge_qset *q)
+-{
+-	if (q->adap &&
+-	    !(q->adap->flags & NAPI_INIT)) {
+-		memset(q, 0, sizeof(*q));
+-		return;
+-	}
+-
+-	q->adap = NULL;
+-	memset(&q->rspq, 0, sizeof(q->rspq));
+-	memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
+-	memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
+-	q->txq_stopped = 0;
+-	memset(&q->tx_reclaim_timer, 0, sizeof(q->tx_reclaim_timer));
+-	kfree(q->lro_frag_tbl);
+-	q->lro_nfrags = q->lro_frag_len = 0;
+-}
+-
+-
+-/**
+  *	free_qset - free the resources of an SGE queue set
+  *	@adapter: the adapter owning the queue set
+  *	@q: the queue set
+@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
+ 				  q->rspq.desc, q->rspq.phys_addr);
+ 	}
+ 
+-	t3_reset_qset(q);
++	memset(q, 0, sizeof(*q));
+ }
+ 
+ /**
+diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
+index bf5c076..533fc74 100644
+--- a/drivers/net/cxgb3/t3_hw.c
++++ b/drivers/net/cxgb3/t3_hw.c
+@@ -3303,7 +3303,6 @@ static void config_pcie(struct adapter *adap)
+ 
+ 	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
+ 	t3_set_reg_field(adap, A_PCIE_CFG, 0,
+-			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
+ 			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
+ }
+ 
+@@ -3706,31 +3705,3 @@ void t3_led_ready(struct adapter *adapter)
+ 			 F_GPIO0_OUT_VAL);
+ }
+ 
+-int t3_replay_prep_adapter(struct adapter *adapter)
+-{
+-	const struct adapter_info *ai = adapter->params.info;
+-	unsigned int i, j = -1;
+-	int ret;
+-
+-	early_hw_init(adapter, ai);
+-	ret = init_parity(adapter);
+-	if (ret)
+-		return ret;
+-
+-	for_each_port(adapter, i) {
+-		const struct port_type_info *pti;
+-		struct port_info *p = adap2pinfo(adapter, i);
+-
+-		while (!adapter->params.vpd.port_type[++j])
+-			;
+-
+-		pti = &port_types[adapter->params.vpd.port_type[j]];
+-		ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
+-		if (ret)
+-			return ret;
+-		p->phy.ops->power_down(&p->phy, 1);
+-	}
+-
+-return 0;
+-}
+-

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0004_undo_240.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0004_undo_240.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0004_undo_240.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,120 @@
+commit 48c4b6dbb7e246957e13302668acf7c77e4f8b3a
+Author: Divy Le Ray <divy at chelsio.com>
+Date:   Tue May 6 19:25:56 2008 -0700
+
+    cxgb3 - fix port up/down error path
+    
+    Fix failure paths when ports are stopped and restarted
+    in EEH recovery.
+    
+    Signed-off-by: Divy Le Ray <divy at chelsio.com>
+    Signed-off-by: Jeff Garzik <jgarzik at redhat.com>
+
+---
+ drivers/net/cxgb3/adapter.h    |    1 -
+ drivers/net/cxgb3/cxgb3_main.c |   32 +++++++++++++++-----------------
+ 2 files changed, 15 insertions(+), 18 deletions(-)
+
+Index: ofed_kernel/drivers/net/cxgb3/adapter.h
+===================================================================
+--- ofed_kernel.orig/drivers/net/cxgb3/adapter.h
++++ ofed_kernel/drivers/net/cxgb3/adapter.h
+@@ -72,7 +72,6 @@ enum {				/* adapter flags */
+ 	USING_MSIX = (1 << 2),
+ 	QUEUES_BOUND = (1 << 3),
+ 	TP_PARITY_INIT = (1 << 4),
+-	NAPI_INIT = (1 << 5),
+ };
+ 
+ struct fl_pg_chunk {
+Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
+===================================================================
+--- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
++++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
+@@ -421,13 +421,6 @@ static void init_napi(struct adapter *ad
+ 			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
+ 				       64);
+ 	}
+-
+-	/*
+-	 * netif_napi_add() can be called only once per napi_struct because it
+-	 * adds each new napi_struct to a list.  Be careful not to call it a
+-	 * second time, e.g., during EEH recovery, by making a note of it.
+-	 */
+-	adap->flags |= NAPI_INIT;
+ }
+ 
+ /*
+@@ -903,8 +896,7 @@ static int cxgb_up(struct adapter *adap)
+ 			goto out;
+ 
+ 		setup_rss(adap);
+-		if (!(adap->flags & NAPI_INIT))
+-			init_napi(adap);
++		init_napi(adap);
+ 		adap->flags |= FULL_INIT_DONE;
+ 	}
+ 
+@@ -1007,7 +999,7 @@ static int offload_open(struct net_devic
+ 		return 0;
+ 
+ 	if (!adap_up && (err = cxgb_up(adapter)) < 0)
+-		goto out;
++		return err;
+ 
+ 	t3_tp_set_offload_mode(adapter, 1);
+ 	tdev->lldev = adapter->port[0];
+@@ -1069,8 +1061,10 @@ static int cxgb_open(struct net_device *
+ 	int other_ports = adapter->open_device_map & PORT_MASK;
+ 	int err;
+ 
+-	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
++	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
++		quiesce_rx(adapter);
+ 		return err;
++	}
+ 
+ 	set_bit(pi->port_id, &adapter->open_device_map);
+ 	if (is_offload(adapter) && !ofld_disable) {
+@@ -2456,7 +2450,7 @@ static pci_ers_result_t t3_io_error_dete
+ 
+ 	pci_disable_device(pdev);
+ 
+-	/* Request a slot reset. */
++	/* Request a slot slot reset. */
+ 	return PCI_ERS_RESULT_NEED_RESET;
+ }
+ 
+@@ -2473,16 +2467,13 @@ static pci_ers_result_t t3_io_slot_reset
+ 	if (pci_enable_device(pdev)) {
+ 		dev_err(&pdev->dev,
+ 			"Cannot re-enable PCI device after reset.\n");
+-		goto err;
++		return PCI_ERS_RESULT_DISCONNECT;
+ 	}
+ 	pci_set_master(pdev);
+ 
+-	if (t3_prep_adapter(adapter, adapter->params.info, 1))
+-		goto err;
++	t3_prep_adapter(adapter, adapter->params.info, 1);
+ 
+ 	return PCI_ERS_RESULT_RECOVERED;
+-err:
+-	return PCI_ERS_RESULT_DISCONNECT;
+ }
+ 
+ /**
+@@ -2511,6 +2502,13 @@ static void t3_io_resume(struct pci_dev 
+ 			netif_device_attach(netdev);
+ 		}
+ 	}
++
++	if (is_offload(adapter)) {
++		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
++		if (offload_open(adapter->port[0]))
++			printk(KERN_WARNING
++			       "Could not bring back offload capabilities\n");
++	}
+ }
+ 
+ static struct pci_error_handlers t3_err_handler = {

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0008_pci_dma_mapping_error_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0008_pci_dma_mapping_error_to_2_6_26.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0008_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,17 @@
+---
+ drivers/net/cxgb3/sge.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: ofed_kernel/drivers/net/cxgb3/sge.c
+===================================================================
+--- ofed_kernel.orig/drivers/net/cxgb3/sge.c
++++ ofed_kernel/drivers/net/cxgb3/sge.c
+@@ -386,7 +386,7 @@ static inline int add_one_rx_buf(void *v
+ 	dma_addr_t mapping;
+ 
+ 	mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
+-	if (unlikely(pci_dma_mapping_error(pdev, mapping)))
++	if (unlikely(pci_dma_mapping_error(mapping)))
+ 		return -ENOMEM;
+ 
+ 	pci_unmap_addr_set(sd, dma_addr, mapping);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0010_napi.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0010_napi.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0010_napi.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,610 @@
+diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
+index 3b33ecb..21dad82 100644
+--- a/drivers/net/cxgb3/adapter.h
++++ b/drivers/net/cxgb3/adapter.h
+@@ -48,12 +48,10 @@
+ 
+ struct vlan_group;
+ struct adapter;
+-struct sge_qset;
+ 
+ struct port_info {
+ 	struct adapter *adapter;
+ 	struct vlan_group *vlan_grp;
+-	struct sge_qset *qs;
+ 	u8 port_id;
+ 	u8 rx_csum_offload;
+ 	u8 nqsets;
+@@ -183,8 +181,6 @@ enum {				/* per port SGE statistics */
+ #define T3_MAX_LRO_MAX_PKTS 64
+ 
+ struct sge_qset {		/* an SGE queue set */
+-	struct adapter *adap;
+-	struct napi_struct napi;
+ 	struct sge_rspq rspq;
+ 	struct sge_fl fl[SGE_RXQ_PER_SET];
+ 	struct sge_txq txq[SGE_TXQ_PER_SET];
+@@ -195,7 +191,7 @@ struct sge_qset {		/* an SGE queue set */
+ 	int lro_enabled;
+ 	int lro_frag_len;
+ 	void *lro_va;
+-	struct net_device *netdev;
++	struct net_device *netdev;	/* associated net device */
+ 	unsigned long txq_stopped;	/* which Tx queues are stopped */
+ 	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
+ 	unsigned long port_stats[SGE_PSTAT_MAX];
+@@ -240,6 +236,12 @@ struct adapter {
+ 	struct delayed_work adap_check_task;
+ 	struct work_struct ext_intr_handler_task;
+ 
++	/*
++	 * Dummy netdevices are needed when using multiple receive queues with
++	 * NAPI as each netdevice can service only one queue.
++	 */
++	struct net_device *dummy_netdev[SGE_QSETS - 1];
++
+ 	struct dentry *debugfs_root;
+ 
+ 	struct mutex mdio_lock;
+@@ -266,6 +268,12 @@ static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
+ 	return netdev_priv(adap->port[idx]);
+ }
+ 
++/*
++ * We use the spare atalk_ptr to map a net device to its SGE queue set.
++ * This is a macro so it can be used as l-value.
++ */
++#define dev2qset(netdev) ((netdev)->atalk_ptr)
++
+ #define OFFLOAD_DEVMAP_BIT 15
+ 
+ #define tdev2adap(d) container_of(d, struct adapter, tdev)
+@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
+ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
+ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
+ 		      int irq_vec_idx, const struct qset_params *p,
+-		      int ntxq, struct net_device *dev);
++		      int ntxq, struct net_device *netdev);
+ int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
+ 		unsigned char *data);
+ irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 0f4c694..342d441 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -435,17 +435,49 @@ static void setup_rss(struct adapter *adap)
+ 		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
+ }
+ 
+-static void init_napi(struct adapter *adap)
++/*
++ * If we have multiple receive queues per port serviced by NAPI we need one
++ * netdevice per queue as NAPI operates on netdevices.  We already have one
++ * netdevice, namely the one associated with the interface, so we use dummy
++ * ones for any additional queues.  Note that these netdevices exist purely
++ * so that NAPI has something to work with, they do not represent network
++ * ports and are not registered.
++ */
++static int init_dummy_netdevs(struct adapter *adap)
+ {
+-	int i;
++	int i, j, dummy_idx = 0;
++	struct net_device *nd;
++
++	for_each_port(adap, i) {
++		struct net_device *dev = adap->port[i];
++		const struct port_info *pi = netdev_priv(dev);
++
++		for (j = 0; j < pi->nqsets - 1; j++) {
++			if (!adap->dummy_netdev[dummy_idx]) {
++				struct port_info *p;
++
++				nd = alloc_netdev(sizeof(*p), "", ether_setup);
++				if (!nd)
++					goto free_all;
+ 
+-	for (i = 0; i < SGE_QSETS; i++) {
+-		struct sge_qset *qs = &adap->sge.qs[i];
++				p = netdev_priv(nd);
++				p->adapter = adap;
++				nd->weight = 64;
++				set_bit(__LINK_STATE_START, &nd->state);
++				adap->dummy_netdev[dummy_idx] = nd;
++			}
++			strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
++			dummy_idx++;
++		}
++	}
++	return 0;
+ 
+-		if (qs->adap)
+-			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
+-				       64);
++free_all:
++	while (--dummy_idx >= 0) {
++		free_netdev(adap->dummy_netdev[dummy_idx]);
++		adap->dummy_netdev[dummy_idx] = NULL;
+ 	}
++	return -ENOMEM;
+ }
+ 
+ /*
+@@ -456,18 +488,20 @@ static void init_napi(struct adapter *adap)
+ static void quiesce_rx(struct adapter *adap)
+ {
+ 	int i;
++	struct net_device *dev;
+ 
+-	for (i = 0; i < SGE_QSETS; i++)
+-		if (adap->sge.qs[i].adap)
+-			napi_disable(&adap->sge.qs[i].napi);
+-}
++	for_each_port(adap, i) {
++		dev = adap->port[i];
++		while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
++			msleep(1);
++	}
+ 
+-static void enable_all_napi(struct adapter *adap)
+-{
+-	int i;
+-	for (i = 0; i < SGE_QSETS; i++)
+-		if (adap->sge.qs[i].adap)
+-			napi_enable(&adap->sge.qs[i].napi);
++	for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
++		dev = adap->dummy_netdev[i];
++		if (dev)
++			while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
++				msleep(1);
++	}
+ }
+ 
+ /**
+@@ -480,7 +514,7 @@ static void enable_all_napi(struct adapter *adap)
+  */
+ static int setup_sge_qsets(struct adapter *adap)
+ {
+-	int i, j, err, irq_idx = 0, qset_idx = 0;
++	int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
+ 	unsigned int ntxq = SGE_TXQ_PER_SET;
+ 
+ 	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
+@@ -488,14 +522,15 @@ static int setup_sge_qsets(struct adapter *adap)
+ 
+ 	for_each_port(adap, i) {
+ 		struct net_device *dev = adap->port[i];
+-		struct port_info *pi = netdev_priv(dev);
++		const struct port_info *pi = netdev_priv(dev);
+ 
+-		pi->qs = &adap->sge.qs[pi->first_qset];
+ 		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
+ 			err = t3_sge_alloc_qset(adap, qset_idx, 1,
+ 				(adap->flags & USING_MSIX) ? qset_idx + 1 :
+ 							     irq_idx,
+-				&adap->params.sge.qset[qset_idx], ntxq, dev);
++				&adap->params.sge.qset[qset_idx], ntxq,
++				j == 0 ? dev :
++					 adap-> dummy_netdev[dummy_dev_idx++]);
+ 			if (err) {
+ 				t3_free_sge_resources(adap);
+ 				return err;
+@@ -909,6 +944,10 @@ static int cxgb_up(struct adapter *adap)
+ 				goto out;
+ 		}
+ 
++ 		err = init_dummy_netdevs(adap);
++ 		if (err)
++ 			goto out;
++
+ 		err = t3_init_hw(adap, 0);
+ 		if (err)
+ 			goto out;
+@@ -921,7 +960,6 @@ static int cxgb_up(struct adapter *adap)
+ 			goto out;
+ 
+ 		setup_rss(adap);
+-		init_napi(adap);
+ 		adap->flags |= FULL_INIT_DONE;
+ 	}
+ 
+@@ -949,7 +987,6 @@ static int cxgb_up(struct adapter *adap)
+ 				      adap->name, adap)))
+ 		goto irq_err;
+ 
+-	enable_all_napi(adap);
+ 	t3_sge_start(adap);
+ 	t3_intr_enable(adap);
+ 
+@@ -1086,10 +1123,8 @@ static int cxgb_open(struct net_device *dev)
+ 	int other_ports = adapter->open_device_map & PORT_MASK;
+ 	int err;
+ 
+-	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
+-		quiesce_rx(adapter);
++	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
+ 		return err;
+-	}
+ 
+ 	set_bit(pi->port_id, &adapter->open_device_map);
+ 	if (is_offload(adapter) && !ofld_disable) {
+@@ -2736,6 +2771,7 @@ static int __devinit init_one(struct pci_dev *pdev,
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ 		netdev->poll_controller = cxgb_netpoll;
+ #endif
++		netdev->weight = 64;
+ 
+ 		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
+ 	}
+@@ -2836,6 +2872,12 @@ static void __devexit remove_one(struct pci_dev *pdev)
+ 		t3_free_sge_resources(adapter);
+ 		cxgb_disable_msi(adapter);
+ 
++		for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
++			if (adapter->dummy_netdev[i]) {
++				free_netdev(adapter->dummy_netdev[i]);
++				adapter->dummy_netdev[i] = NULL;
++			}
++
+ 		for_each_port(adapter, i)
+ 			if (adapter->port[i])
+ 				free_netdev(adapter->port[i]);
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index f6bc6fe..3bbf626 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
+ 				  q->rspq.desc, q->rspq.phys_addr);
+ 	}
+ 
++	if (q->netdev)
++		q->netdev->atalk_ptr = NULL;
++
+ 	memset(q, 0, sizeof(*q));
+ }
+ 
+@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	unsigned int ndesc, pidx, credits, gen, compl;
+ 	const struct port_info *pi = netdev_priv(dev);
+ 	struct adapter *adap = pi->adapter;
+-	struct sge_qset *qs = pi->qs;
++	struct sge_qset *qs = dev2qset(dev);
+ 	struct sge_txq *q = &qs->txq[TXQ_ETH];
+ 
+ 	/*
+@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long data)
+ 	struct sk_buff *skb;
+ 	struct sge_qset *qs = (struct sge_qset *)data;
+ 	struct sge_txq *q = &qs->txq[TXQ_CTRL];
++	const struct port_info *pi = netdev_priv(qs->netdev);
++	struct adapter *adap = pi->adapter;
+ 
+ 	spin_lock(&q->lock);
+       again:reclaim_completed_tx_imm(q);
+ 
+-	while (q->in_use < q->size &&
+-	       (skb = __skb_dequeue(&q->sendq)) != NULL) {
++	while (q->in_use < q->size && (skb = __skb_dequeue(&q->sendq)) != NULL) {
+ 
+ 		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
+ 
+@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long data)
+ 
+ 	spin_unlock(&q->lock);
+ 	wmb();
+-	t3_write_reg(qs->adap, A_SG_KDOORBELL,
++	t3_write_reg(adap, A_SG_KDOORBELL,
+ 		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+ }
+ 
+@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
+ 	else {
+ 		struct sge_qset *qs = rspq_to_qset(q);
+ 
+-		napi_schedule(&qs->napi);
++		if (__netif_rx_schedule_prep(qs->netdev))
++			__netif_rx_schedule(qs->netdev);
+ 		q->rx_head = skb;
+ 	}
+ 	q->rx_tail = skb;
+@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev,
+  *	receive handler.  Batches need to be of modest size as we do prefetches
+  *	on the packets in each.
+  */
+-static int ofld_poll(struct napi_struct *napi, int budget)
++static int ofld_poll(struct net_device *dev, int *budget)
+ {
+-	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
++	const struct port_info *pi = netdev_priv(dev);
++	struct adapter *adapter = pi->adapter;
++	struct sge_qset *qs = dev2qset(dev);
+ 	struct sge_rspq *q = &qs->rspq;
+-	struct adapter *adapter = qs->adap;
+-	int work_done = 0;
++	int work_done, limit = min(*budget, dev->quota), avail = limit;
+ 
+-	while (work_done < budget) {
++	while (avail) {
+ 		struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
+ 		int ngathered;
+ 
+ 		spin_lock_irq(&q->lock);
+ 		head = q->rx_head;
+ 		if (!head) {
+-			napi_complete(napi);
++			work_done = limit - avail;
++			*budget -= work_done;
++			dev->quota -= work_done;
++			__netif_rx_complete(dev);
+ 			spin_unlock_irq(&q->lock);
+-			return work_done;
++			return 0;
+ 		}
+ 
+ 		tail = q->rx_tail;
+ 		q->rx_head = q->rx_tail = NULL;
+ 		spin_unlock_irq(&q->lock);
+ 
+-		for (ngathered = 0; work_done < budget && head; work_done++) {
++		for (ngathered = 0; avail && head; avail--) {
+ 			prefetch(head->data);
+ 			skbs[ngathered] = head;
+ 			head = head->next;
+@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct *napi, int budget)
+ 		}
+ 		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
+ 	}
+-
+-	return work_done;
++	work_done = limit - avail;
++	*budget -= work_done;
++	dev->quota -= work_done;
++	return 1;
+ }
+ 
+ /**
+@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const struct rsp_desc *r)
+ 
+ /**
+  *	napi_rx_handler - the NAPI handler for Rx processing
+- *	@napi: the napi instance
++ *	@dev: the net device
+  *	@budget: how many packets we can process in this round
+  *
+  *	Handler for new data events when using NAPI.
+  */
+-static int napi_rx_handler(struct napi_struct *napi, int budget)
++static int napi_rx_handler(struct net_device *dev, int *budget)
+ {
+-	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
+-	struct adapter *adap = qs->adap;
+-	int work_done = process_responses(adap, qs, budget);
++	const struct port_info *pi = netdev_priv(dev);
++	struct adapter *adap = pi->adapter;
++	struct sge_qset *qs = dev2qset(dev);
++	int effective_budget = min(*budget, dev->quota);
+ 
+-	if (likely(work_done < budget)) {
+-		napi_complete(napi);
++	int work_done = process_responses(adap, qs, effective_budget);
++	*budget -= work_done;
++	dev->quota -= work_done;
+ 
+-		/*
+-		 * Because we don't atomically flush the following
+-		 * write it is possible that in very rare cases it can
+-		 * reach the device in a way that races with a new
+-		 * response being written plus an error interrupt
+-		 * causing the NAPI interrupt handler below to return
+-		 * unhandled status to the OS.  To protect against
+-		 * this would require flushing the write and doing
+-		 * both the write and the flush with interrupts off.
+-		 * Way too expensive and unjustifiable given the
+-		 * rarity of the race.
+-		 *
+-		 * The race cannot happen at all with MSI-X.
+-		 */
+-		t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
+-			     V_NEWTIMER(qs->rspq.next_holdoff) |
+-			     V_NEWINDEX(qs->rspq.cidx));
+-	}
+-	return work_done;
++	if (work_done >= effective_budget)
++		return 1;
++
++	netif_rx_complete(dev);
++
++	/*
++	 * Because we don't atomically flush the following write it is
++	 * possible that in very rare cases it can reach the device in a way
++	 * that races with a new response being written plus an error interrupt
++	 * causing the NAPI interrupt handler below to return unhandled status
++	 * to the OS.  To protect against this would require flushing the write
++	 * and doing both the write and the flush with interrupts off.  Way too
++	 * expensive and unjustifiable given the rarity of the race.
++	 *
++	 * The race cannot happen at all with MSI-X.
++	 */
++	t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
++		     V_NEWTIMER(qs->rspq.next_holdoff) |
++		     V_NEWINDEX(qs->rspq.cidx));
++	return 0;
+ }
+ 
+ /*
+  * Returns true if the device is already scheduled for polling.
+  */
+-static inline int napi_is_scheduled(struct napi_struct *napi)
++static inline int napi_is_scheduled(struct net_device *dev)
+ {
+-	return test_bit(NAPI_STATE_SCHED, &napi->state);
++	return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
+ }
+ 
+ /**
+@@ -2448,7 +2462,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
+ 			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
+ 		return 0;
+ 	}
+-	napi_schedule(&qs->napi);
++	if (likely(__netif_rx_schedule_prep(qs->netdev)))
++		__netif_rx_schedule(qs->netdev);
+ 	return 1;
+ }
+ 
+@@ -2459,7 +2474,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
+ irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
+ {
+ 	struct sge_qset *qs = cookie;
+-	struct adapter *adap = qs->adap;
++	const struct port_info *pi = netdev_priv(qs->netdev);
++	struct adapter *adap = pi->adapter;
+ 	struct sge_rspq *q = &qs->rspq;
+ 
+ 	spin_lock(&q->lock);
+@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
+ static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
+ {
+ 	struct sge_qset *qs = cookie;
++	const struct port_info *pi = netdev_priv(qs->netdev);
++	struct adapter *adap = pi->adapter;
+ 	struct sge_rspq *q = &qs->rspq;
+ 
+ 	spin_lock(&q->lock);
+ 
+-	if (handle_responses(qs->adap, q) < 0)
++	if (handle_responses(adap, q) < 0)
+ 		q->unhandled_irqs++;
+ 	spin_unlock(&q->lock);
+ 	return IRQ_HANDLED;
+@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, void *cookie)
+ 	return IRQ_HANDLED;
+ }
+ 
+-static int rspq_check_napi(struct sge_qset *qs)
++static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q)
+ {
+-	struct sge_rspq *q = &qs->rspq;
+-
+-	if (!napi_is_scheduled(&qs->napi) &&
+-	    is_new_response(&q->desc[q->cidx], q)) {
+-		napi_schedule(&qs->napi);
++	if (!napi_is_scheduled(dev) && is_new_response(&q->desc[q->cidx], q)) {
++		if (likely(__netif_rx_schedule_prep(dev)))
++			__netif_rx_schedule(dev);
+ 		return 1;
+ 	}
+ 	return 0;
+@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
+ 
+ 	spin_lock(&q->lock);
+ 
+-	new_packets = rspq_check_napi(&adap->sge.qs[0]);
++	new_packets = rspq_check_napi(adap->sge.qs[0].netdev, q);
+ 	if (adap->params.nports == 2)
+-		new_packets += rspq_check_napi(&adap->sge.qs[1]);
++		new_packets += rspq_check_napi(adap->sge.qs[1].netdev,
++					       &adap->sge.qs[1].rspq);
+ 	if (!new_packets && t3_slow_intr_handler(adap) == 0)
+ 		q->unhandled_irqs++;
+ 
+@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, void *cookie)
+ static irqreturn_t t3b_intr_napi(int irq, void *cookie)
+ {
+ 	u32 map;
++	struct net_device *dev;
+ 	struct adapter *adap = cookie;
+-	struct sge_qset *qs0 = &adap->sge.qs[0];
+-	struct sge_rspq *q0 = &qs0->rspq;
++	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
+ 
+ 	t3_write_reg(adap, A_PL_CLI, 0);
+ 	map = t3_read_reg(adap, A_SG_DATA_INTR);
+@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq, void *cookie)
+ 	if (unlikely(map & F_ERRINTR))
+ 		t3_slow_intr_handler(adap);
+ 
+-	if (likely(map & 1))
+-		napi_schedule(&qs0->napi);
++	if (likely(map & 1)) {
++		dev = adap->sge.qs[0].netdev;
+ 
+-	if (map & 2)
+-		napi_schedule(&adap->sge.qs[1].napi);
++		if (likely(__netif_rx_schedule_prep(dev)))
++			__netif_rx_schedule(dev);
++	}
++	if (map & 2) {
++		dev = adap->sge.qs[1].netdev;
++
++		if (likely(__netif_rx_schedule_prep(dev)))
++			__netif_rx_schedule(dev);
++	}
+ 
+ 	spin_unlock(&q0->lock);
+ 	return IRQ_HANDLED;
+@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long data)
+ {
+ 	spinlock_t *lock;
+ 	struct sge_qset *qs = (struct sge_qset *)data;
+-	struct adapter *adap = qs->adap;
++	const struct port_info *pi = netdev_priv(qs->netdev);
++	struct adapter *adap = pi->adapter;
+ 
+ 	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
+ 		reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
+@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long data)
+ 		spin_unlock(&qs->txq[TXQ_OFLD].lock);
+ 	}
+ 	lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
+-					    &adap->sge.qs[0].rspq.lock;
++	    &adap->sge.qs[0].rspq.lock;
+ 	if (spin_trylock_irq(lock)) {
+-		if (!napi_is_scheduled(&qs->napi)) {
++		if (!napi_is_scheduled(qs->netdev)) {
+ 			u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
+ 
+ 			if (qs->fl[0].credits < qs->fl[0].size)
+@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long data)
+  */
+ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
+ {
++	if (!qs->netdev)
++		return;
++
+ 	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
+ 	qs->rspq.polling = p->polling;
+-	qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
++	qs->netdev->poll = p->polling ? napi_rx_handler : ofld_poll;
+ }
+ 
+ /**
+@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
+  */
+ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
+ 		      int irq_vec_idx, const struct qset_params *p,
+-		      int ntxq, struct net_device *dev)
++		      int ntxq, struct net_device *netdev)
+ {
+ 	int i, avail, ret = -ENOMEM;
+ 	struct sge_qset *q = &adapter->sge.qs[id];
+@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
+ 	}
+ 
+ 	spin_unlock_irq(&adapter->sge.reg_lock);
+-
+-	q->adap = adapter;
+-	q->netdev = dev;
++	q->netdev = netdev;
+ 	t3_update_qset_coalesce(q, p);
+ 
++ 	/*
++ 	 * We use atalk_ptr as a backpointer to a qset.  In case a device is
++ 	 * associated with multiple queue sets only the first one sets
++ 	 * atalk_ptr.
++ 	 */
++ 	if (netdev->atalk_ptr == NULL)
++ 		netdev->atalk_ptr = q;
++
+ 	init_lro_mgr(q, lro_mgr);
+ 
+ 	avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
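
Note: this patch converts cxgb3 from the napi_struct interface introduced in
2.6.24 back to the original net_device-based NAPI that 2.6.18-EL5.3 ships,
which is why it reintroduces the dummy netdevices, the dev2qset()/atalk_ptr
backpointer and the *budget/dev->quota bookkeeping seen above.  The fragment
below only contrasts the two poll-handler shapes; the function names are
invented and this is not additional driver code.

#include <linux/kernel.h>
#include <linux/netdevice.h>

/* 2.6.24+ NAPI: one napi_struct per queue, budget passed by value. */
static int example_poll_new(struct napi_struct *napi, int budget)
{
        int work_done = 0;              /* ... process up to 'budget' packets ... */

        if (work_done < budget)
                napi_complete(napi);    /* done: leave polling, re-enable IRQs */
        return work_done;
}

/*
 * Pre-2.6.24 NAPI: polling state lives in the net_device itself, so a driver
 * with several RX queues per port needs one (possibly dummy) netdev per
 * queue, and the handler must update both *budget and dev->quota.
 */
static int example_poll_old(struct net_device *dev, int *budget)
{
        int limit = min(*budget, dev->quota);
        int work_done = 0;              /* ... process up to 'limit' packets ... */

        *budget -= work_done;
        dev->quota -= work_done;
        if (work_done < limit) {
                netif_rx_complete(dev); /* leave polling mode */
                return 0;               /* no more work */
        }
        return 1;                       /* still work pending, poll again */
}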

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0020_sysfs.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0020_sysfs.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0020_sysfs.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,202 @@
+---
+ drivers/net/cxgb3/cxgb3_main.c |   78 +++++++++++++++++++----------------------
+ 1 file changed, 38 insertions(+), 40 deletions(-)
+
+Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
+===================================================================
+--- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
++++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
+@@ -76,6 +76,8 @@ enum {
+ 
+ #define EEPROM_MAGIC 0x38E2F10C
+ 
++#define to_net_dev(class) container_of(class, struct net_device, class_dev)
++
+ #define CH_DEVICE(devid, idx) \
+ 	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
+ 
+@@ -516,7 +518,7 @@ static int setup_sge_qsets(struct adapte
+ 	return 0;
+ }
+ 
+-static ssize_t attr_show(struct device *d, char *buf,
++static ssize_t attr_show(struct class_device *d, char *buf,
+ 			 ssize_t(*format) (struct net_device *, char *))
+ {
+ 	ssize_t len;
+@@ -528,7 +530,7 @@ static ssize_t attr_show(struct device *
+ 	return len;
+ }
+ 
+-static ssize_t attr_store(struct device *d,
++static ssize_t attr_store(struct class_device *d,
+ 			  const char *buf, size_t len,
+ 			  ssize_t(*set) (struct net_device *, unsigned int),
+ 			  unsigned int min_val, unsigned int max_val)
+@@ -559,10 +561,9 @@ static ssize_t format_##name(struct net_
+ 	struct adapter *adap = pi->adapter; \
+ 	return sprintf(buf, "%u\n", val_expr); \
+ } \
+-static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
+-			   char *buf) \
++static ssize_t show_##name(struct class_device *cd, char *buf) \
+ { \
+-	return attr_show(d, buf, format_##name); \
++	return attr_show(cd, buf, format_##name); \
+ }
+ 
+ static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
+@@ -582,10 +583,10 @@ static ssize_t set_nfilters(struct net_d
+ 	return 0;
+ }
+ 
+-static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
+-			      const char *buf, size_t len)
++static ssize_t store_nfilters(struct class_device *cd, const char *buf,
++			      size_t len)
+ {
+-	return attr_store(d, buf, len, set_nfilters, 0, ~0);
++	return attr_store(cd, buf, len, set_nfilters, 0, ~0);
+ }
+ 
+ static ssize_t set_nservers(struct net_device *dev, unsigned int val)
+@@ -602,35 +603,34 @@ static ssize_t set_nservers(struct net_d
+ 	return 0;
+ }
+ 
+-static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
+-			      const char *buf, size_t len)
++static ssize_t store_nservers(struct class_device *cd, const char *buf,
++			      size_t len)
+ {
+-	return attr_store(d, buf, len, set_nservers, 0, ~0);
++	return attr_store(cd, buf, len, set_nservers, 0, ~0);
+ }
+ 
+ #define CXGB3_ATTR_R(name, val_expr) \
+ CXGB3_SHOW(name, val_expr) \
+-static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
++static CLASS_DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
+ 
+ #define CXGB3_ATTR_RW(name, val_expr, store_method) \
+ CXGB3_SHOW(name, val_expr) \
+-static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
++static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
+ 
+ CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
+ CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
+ CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
+ 
+ static struct attribute *cxgb3_attrs[] = {
+-	&dev_attr_cam_size.attr,
+-	&dev_attr_nfilters.attr,
+-	&dev_attr_nservers.attr,
++	&class_device_attr_cam_size.attr,
++	&class_device_attr_nfilters.attr,
++	&class_device_attr_nservers.attr,
+ 	NULL
+ };
+ 
+ static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
+ 
+-static ssize_t tm_attr_show(struct device *d,
+-			    char *buf, int sched)
++static ssize_t tm_attr_show(struct class_device *d, char *buf, int sched)
+ {
+ 	struct port_info *pi = netdev_priv(to_net_dev(d));
+ 	struct adapter *adap = pi->adapter;
+@@ -655,8 +655,8 @@ static ssize_t tm_attr_show(struct devic
+ 	return len;
+ }
+ 
+-static ssize_t tm_attr_store(struct device *d,
+-			     const char *buf, size_t len, int sched)
++static ssize_t tm_attr_store(struct class_device *d, const char *buf,
++			     size_t len, int sched)
+ {
+ 	struct port_info *pi = netdev_priv(to_net_dev(d));
+ 	struct adapter *adap = pi->adapter;
+@@ -680,17 +680,15 @@ static ssize_t tm_attr_store(struct devi
+ }
+ 
+ #define TM_ATTR(name, sched) \
+-static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
+-			   char *buf) \
++static ssize_t show_##name(struct class_device *cd, char *buf) \
+ { \
+-	return tm_attr_show(d, buf, sched); \
++	return tm_attr_show(cd, buf, sched); \
+ } \
+-static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
+-			    const char *buf, size_t len) \
++static ssize_t store_##name(struct class_device *cd, const char *buf, size_t len) \
+ { \
+-	return tm_attr_store(d, buf, len, sched); \
++	return tm_attr_store(cd, buf, len, sched); \
+ } \
+-static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
++static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
+ 
+ TM_ATTR(sched0, 0);
+ TM_ATTR(sched1, 1);
+@@ -702,14 +700,14 @@ TM_ATTR(sched6, 6);
+ TM_ATTR(sched7, 7);
+ 
+ static struct attribute *offload_attrs[] = {
+-	&dev_attr_sched0.attr,
+-	&dev_attr_sched1.attr,
+-	&dev_attr_sched2.attr,
+-	&dev_attr_sched3.attr,
+-	&dev_attr_sched4.attr,
+-	&dev_attr_sched5.attr,
+-	&dev_attr_sched6.attr,
+-	&dev_attr_sched7.attr,
++	&class_device_attr_sched0.attr,
++	&class_device_attr_sched1.attr,
++	&class_device_attr_sched2.attr,
++	&class_device_attr_sched3.attr,
++	&class_device_attr_sched4.attr,
++	&class_device_attr_sched5.attr,
++	&class_device_attr_sched6.attr,
++	&class_device_attr_sched7.attr,
+ 	NULL
+ };
+ 
+@@ -1051,8 +1049,8 @@ static int offload_open(struct net_devic
+ 		     adapter->port[0]->mtu : 0xffff);
+ 	init_smt(adapter);
+ 
+-	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
+-		dev_dbg(&dev->dev, "cannot create sysfs group\n");
++ 	if (sysfs_create_group(&tdev->lldev->class_dev.kobj, &offload_attr_group))
++		dev_dbg(&adapter->pdev->dev, "cannot create sysfs group\n");
+ 
+ 	/* Call back all registered clients */
+ 	cxgb3_add_clients(tdev);
+@@ -1077,7 +1075,7 @@ static int offload_close(struct t3cdev *
+ 	/* Call back all registered clients */
+ 	cxgb3_remove_clients(tdev);
+ 
+-	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
++	sysfs_remove_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);
+ 
+ 	tdev->lldev = NULL;
+ 	cxgb3_set_dummy_ops(tdev);
+@@ -2788,7 +2786,7 @@ static int __devinit init_one(struct pci
+ 	else if (msi > 0 && pci_enable_msi(pdev) == 0)
+ 		adapter->flags |= USING_MSI;
+ 
+-	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
++	err = sysfs_create_group(&adapter->port[0]->class_dev.kobj,
+ 				 &cxgb3_attr_group);
+ 
+ 	print_port_info(adapter, ai);
+@@ -2819,7 +2817,7 @@ static void __devexit remove_one(struct 
+ 		int i;
+ 
+ 		t3_sge_stop(adapter);
+-		sysfs_remove_group(&adapter->port[0]->dev.kobj,
++		sysfs_remove_group(&adapter->port[0]->class_dev.kobj,
+ 				   &cxgb3_attr_group);
+ 
+ 		if (is_offload(adapter)) {
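
Note: on 2.6.18 a net_device is represented in sysfs through its embedded
struct class_device (dev->class_dev) rather than a struct device, so show and
store callbacks take a class_device pointer and are declared with
CLASS_DEVICE_ATTR; that is what the conversion above restores.  A minimal
sketch of the older attribute style (attribute name and callback invented for
illustration):

#include <linux/device.h>
#include <linux/netdevice.h>

#define example_to_net_dev(cd) container_of(cd, struct net_device, class_dev)

/* Old-style show callback: no struct device_attribute argument. */
static ssize_t show_ifname(struct class_device *cd, char *buf)
{
        struct net_device *ndev = example_to_net_dev(cd);

        return sprintf(buf, "%s\n", ndev->name);
}

static CLASS_DEVICE_ATTR(ifname, S_IRUGO, show_ifname, NULL);

/*
 * The attribute is then created against the netdev's kobject, e.g.
 * sysfs_create_file(&ndev->class_dev.kobj, &class_device_attr_ifname.attr);
 */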

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0030_sset.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0030_sset.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0030_sset.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,34 @@
+---
+ drivers/net/cxgb3/cxgb3_main.c |   11 +++--------
+ 1 file changed, 3 insertions(+), 8 deletions(-)
+
+Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
+===================================================================
+--- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
++++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
+@@ -1249,14 +1249,9 @@ static char stats_strings[][ETH_GSTRING_
+ 
+ };
+ 
+-static int get_sset_count(struct net_device *dev, int sset)
++static int get_stats_count(struct net_device *dev)
+ {
+-	switch (sset) {
+-	case ETH_SS_STATS:
+-		return ARRAY_SIZE(stats_strings);
+-	default:
+-		return -EOPNOTSUPP;
+-	}
++	return ARRAY_SIZE(stats_strings);
+ }
+ 
+ #define T3_REGMAP_SIZE (3 * 1024)
+@@ -1774,7 +1769,7 @@ static const struct ethtool_ops cxgb_eth
+ 	.get_strings = get_strings,
+ 	.phys_id = cxgb3_phys_id,
+ 	.nway_reset = restart_autoneg,
+-	.get_sset_count = get_sset_count,
++	.get_stats_count = get_stats_count,
+ 	.get_ethtool_stats = get_stats,
+ 	.get_regs_len = get_regs_len,
+ 	.get_regs = get_regs,
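
Note: .get_sset_count() with the ETH_SS_STATS selector only exists in newer
kernels; the 2.6.18 ethtool_ops still uses per-purpose hooks such as
.get_stats_count(), hence the swap above.  Schematically (stats_strings[] is
a placeholder table, not the cxgb3 one):

#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

static const char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctets", "RxOctets",
};

/* Newer kernels: one selector-based hook for all string sets. */
static int example_get_sset_count(struct net_device *dev, int sset)
{
        return sset == ETH_SS_STATS ? ARRAY_SIZE(stats_strings) : -EOPNOTSUPP;
}

/* 2.6.18 era: a dedicated hook that only counts the statistics strings. */
static int example_get_stats_count(struct net_device *dev)
{
        return ARRAY_SIZE(stats_strings);
}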

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0100_remove_lro.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0100_remove_lro.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0100_remove_lro.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,491 @@
+---
+ drivers/net/cxgb3/adapter.h     |   14 --
+ drivers/net/cxgb3/common.h      |    1 
+ drivers/net/cxgb3/cxgb3_ioctl.h |    1 
+ drivers/net/cxgb3/cxgb3_main.c  |   19 ---
+ drivers/net/cxgb3/sge.c         |  230 ++--------------------------------------
+ drivers/net/cxgb3/t3_cpl.h      |   11 -
+ 6 files changed, 12 insertions(+), 264 deletions(-)
+
+Index: ofed_kernel/drivers/net/cxgb3/adapter.h
+===================================================================
+--- ofed_kernel.orig/drivers/net/cxgb3/adapter.h
++++ ofed_kernel/drivers/net/cxgb3/adapter.h
+@@ -42,7 +42,6 @@
+ #include <linux/cache.h>
+ #include <linux/mutex.h>
+ #include <linux/bitops.h>
+-#include <linux/inet_lro.h>
+ #include "t3cdev.h"
+ #include <asm/io.h>
+ 
+@@ -171,27 +170,14 @@ enum {				/* per port SGE statistics */
+ 	SGE_PSTAT_TX_CSUM,	/* # of TX checksum offloads */
+ 	SGE_PSTAT_VLANEX,	/* # of VLAN tag extractions */
+ 	SGE_PSTAT_VLANINS,	/* # of VLAN tag insertions */
+-	SGE_PSTAT_LRO_AGGR,	/* # of page chunks added to LRO sessions */
+-	SGE_PSTAT_LRO_FLUSHED,	/* # of flushed LRO sessions */
+-	SGE_PSTAT_LRO_NO_DESC,	/* # of overflown LRO sessions */
+ 
+ 	SGE_PSTAT_MAX		/* must be last */
+ };
+ 
+-#define T3_MAX_LRO_SES 8
+-#define T3_MAX_LRO_MAX_PKTS 64
+-
+ struct sge_qset {		/* an SGE queue set */
+ 	struct sge_rspq rspq;
+ 	struct sge_fl fl[SGE_RXQ_PER_SET];
+ 	struct sge_txq txq[SGE_TXQ_PER_SET];
+-	struct net_lro_mgr lro_mgr;
+-	struct net_lro_desc lro_desc[T3_MAX_LRO_SES];
+-	struct skb_frag_struct *lro_frag_tbl;
+-	int lro_nfrags;
+-	int lro_enabled;
+-	int lro_frag_len;
+-	void *lro_va;
+ 	struct net_device *netdev;	/* associated net device */
+ 	unsigned long txq_stopped;	/* which Tx queues are stopped */
+ 	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
+Index: ofed_kernel/drivers/net/cxgb3/common.h
+===================================================================
+--- ofed_kernel.orig/drivers/net/cxgb3/common.h
++++ ofed_kernel/drivers/net/cxgb3/common.h
+@@ -351,7 +351,6 @@ struct tp_params {
+ 
+ struct qset_params {		/* SGE queue set parameters */
+ 	unsigned int polling;	/* polling/interrupt service for rspq */
+-	unsigned int lro;	/* large receive offload */
+ 	unsigned int coalesce_usecs;	/* irq coalescing timer */
+ 	unsigned int rspq_size;	/* # of entries in response queue */
+ 	unsigned int fl_size;	/* # of entries in regular free list */
+Index: ofed_kernel/drivers/net/cxgb3/cxgb3_ioctl.h
+===================================================================
+--- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_ioctl.h
++++ ofed_kernel/drivers/net/cxgb3/cxgb3_ioctl.h
+@@ -90,7 +90,6 @@ struct ch_qset_params {
+ 	int32_t fl_size[2];
+ 	int32_t intr_lat;
+ 	int32_t polling;
+-	int32_t lro;
+ 	int32_t cong_thres;
+ };
+ 
+Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
+===================================================================
+--- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
++++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
+@@ -1239,9 +1239,6 @@ static char stats_strings[][ETH_GSTRING_
+ 	"VLANinsertions     ",
+ 	"TxCsumOffload      ",
+ 	"RxCsumGood         ",
+-	"LroAggregated      ",
+-	"LroFlushed         ",
+-	"LroNoDesc          ",
+ 	"RxDrops            ",
+ 
+ 	"CheckTXEnToggled   ",
+@@ -1365,9 +1362,6 @@ static void get_stats(struct net_device 
+ 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
+ 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
+ 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
+-	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR);
+-	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED);
+-	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC);
+ 	*data++ = s->rx_cong_drops;
+ 
+ 	*data++ = s->num_toggled;
+@@ -1586,13 +1580,6 @@ static int set_rx_csum(struct net_device
+ 	struct port_info *p = netdev_priv(dev);
+ 
+ 	p->rx_csum_offload = data;
+-	if (!data) {
+-		struct adapter *adap = p->adapter;
+-		int i;
+-
+-		for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
+-			adap->sge.qs[i].lro_enabled = 0;
+-	}
+ 	return 0;
+ }
+ 
+@@ -1865,11 +1852,6 @@ static int cxgb_extension_ioctl(struct n
+ 				}
+ 			}
+ 		}
+-		if (t.lro >= 0) {
+-			struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];
+-			q->lro = t.lro;
+-			qs->lro_enabled = t.lro;
+-		}
+ 		break;
+ 	}
+ 	case CHELSIO_GET_QSET_PARAMS:{
+@@ -1889,7 +1871,6 @@ static int cxgb_extension_ioctl(struct n
+ 		t.fl_size[0] = q->fl_size;
+ 		t.fl_size[1] = q->jumbo_size;
+ 		t.polling = q->polling;
+-		t.lro = q->lro;
+ 		t.intr_lat = q->coalesce_usecs;
+ 		t.cong_thres = q->cong_thres;
+ 
+Index: ofed_kernel/drivers/net/cxgb3/sge.c
+===================================================================
+--- ofed_kernel.orig/drivers/net/cxgb3/sge.c
++++ ofed_kernel/drivers/net/cxgb3/sge.c
+@@ -774,7 +774,7 @@ recycle:
+ 		goto recycle;
+ 
+ 	if (!skb)
+-		newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
++		newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
+ 	if (unlikely(!newskb)) {
+ 		if (!drop_thres)
+ 			return NULL;
+@@ -1854,10 +1854,9 @@ static void restart_tx(struct sge_qset *
+  *	if it was immediate data in a response.
+  */
+ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
+-		   struct sk_buff *skb, int pad, int lro)
++		   struct sk_buff *skb, int pad)
+ {
+ 	struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
+-	struct sge_qset *qs = rspq_to_qset(rq);
+ 	struct port_info *pi;
+ 
+ 	skb_pull(skb, sizeof(*p) + pad);
+@@ -1874,202 +1873,18 @@ static void rx_eth(struct adapter *adap,
+ 	if (unlikely(p->vlan_valid)) {
+ 		struct vlan_group *grp = pi->vlan_grp;
+ 
+-		qs->port_stats[SGE_PSTAT_VLANEX]++;
++		rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
+ 		if (likely(grp))
+-			if (lro)
+-				lro_vlan_hwaccel_receive_skb(&qs->lro_mgr, skb,
+-							     grp,
+-							     ntohs(p->vlan),
+-							     p);
+-			else
+-				__vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
+-					  	  rq->polling);
++			__vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
++					  rq->polling);
+ 		else
+ 			dev_kfree_skb_any(skb);
+-	} else if (rq->polling) {
+-		if (lro)
+-			lro_receive_skb(&qs->lro_mgr, skb, p);
+-		else
+-			netif_receive_skb(skb);
+-	} else
++	} else if (rq->polling)
++		netif_receive_skb(skb);
++	else
+ 		netif_rx(skb);
+ }
+ 
+-static inline int is_eth_tcp(u32 rss)
+-{
+-	return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
+-}
+-
+-/**
+- *	lro_frame_ok - check if an ingress packet is eligible for LRO
+- *	@p: the CPL header of the packet
+- *
+- *	Returns true if a received packet is eligible for LRO.
+- *	The following conditions must be true:
+- *	- packet is TCP/IP Ethernet II (checked elsewhere)
+- *	- not an IP fragment
+- *	- no IP options
+- *	- TCP/IP checksums are correct
+- *	- the packet is for this host
+- */
+-static inline int lro_frame_ok(const struct cpl_rx_pkt *p)
+-{
+-	const struct ethhdr *eh = (struct ethhdr *)(p + 1);
+-	const struct iphdr *ih = (struct iphdr *)(eh + 1);
+-
+-	return (*((u8 *)p + 1) & 0x90) == 0x10 && p->csum == htons(0xffff) &&
+-		eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2);
+-}
+-
+-#define TCP_FLAG_MASK (TCP_FLAG_CWR | TCP_FLAG_ECE | TCP_FLAG_URG |\
+-                       TCP_FLAG_ACK | TCP_FLAG_PSH | TCP_FLAG_RST |\
+-		                       TCP_FLAG_SYN | TCP_FLAG_FIN)
+-#define TSTAMP_WORD ((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |\
+-                     (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)
+-
+-/**
+- *	lro_segment_ok - check if a TCP segment is eligible for LRO
+- *	@tcph: the TCP header of the packet
+- *
+- *	Returns true if a TCP packet is eligible for LRO.  This requires that
+- *	the packet have only the ACK flag set and no TCP options besides
+- *	time stamps.
+- */
+-static inline int lro_segment_ok(const struct tcphdr *tcph)
+-{
+-	int optlen;
+-
+-	if (unlikely((tcp_flag_word(tcph) & TCP_FLAG_MASK) != TCP_FLAG_ACK))
+-		return 0;
+-
+-	optlen = (tcph->doff << 2) - sizeof(*tcph);
+-	if (optlen) {
+-		const u32 *opt = (const u32 *)(tcph + 1);
+-
+-		if (optlen != TCPOLEN_TSTAMP_ALIGNED ||
+-		    *opt != htonl(TSTAMP_WORD) || !opt[2])
+-			return 0;
+-	}
+-	return 1;
+-}
+-
+-static int t3_get_lro_header(void **eh,  void **iph, void **tcph,
+-			     u64 *hdr_flags, void *priv)
+-{
+-	const struct cpl_rx_pkt *cpl = priv;
+-
+-	if (!lro_frame_ok(cpl))
+-		return -1;
+-
+-	*eh = (struct ethhdr *)(cpl + 1);
+-	*iph = (struct iphdr *)((struct ethhdr *)*eh + 1);
+-	*tcph = (struct tcphdr *)((struct iphdr *)*iph + 1);
+-
+-	 if (!lro_segment_ok(*tcph))
+-		return -1;
+-
+-	*hdr_flags = LRO_IPV4 | LRO_TCP;
+-	return 0;
+-}
+-
+-static int t3_get_skb_header(struct sk_buff *skb,
+-			      void **iph, void **tcph, u64 *hdr_flags,
+-			      void *priv)
+-{
+-	void *eh;
+-
+-	return t3_get_lro_header(&eh, iph, tcph, hdr_flags, priv);
+-}
+-
+-static int t3_get_frag_header(struct skb_frag_struct *frag, void **eh,
+-			      void **iph, void **tcph, u64 *hdr_flags,
+-			      void *priv)
+-{
+-	return t3_get_lro_header(eh, iph, tcph, hdr_flags, priv);
+-}
+-
+-/**
+- *	lro_add_page - add a page chunk to an LRO session
+- *	@adap: the adapter
+- *	@qs: the associated queue set
+- *	@fl: the free list containing the page chunk to add
+- *	@len: packet length
+- *	@complete: Indicates the last fragment of a frame
+- *
+- *	Add a received packet contained in a page chunk to an existing LRO
+- *	session.
+- */
+-static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
+-			 struct sge_fl *fl, int len, int complete)
+-{
+-	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+-	struct cpl_rx_pkt *cpl;
+-	struct skb_frag_struct *rx_frag = qs->lro_frag_tbl;
+-	int nr_frags = qs->lro_nfrags, frag_len = qs->lro_frag_len;
+-	int offset = 0;
+-
+-	if (!nr_frags) {
+-		offset = 2 + sizeof(struct cpl_rx_pkt);
+-		qs->lro_va = cpl = sd->pg_chunk.va + 2;
+-	}
+-
+-	fl->credits--;
+-
+-	len -= offset;
+-	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
+-			 fl->buf_size, PCI_DMA_FROMDEVICE);
+-
+-	rx_frag += nr_frags;
+-	rx_frag->page = sd->pg_chunk.page;
+-	rx_frag->page_offset = sd->pg_chunk.offset + offset;
+-	rx_frag->size = len;
+-	frag_len += len;
+-	qs->lro_nfrags++;
+-	qs->lro_frag_len = frag_len;
+-
+-	if (!complete)
+-		return;
+-
+-	qs->lro_nfrags = qs->lro_frag_len = 0;
+-	cpl = qs->lro_va;
+-
+-	if (unlikely(cpl->vlan_valid)) {
+-		struct net_device *dev = qs->netdev;
+-		struct port_info *pi = netdev_priv(dev);
+-		struct vlan_group *grp = pi->vlan_grp;
+-
+-		if (likely(grp != NULL)) {
+-			lro_vlan_hwaccel_receive_frags(&qs->lro_mgr,
+-						       qs->lro_frag_tbl,
+-						       frag_len, frag_len,
+-						       grp, ntohs(cpl->vlan),
+-						       cpl, 0);
+-			return;
+-		}
+-	}
+-	lro_receive_frags(&qs->lro_mgr, qs->lro_frag_tbl,
+-			  frag_len, frag_len, cpl, 0);
+-}
+-
+-/**
+- *	init_lro_mgr - initialize a LRO manager object
+- *	@lro_mgr: the LRO manager object
+- */
+-static void init_lro_mgr(struct sge_qset *qs, struct net_lro_mgr *lro_mgr)
+-{
+-	lro_mgr->dev = qs->netdev;
+-	lro_mgr->features = LRO_F_NAPI;
+-	lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
+-	lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
+-	lro_mgr->max_desc = T3_MAX_LRO_SES;
+-	lro_mgr->lro_arr = qs->lro_desc;
+-	lro_mgr->get_frag_header = t3_get_frag_header;
+-	lro_mgr->get_skb_header = t3_get_skb_header;
+-	lro_mgr->max_aggr = T3_MAX_LRO_MAX_PKTS;
+-	if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
+-		lro_mgr->max_aggr = MAX_SKB_FRAGS;
+-}
+-
+ /**
+  *	handle_rsp_cntrl_info - handles control information in a response
+  *	@qs: the queue set corresponding to the response
+@@ -2198,7 +2013,7 @@ static int process_responses(struct adap
+ 	q->next_holdoff = q->holdoff_tmr;
+ 
+ 	while (likely(budget_left && is_new_response(r, q))) {
+-		int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
++		int packet_complete, eth, ethpad = 2;
+ 		struct sk_buff *skb = NULL;
+ 		u32 len, flags = ntohl(r->flags);
+ 		__be32 rss_hi = *(const __be32 *)r,
+@@ -2230,9 +2045,6 @@ no_mem:
+ 		} else if ((len = ntohl(r->len_cq)) != 0) {
+ 			struct sge_fl *fl;
+ 
+-			if (eth)
+-				lro = qs->lro_enabled && is_eth_tcp(rss_hi);
+-
+ 			fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
+ 			if (fl->use_pages) {
+ 				void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
+@@ -2242,12 +2054,6 @@ no_mem:
+ 				prefetch(addr + L1_CACHE_BYTES);
+ #endif
+ 				__refill_fl(adap, fl);
+-				if (lro > 0) {
+-					lro_add_page(adap, qs, fl,
+-						     G_RSPD_LEN(len),
+-						     flags & F_RSPD_EOP);
+-					 goto next_fl;
+-				}
+ 
+ 				skb = get_packet_pg(adap, fl, q,
+ 						    G_RSPD_LEN(len),
+@@ -2263,7 +2069,7 @@ no_mem:
+ 				q->rx_drops++;
+ 			} else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
+ 				__skb_pull(skb, 2);
+-next_fl:
++
+ 			if (++fl->cidx == fl->size)
+ 				fl->cidx = 0;
+ 		} else
+@@ -2293,7 +2099,7 @@ next_fl:
+ 
+ 		if (skb != NULL && packet_complete) {
+ 			if (eth)
+-				rx_eth(adap, q, skb, ethpad, lro);
++				rx_eth(adap, q, skb, ethpad);
+ 			else {
+ 				q->offload_pkts++;
+ 				/* Preserve the RSS info in csum & priority */
+@@ -2305,17 +2111,12 @@ next_fl:
+ 			}
+ 
+ 			if (flags & F_RSPD_EOP)
+-				clear_rspq_bufstate(q);
++				clear_rspq_bufstate(q);
+ 		}
+ 		--budget_left;
+ 	}
+ 
+ 	deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
+-	lro_flush_all(&qs->lro_mgr);
+-	qs->port_stats[SGE_PSTAT_LRO_AGGR] = qs->lro_mgr.stats.aggregated;
+-	qs->port_stats[SGE_PSTAT_LRO_FLUSHED] = qs->lro_mgr.stats.flushed;
+-	qs->port_stats[SGE_PSTAT_LRO_NO_DESC] = qs->lro_mgr.stats.no_desc;
+-
+ 	if (sleeping)
+ 		check_ring_db(adap, qs, sleeping);
+ 
+@@ -2876,7 +2677,6 @@ int t3_sge_alloc_qset(struct adapter *ad
+ {
+ 	int i, avail, ret = -ENOMEM;
+ 	struct sge_qset *q = &adapter->sge.qs[id];
+-	struct net_lro_mgr *lro_mgr = &q->lro_mgr;
+ 
+ 	init_qset_cntxt(q, id);
+ 	init_timer(&q->tx_reclaim_timer);
+@@ -2957,10 +2757,6 @@ int t3_sge_alloc_qset(struct adapter *ad
+ 	q->fl[0].order = FL0_PG_ORDER;
+ 	q->fl[1].order = FL1_PG_ORDER;
+ 
+-	q->lro_frag_tbl = kcalloc(MAX_FRAME_SIZE / FL1_PG_CHUNK_SIZE + 1,
+-				  sizeof(struct skb_frag_struct),
+-				  GFP_KERNEL);
+-	q->lro_nfrags = q->lro_frag_len = 0;
+ 	spin_lock_irq(&adapter->sge.reg_lock);
+ 
+ 	/* FL threshold comparison uses < */
+@@ -3017,8 +2813,6 @@ int t3_sge_alloc_qset(struct adapter *ad
+  	if (netdev->atalk_ptr == NULL)
+  		netdev->atalk_ptr = q;
+ 
+-	init_lro_mgr(q, lro_mgr);
+-
+ 	avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
+ 			  GFP_KERNEL | __GFP_COMP);
+ 	if (!avail) {
+Index: ofed_kernel/drivers/net/cxgb3/t3_cpl.h
+===================================================================
+--- ofed_kernel.orig/drivers/net/cxgb3/t3_cpl.h
++++ ofed_kernel/drivers/net/cxgb3/t3_cpl.h
+@@ -174,13 +174,6 @@ enum {				/* TCP congestion control algo
+ 	CONG_ALG_HIGHSPEED
+ };
+ 
+-enum {			/* RSS hash type */
+-	RSS_HASH_NONE = 0,
+-	RSS_HASH_2_TUPLE = 1,
+-	RSS_HASH_4_TUPLE = 2,
+-	RSS_HASH_TCPV6 = 3
+-};
+-
+ union opcode_tid {
+ 	__be32 opcode_tid;
+ 	__u8 opcode;
+@@ -194,10 +187,6 @@ union opcode_tid {
+ #define S_QNUM 0
+ #define G_QNUM(x) (((x) >> S_QNUM) & 0xFFFF)
+ 
+-#define S_HASHTYPE 22
+-#define M_HASHTYPE 0x3
+-#define G_HASHTYPE(x) (((x) >> S_HASHTYPE) & M_HASHTYPE)
+-
+ /* tid is assumed to be 24-bits */
+ #define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid))
+ 
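
Note: the LRO code is removed outright because the generic inet_lro manager
(<linux/inet_lro.h>) only appeared in 2.6.24 and is not available in the
2.6.18-EL5.3 tree.  Some backports keep such code under a compile-time guard
instead; a hypothetical guard (not part of this series) would look like:

#include <linux/version.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
#include <linux/inet_lro.h>
#define CXGB3_HAVE_LRO 1
#else
#define CXGB3_HAVE_LRO 0        /* 2.6.18: no generic LRO manager */
#endif

/*
 * The receive path would then wrap every lro_mgr/lro_receive_skb() use in
 * "#if CXGB3_HAVE_LRO ... #endif" blocks rather than deleting it.
 */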

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0110_provider_sysfs.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0110_provider_sysfs.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/cxgb3_0110_provider_sysfs.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,120 @@
+---
+ drivers/infiniband/hw/cxgb3/iwch_provider.c |   63 +++++++++++++---------------
+ 1 file changed, 30 insertions(+), 33 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/hw/cxgb3/iwch_provider.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/cxgb3/iwch_provider.c
++++ ofed_kernel/drivers/infiniband/hw/cxgb3/iwch_provider.c
+@@ -1178,46 +1178,43 @@ static int iwch_query_port(struct ib_dev
+ 	return 0;
+ }
+ 
+-static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
+-			char *buf)
++static ssize_t show_rev(struct class_device *cdev, char *buf)
+ {
+-	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
+-						 ibdev.dev);
+-	PDBG("%s dev 0x%p\n", __func__, dev);
++	struct iwch_dev *iwch_dev = container_of(cdev, struct iwch_dev,
++						 ibdev.class_dev);
++	PDBG("%s dev 0x%p\n", __func__, cdev);
+ 	return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
+ }
+ 
+-static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, char *buf)
++static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
+ {
+-	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
+-						 ibdev.dev);
++	struct iwch_dev *iwch_dev = container_of(cdev, struct iwch_dev,
++						 ibdev.class_dev);
+ 	struct ethtool_drvinfo info;
+ 	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
+ 
+-	PDBG("%s dev 0x%p\n", __func__, dev);
++	PDBG("%s dev 0x%p\n", __func__, cdev);
+ 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
+ 	return sprintf(buf, "%s\n", info.fw_version);
+ }
+ 
+-static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
+-			char *buf)
++static ssize_t show_hca(struct class_device *cdev, char *buf)
+ {
+-	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
+-						 ibdev.dev);
++	struct iwch_dev *iwch_dev = container_of(cdev, struct iwch_dev,
++						 ibdev.class_dev);
+ 	struct ethtool_drvinfo info;
+ 	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
+ 
+-	PDBG("%s dev 0x%p\n", __func__, dev);
++	PDBG("%s dev 0x%p\n", __func__, cdev);
+ 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
+ 	return sprintf(buf, "%s\n", info.driver);
+ }
+ 
+-static ssize_t show_board(struct device *dev, struct device_attribute *attr,
+-			  char *buf)
++static ssize_t show_board(struct class_device *cdev, char *buf)
+ {
+-	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
+-						 ibdev.dev);
+-	PDBG("%s dev 0x%p\n", __func__, dev);
++	struct iwch_dev *iwch_dev = container_of(cdev, struct iwch_dev,
++						 ibdev.class_dev);
++	PDBG("%s dev 0x%p\n", __func__, cdev);
+ 	return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
+ 		       iwch_dev->rdev.rnic_info.pdev->device);
+ }
+@@ -1281,16 +1278,16 @@ static int iwch_get_mib(struct ib_device
+ 	return 0;
+ }
+ 
+-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
+-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
+-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
+-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+-
+-static struct device_attribute *iwch_class_attributes[] = {
+-	&dev_attr_hw_rev,
+-	&dev_attr_fw_ver,
+-	&dev_attr_hca_type,
+-	&dev_attr_board_id,
++static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
++static CLASS_DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
++static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
++static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
++
++static struct class_device_attribute *iwch_class_attributes[] = {
++	&class_device_attr_hw_rev,
++	&class_device_attr_fw_ver,
++	&class_device_attr_hca_type,
++	&class_device_attr_board_id,
+ };
+ 
+ int iwch_register_device(struct iwch_dev *dev)
+@@ -1389,8 +1386,8 @@ int iwch_register_device(struct iwch_dev
+ 		goto bail1;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
+-		ret = device_create_file(&dev->ibdev.dev,
+-					 iwch_class_attributes[i]);
++		ret = class_device_create_file(&dev->ibdev.class_dev,
++						iwch_class_attributes[i]);
+ 		if (ret) {
+ 			goto bail2;
+ 		}
+@@ -1408,8 +1405,8 @@ void iwch_unregister_device(struct iwch_
+ 
+ 	PDBG("%s iwch_dev %p\n", __func__, dev);
+ 	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
+-		device_remove_file(&dev->ibdev.dev,
+-				   iwch_class_attributes[i]);
++		class_device_remove_file(&dev->ibdev.class_dev,
++					iwch_class_attributes[i]);
+ 	ib_unregister_device(&dev->ibdev);
+ 	return;
+ }

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/dma_mapping_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/dma_mapping_to_2_6_26.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/dma_mapping_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,17 @@
+---
+ include/rdma/ib_verbs.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: ofed_kernel/include/rdma/ib_verbs.h
+===================================================================
+--- ofed_kernel.orig/include/rdma/ib_verbs.h
++++ ofed_kernel/include/rdma/ib_verbs.h
+@@ -1682,7 +1682,7 @@ static inline int ib_dma_mapping_error(s
+ {
+ 	if (dev->dma_ops)
+ 		return dev->dma_ops->mapping_error(dev, dma_addr);
+-	return dma_mapping_error(dev->dma_device, dma_addr);
++	return dma_mapping_error(dma_addr);
+ }
+ 
+ /**

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ehca-00-revert_inhibit_dmem.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ehca-00-revert_inhibit_dmem.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ehca-00-revert_inhibit_dmem.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,112 @@
+From 8e110a2fff1f110a9211f240acc12b8310e5cbd3 Mon Sep 17 00:00:00 2001
+From: Stefan Roscher <ossrosch at linux.vnet.ibm.com>
+Date: Wed, 22 Oct 2008 15:54:38 -0700
+Subject: [PATCH] IB/ehca: Reject dynamic memory add/remove when ehca adapter is present
+
+Since the ehca device driver does not support dynamic memory add and
+remove operations, the driver must explicitly reject such requests in
+order to prevent unpredictable behaviors related to existing memory
+regions that cover all of memory being used by InfiniBand protocols in
+the kernel.
+
+The solution (for now at least) is to add a memory notifier to the
+ehca device driver and if a request for dynamic memory add or remove
+comes in, ehca will always reject it.  The user can add or remove
+memory by hot-removing the ehca adapter, performing the memory
+operation, and then hot-adding the ehca adapter back.
+
+Signed-off-by: Stefan Roscher <stefan.roscher at de.ibm.com>
+Signed-off-by: Roland Dreier <rolandd at cisco.com>
+---
+ drivers/infiniband/hw/ehca/ehca_main.c |   47 --------------------------------
+ 1 files changed, 0 insertions(+), 47 deletions(-)
+
+diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
+index bec7e02..086959a 100644
+--- a/drivers/infiniband/hw/ehca/ehca_main.c
++++ b/drivers/infiniband/hw/ehca/ehca_main.c
+@@ -44,8 +44,6 @@
+ #include <linux/slab.h>
+ #endif
+ 
+-#include <linux/notifier.h>
+-#include <linux/memory.h>
+ #include "ehca_classes.h"
+ #include "ehca_iverbs.h"
+ #include "ehca_mrmw.h"
+@@ -971,40 +969,6 @@ void ehca_poll_eqs(unsigned long data)
+ 	spin_unlock(&shca_list_lock);
+ }
+ 
+-static int ehca_mem_notifier(struct notifier_block *nb,
+-			     unsigned long action, void *data)
+-{
+-	static unsigned long ehca_dmem_warn_time;
+-
+-	switch (action) {
+-	case MEM_CANCEL_OFFLINE:
+-	case MEM_CANCEL_ONLINE:
+-	case MEM_ONLINE:
+-	case MEM_OFFLINE:
+-		return NOTIFY_OK;
+-	case MEM_GOING_ONLINE:
+-	case MEM_GOING_OFFLINE:
+-		/* only ok if no hca is attached to the lpar */
+-		spin_lock(&shca_list_lock);
+-		if (list_empty(&shca_list)) {
+-			spin_unlock(&shca_list_lock);
+-			return NOTIFY_OK;
+-		} else {
+-			spin_unlock(&shca_list_lock);
+-			if (printk_timed_ratelimit(&ehca_dmem_warn_time,
+-						   30 * 1000))
+-				ehca_gen_err("DMEM operations are not allowed"
+-					     "in conjunction with eHCA");
+-			return NOTIFY_BAD;
+-		}
+-	}
+-	return NOTIFY_OK;
+-}
+-
+-static struct notifier_block ehca_mem_nb = {
+-	.notifier_call = ehca_mem_notifier,
+-};
+-
+ static int __init ehca_module_init(void)
+ {
+ 	int ret;
+@@ -1032,12 +996,6 @@ static int __init ehca_module_init(void)
+ 		goto module_init2;
+ 	}
+ 
+-	ret = register_memory_notifier(&ehca_mem_nb);
+-	if (ret) {
+-		ehca_gen_err("Failed registering memory add/remove notifier");
+-		goto module_init3;
+-	}
+-
+ 	if (ehca_poll_all_eqs != 1) {
+ 		ehca_gen_err("WARNING!!!");
+ 		ehca_gen_err("It is possible to lose interrupts.");
+@@ -1050,9 +1008,6 @@ static int __init ehca_module_init(void)
+ 
+ 	return 0;
+ 
+-module_init3:
+-	ibmebus_unregister_driver(&ehca_driver);
+-
+ module_init2:
+ 	ehca_destroy_slab_caches();
+ 
+@@ -1068,8 +1023,6 @@ static void __exit ehca_module_exit(void)
+ 
+ 	ibmebus_unregister_driver(&ehca_driver);
+ 
+-	unregister_memory_notifier(&ehca_mem_nb);
+-
+ 	ehca_destroy_slab_caches();
+ 
+ 	ehca_destroy_comp_pool();
+-- 
+1.5.5
+
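
Note: the upstream commit being reverted here registers a memory-hotplug
notifier so ehca can veto dynamic memory add/remove; the backport reverts it,
presumably because the 2.6.18-EL5.3 base lacks the memory notifier interface
the upstream code depends on.  For reference, the general pattern used by the
removed code is sketched below (names invented; the real driver only returns
NOTIFY_BAD while an adapter is actually bound):

#include <linux/memory.h>
#include <linux/notifier.h>

static int example_mem_notifier(struct notifier_block *nb,
                                unsigned long action, void *data)
{
        switch (action) {
        case MEM_GOING_ONLINE:
        case MEM_GOING_OFFLINE:
                return NOTIFY_BAD;      /* reject the memory transition */
        default:
                return NOTIFY_OK;
        }
}

static struct notifier_block example_mem_nb = {
        .notifier_call = example_mem_notifier,
};

/*
 * register_memory_notifier(&example_mem_nb) at module init and
 * unregister_memory_notifier(&example_mem_nb) at module exit.
 */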

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ehca-01-ibmebus_loc_code.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ehca-01-ibmebus_loc_code.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ehca-01-ibmebus_loc_code.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,191 @@
+---
+ drivers/infiniband/hw/ehca/ehca_classes.h |    2 -
+ drivers/infiniband/hw/ehca/ehca_eq.c      |    6 +--
+ drivers/infiniband/hw/ehca/ehca_main.c    |   49 ++++++++++++++----------------
+ 3 files changed, 27 insertions(+), 30 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/hw/ehca/ehca_classes.h
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/ehca/ehca_classes.h
++++ ofed_kernel/drivers/infiniband/hw/ehca/ehca_classes.h
+@@ -112,7 +112,7 @@ struct ehca_sport {
+ 
+ struct ehca_shca {
+ 	struct ib_device ib_device;
+-	struct of_device *ofdev;
++	struct ibmebus_dev *ibmebus_dev;
+ 	u8 num_ports;
+ 	int hw_level;
+ 	struct list_head shca_list;
+Index: ofed_kernel/drivers/infiniband/hw/ehca/ehca_eq.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/ehca/ehca_eq.c
++++ ofed_kernel/drivers/infiniband/hw/ehca/ehca_eq.c
+@@ -122,7 +122,7 @@ int ehca_create_eq(struct ehca_shca *shc
+ 
+ 	/* register interrupt handlers and initialize work queues */
+ 	if (type == EHCA_EQ) {
+-		ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq,
++		ret = ibmebus_request_irq(NULL, eq->ist, ehca_interrupt_eq,
+ 					  IRQF_DISABLED, "ehca_eq",
+ 					  (void *)shca);
+ 		if (ret < 0)
+@@ -130,7 +130,7 @@ int ehca_create_eq(struct ehca_shca *shc
+ 
+ 		tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
+ 	} else if (type == EHCA_NEQ) {
+-		ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq,
++		ret = ibmebus_request_irq(NULL, eq->ist, ehca_interrupt_neq,
+ 					  IRQF_DISABLED, "ehca_neq",
+ 					  (void *)shca);
+ 		if (ret < 0)
+@@ -170,7 +170,7 @@ int ehca_destroy_eq(struct ehca_shca *sh
+ 	u64 h_ret;
+ 
+ 	spin_lock_irqsave(&eq->spinlock, flags);
+-	ibmebus_free_irq(eq->ist, (void *)shca);
++	ibmebus_free_irq(NULL, eq->ist, (void *)shca);
+ 
+ 	h_ret = hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
+ 
+Index: ofed_kernel/drivers/infiniband/hw/ehca/ehca_main.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/ehca/ehca_main.c
++++ ofed_kernel/drivers/infiniband/hw/ehca/ehca_main.c
+@@ -289,8 +289,8 @@ static int ehca_sense_attributes(struct 
+ 	};
+ 
+ 	ehca_gen_dbg("Probing adapter %s...",
+-		     shca->ofdev->node->full_name);
+-	loc_code = of_get_property(shca->ofdev->node, "ibm,loc-code", NULL);
++		     shca->ibmebus_dev->ofdev.node->full_name);
++	loc_code = of_get_property(shca->ibmebus_dev->ofdev.node, "ibm,loc-code", NULL);
+ 	if (loc_code)
+ 		ehca_gen_dbg(" ... location lode=%s", loc_code);
+ 
+@@ -458,7 +458,7 @@ static int ehca_init_device(struct ehca_
+ 	shca->ib_device.node_type           = RDMA_NODE_IB_CA;
+ 	shca->ib_device.phys_port_cnt       = shca->num_ports;
+ 	shca->ib_device.num_comp_vectors    = 1;
+-	shca->ib_device.dma_device          = &shca->ofdev->dev;
++	shca->ib_device.dma_device          = &shca->ibmebus_dev->ofdev.dev;
+ 	shca->ib_device.query_device        = ehca_query_device;
+ 	shca->ib_device.query_port          = ehca_query_port;
+ 	shca->ib_device.query_gid           = ehca_query_gid;
+@@ -619,11 +619,6 @@ static struct attribute_group ehca_drv_a
+ 	.attrs = ehca_drv_attrs
+ };
+ 
+-static struct attribute_group *ehca_drv_attr_groups[] = {
+-	&ehca_drv_attr_grp,
+-	NULL,
+-};
+-
+ #define EHCA_RESOURCE_ATTR(name)                                           \
+ static ssize_t  ehca_show_##name(struct device *dev,                       \
+ 				 struct device_attribute *attr,            \
+@@ -707,7 +702,7 @@ static struct attribute_group ehca_dev_a
+ 	.attrs = ehca_dev_attrs
+ };
+ 
+-static int __devinit ehca_probe(struct of_device *dev,
++static int __devinit ehca_probe(struct ibmebus_dev *dev,
+ 				const struct of_device_id *id)
+ {
+ 	struct ehca_shca *shca;
+@@ -715,16 +710,16 @@ static int __devinit ehca_probe(struct o
+ 	struct ib_pd *ibpd;
+ 	int ret, i, eq_size;
+ 
+-	handle = of_get_property(dev->node, "ibm,hca-handle", NULL);
++	handle = of_get_property(dev->ofdev.node, "ibm,hca-handle", NULL);
+ 	if (!handle) {
+ 		ehca_gen_err("Cannot get eHCA handle for adapter: %s.",
+-			     dev->node->full_name);
++			     dev->ofdev.node->full_name);
+ 		return -ENODEV;
+ 	}
+ 
+ 	if (!(*handle)) {
+ 		ehca_gen_err("Wrong eHCA handle for adapter: %s.",
+-			     dev->node->full_name);
++			     dev->ofdev.node->full_name);
+ 		return -ENODEV;
+ 	}
+ 
+@@ -739,9 +734,9 @@ static int __devinit ehca_probe(struct o
+ 	for (i = 0; i < ARRAY_SIZE(shca->sport); i++)
+ 		spin_lock_init(&shca->sport[i].mod_sqp_lock);
+ 
+-	shca->ofdev = dev;
++	shca->ibmebus_dev = dev;
+ 	shca->ipz_hca_handle.handle = *handle;
+-	dev->dev.driver_data = shca;
++	dev->ofdev.dev.driver_data = shca;
+ 
+ 	ret = ehca_sense_attributes(shca);
+ 	if (ret < 0) {
+@@ -818,7 +813,7 @@ static int __devinit ehca_probe(struct o
+ 		}
+ 	}
+ 
+-	ret = sysfs_create_group(&dev->dev.kobj, &ehca_dev_attr_grp);
++	ret = sysfs_create_group(&dev->ofdev.dev.kobj, &ehca_dev_attr_grp);
+ 	if (ret) /* only complain; we can live without attributes */
+ 		ehca_err(&shca->ib_device,
+ 			 "Cannot create device attributes  ret=%d", ret);
+@@ -868,12 +863,12 @@ probe1:
+ 	return -EINVAL;
+ }
+ 
+-static int __devexit ehca_remove(struct of_device *dev)
++static int __devexit ehca_remove(struct ibmebus_dev *dev)
+ {
+-	struct ehca_shca *shca = dev->dev.driver_data;
++	struct ehca_shca *shca = dev->ofdev.dev.driver_data;
+ 	int ret;
+ 
+-	sysfs_remove_group(&dev->dev.kobj, &ehca_dev_attr_grp);
++	sysfs_remove_group(&dev->ofdev.dev.kobj, &ehca_dev_attr_grp);
+ 
+ 	if (ehca_open_aqp1 == 1) {
+ 		int i;
+@@ -925,14 +920,11 @@ static struct of_device_id ehca_device_t
+ };
+ MODULE_DEVICE_TABLE(of, ehca_device_table);
+ 
+-static struct of_platform_driver ehca_driver = {
+-	.name        = "ehca",
+-	.match_table = ehca_device_table,
+-	.probe       = ehca_probe,
+-	.remove      = ehca_remove,
+-	.driver	     = {
+-		.groups = ehca_drv_attr_groups,
+-	},
++static struct ibmebus_driver ehca_driver = {
++	.name     = "ehca",
++	.id_table = ehca_device_table,
++	.probe    = ehca_probe,
++	.remove   = ehca_remove,
+ };
+ 
+ void ehca_poll_eqs(unsigned long data)
+@@ -991,6 +983,10 @@ static int __init ehca_module_init(void)
+ 		goto module_init2;
+ 	}
+ 
++	ret = sysfs_create_group(&ehca_driver.driver.kobj, &ehca_drv_attr_grp);
++	if (ret) /* only complain; we can live without attributes */
++		ehca_gen_err("Cannot create driver attributes  ret=%d", ret);
++
+ 	if (ehca_poll_all_eqs != 1) {
+ 		ehca_gen_err("WARNING!!!");
+ 		ehca_gen_err("It is possible to lose interrupts.");
+@@ -1016,6 +1012,7 @@ static void __exit ehca_module_exit(void
+ 	if (ehca_poll_all_eqs == 1)
+ 		del_timer_sync(&poll_eqs_timer);
+ 
++	sysfs_remove_group(&ehca_driver.driver.kobj, &ehca_drv_attr_grp);
+ 	ibmebus_unregister_driver(&ehca_driver);
+ 
+ 	ehca_destroy_slab_caches();

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ehca_02_revert_interface_change.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ehca_02_revert_interface_change.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ehca_02_revert_interface_change.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,25 @@
+From 7ef1f7881a8f660654e7d1567213638b37adbbb5 Mon Sep 17 00:00:00 2001
+From: Stefan Roscher <stefan.roscher@de.ibm.com>
+Date: Wed, 6 Aug 2008 16:27:25 +0200
+Subject: [PATCH] Revert "infiniband: use performance variant for_each_cpu_mask_nr"
+
+This reverts commit 5d7bfd0c4d463d288422032c9903d0452dee141d.
+---
+ drivers/infiniband/hw/ehca/ehca_irq.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/hw/ehca/ehca_irq.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/ehca/ehca_irq.c
++++ ofed_kernel/drivers/infiniband/hw/ehca/ehca_irq.c
+@@ -650,8 +650,8 @@ static inline int find_next_online_cpu(s
+ 		ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
+ 
+ 	spin_lock_irqsave(&pool->last_cpu_lock, flags);
+-	cpu = next_cpu_nr(pool->last_cpu, cpu_online_map);
+-	if (cpu >= nr_cpu_ids)
++	cpu = next_cpu(pool->last_cpu, cpu_online_map);
++	if (cpu == NR_CPUS)
+ 		cpu = first_cpu(cpu_online_map);
+ 	pool->last_cpu = cpu;
+ 	spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
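
The hunk above reverts the EQ polling loop to the pre-nr_cpu_ids cpumask idiom, since next_cpu_nr() and nr_cpu_ids do not exist on the 2.6.18 base this backport targets; next_cpu() uses NR_CPUS as its "no more CPUs" sentinel instead. An alternative some backports use is a small compat mapping in a header, roughly as follows (illustrative only, not part of this commit):

    /* Illustrative compat mapping for kernels without the _nr helpers.
     * next_cpu() already returns NR_CPUS when the mask is exhausted, so
     * the upstream ">= nr_cpu_ids" checks keep working unchanged. */
    #define next_cpu_nr(n, src)    next_cpu((n), (src))
    #define nr_cpu_ids             NR_CPUS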

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,58 @@
+---
+ drivers/infiniband/hw/ipath/ipath_sdma.c      |    2 +-
+ drivers/infiniband/hw/ipath/ipath_user_sdma.c |    6 +++---
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c
+===================================================================
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:37:35.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:41:22.000000000 -0800
+@@ -698,7 +698,7 @@ retry:
+ 
+ 	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
+ 			      tx->map_len, DMA_TO_DEVICE);
+-	if (dma_mapping_error(&dd->pcidev->dev, addr))
++	if (dma_mapping_error(addr))
+ 		goto ioerr;
+ 
+ 	dwoffset = tx->map_len >> 2;
+@@ -739,7 +739,7 @@ retry:
+ 		dw = (len + 3) >> 2;
+ 		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
+ 				      DMA_TO_DEVICE);
+-		if (dma_mapping_error(&dd->pcidev->dev, addr))
++		if (dma_mapping_error(addr))
+ 			goto unmap;
+ 		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
+ 		/* SDmaUseLargeBuf has to be set in every descriptor */
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+===================================================================
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:18.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:54.000000000 -0800
+@@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(cons
+ 
+ 	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
+ 				DMA_TO_DEVICE);
+-	if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
++	if (dma_mapping_error(dma_addr)) {
+ 		ret = -ENOMEM;
+ 		goto free_unmap;
+ 	}
+@@ -301,7 +301,7 @@ static int ipath_user_sdma_pin_pages(con
+ 				     pages[j], 0, flen, DMA_TO_DEVICE);
+ 		unsigned long fofs = addr & ~PAGE_MASK;
+ 
+-		if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
++		if (dma_mapping_error(dma_addr)) {
+ 			ret = -ENOMEM;
+ 			goto done;
+ 		}
+@@ -508,7 +508,7 @@ static int ipath_user_sdma_queue_pkts(co
+ 		if (page) {
+ 			dma_addr = dma_map_page(&dd->pcidev->dev,
+ 						page, 0, len, DMA_TO_DEVICE);
+-			if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
++			if (dma_mapping_error(dma_addr)) {
+ 				ret = -ENOMEM;
+ 				goto free_pbc;
+ 			}
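
The calls above go back to the single-argument dma_mapping_error(); the struct device * parameter was only added in 2.6.27, so kernels up to 2.6.26 (including this 2.6.18-EL5.3 target) only accept the one-argument form. A wrapper macro is a common alternative to patching every call site; a sketch, with ipath_dma_mapping_error as an invented name:

    #include <linux/version.h>
    #include <linux/dma-mapping.h>

    /* Sketch only: pick the right dma_mapping_error() arity at build time. */
    #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
    #define ipath_dma_mapping_error(dev, addr)  dma_mapping_error(addr)
    #else
    #define ipath_dma_mapping_error(dev, addr)  dma_mapping_error((dev), (addr))
    #endif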

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0010_revert_pid.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0010_revert_pid.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0010_revert_pid.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,141 @@
+Signed-off-by: Dave Olson <dave.olson@qlogic.com>
+---
+
+diff -upr ofed_kernel-fixes-orig/drivers/infiniband/hw/ipath/ipath_driver.c ofed_kernel-fixes/drivers/infiniband/hw/ipath/ipath_driver.c
+--- ofed_kernel-fixes-orig/drivers/infiniband/hw/ipath/ipath_driver.c	2008-10-28 12:27:04.000000000 -0700
++++ ofed_kernel-fixes/drivers/infiniband/hw/ipath/ipath_driver.c	2008-10-28 12:31:40.000000000 -0700
+@@ -2622,7 +2622,7 @@ int ipath_reset_device(int unit)
+ 				ipath_dbg("unit %u port %d is in use "
+ 					  "(PID %u cmd %s), can't reset\n",
+ 					  unit, i,
+-					  pid_nr(dd->ipath_pd[i]->port_pid),
++					  dd->ipath_pd[i]->port_pid,
+ 					  dd->ipath_pd[i]->port_comm);
+ 				ret = -EBUSY;
+ 				goto bail;
+@@ -2661,7 +2661,7 @@ bail:
+ static int ipath_signal_procs(struct ipath_devdata *dd, int sig)
+ {
+ 	int i, sub, any = 0;
+-	struct pid *pid;
++	pid_t pid;
+ 	unsigned long flags;
+ 	
+ 	if (!dd->ipath_pd)
+@@ -2669,7 +2669,8 @@ static int ipath_signal_procs(struct ipa
+ 
+ 	spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
+ 	for (i = 1; i < dd->ipath_cfgports; i++) {
+-		if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
++		if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt ||
++		    !dd->ipath_pd[i]->port_pid)
+ 			continue;
+ 		pid = dd->ipath_pd[i]->port_pid;
+ 		if (!pid)
+@@ -2677,8 +2678,8 @@ static int ipath_signal_procs(struct ipa
+ 
+ 		dev_info(&dd->pcidev->dev, "context %d in use "
+ 			  "(PID %u), sending signal %d\n",
+-			  i, pid_nr(pid), sig);
+-		kill_pid(pid, sig, 1);
++			  i, pid, sig);
++		kill_proc(pid, sig, 1);
+ 		any++;
+ 		for (sub = 0; sub < INFINIPATH_MAX_SUBPORT; sub++) {
+ 			pid = dd->ipath_pd[i]->port_subpid[sub];
+@@ -2686,8 +2687,8 @@ static int ipath_signal_procs(struct ipa
+ 				continue;
+ 			dev_info(&dd->pcidev->dev, "sub-context "
+ 				"%d:%d in use (PID %u), sending "
+-				"signal %d\n", i, sub, pid_nr(pid), sig);
+-			kill_pid(pid, sig, 1);
++				"signal %d\n", i, sub, pid, sig);
++			kill_proc(pid, sig, 1);
+ 			any++;
+ 		}
+ 	}
+diff -upr ofed_kernel-fixes-orig/drivers/infiniband/hw/ipath/ipath_file_ops.c ofed_kernel-fixes/drivers/infiniband/hw/ipath/ipath_file_ops.c
+--- ofed_kernel-fixes-orig/drivers/infiniband/hw/ipath/ipath_file_ops.c	2008-10-28 12:27:04.000000000 -0700
++++ ofed_kernel-fixes/drivers/infiniband/hw/ipath/ipath_file_ops.c	2008-10-28 12:46:09.000000000 -0700
+@@ -556,7 +556,7 @@ static int ipath_tid_free(struct ipath_p
+ 			p = dd->ipath_pageshadow[porttid + tid];
+ 			dd->ipath_pageshadow[porttid + tid] = NULL;
+ 			ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n",
+-				   pid_nr(pd->port_pid), tid);
++				   pd->port_pid, tid);
+ 			dd->ipath_f_put_tid(dd, &tidbase[tid],
+ 					    RCVHQ_RCV_TYPE_EXPECTED,
+ 					    dd->ipath_tidinvalid);
+@@ -1610,7 +1610,7 @@ static int try_alloc_port(struct ipath_d
+ 			   port);
+ 		pd->port_cnt = 1;
+ 		port_fp(fp) = pd;
+-		pd->port_pid = get_pid(task_pid(current));
++		pd->port_pid = current->pid;
+ 		strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
+ 		ipath_stats.sps_ports++;
+ 		ret = 0;
+@@ -1794,15 +1794,14 @@ static int find_shared_port(struct file 
+ 			}
+ 			port_fp(fp) = pd;
+ 			subport_fp(fp) = pd->port_cnt++;
+-			pd->port_subpid[subport_fp(fp)] =
+-				get_pid(task_pid(current));
++			pd->port_subpid[subport_fp(fp)] = current->pid;
+ 			tidcursor_fp(fp) = 0;
+ 			pd->active_slaves |= 1 << subport_fp(fp);
+ 			ipath_cdbg(PROC,
+ 				   "%s[%u] %u sharing %s[%u] unit:port %u:%u\n",
+ 				   current->comm, current->pid,
+ 				   subport_fp(fp),
+-				   pd->port_comm, pid_nr(pd->port_pid),
++				   pd->port_comm, pd->port_pid,
+ 				   dd->ipath_unit, pd->port_port);
+ 			ret = 1;
+ 			goto done;
+@@ -2043,7 +2042,7 @@ static int ipath_close(struct inode *in,
+ 	struct ipath_devdata *dd;
+ 	unsigned long flags;
+ 	unsigned port;
+-	struct pid *pid;
++	pid_t pid;
+ 
+ 	ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n",
+ 		   (long)in->i_rdev, fp->private_data);
+@@ -2071,8 +2070,7 @@ static int ipath_close(struct inode *in,
+ 		 * the slave(s) don't wait for receive data forever.
+ 		 */
+ 		pd->active_slaves &= ~(1 << fd->subport);
+-		put_pid(pd->port_subpid[fd->subport]);
+-		pd->port_subpid[fd->subport] = NULL;
++		pd->port_subpid[fd->subport] = 0;
+ 		mutex_unlock(&ipath_mutex);
+ 		goto bail;
+ 	}
+@@ -2139,11 +2137,10 @@ static int ipath_close(struct inode *in,
+ 			unlock_expected_tids(pd);
+ 		ipath_stats.sps_ports--;
+ 		ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
+-			   pd->port_comm, pid_nr(pid),
++			   pd->port_comm, pid,
+ 			   dd->ipath_unit, port);
+ 	}
+ 
+-	put_pid(pid);
+ 	mutex_unlock(&ipath_mutex);
+ 	ipath_free_pddata(dd, pd); /* after releasing the mutex */
+ 
+diff -upr ofed_kernel-fixes-orig/drivers/infiniband/hw/ipath/ipath_kernel.h ofed_kernel-fixes/drivers/infiniband/hw/ipath/ipath_kernel.h
+--- ofed_kernel-fixes-orig/drivers/infiniband/hw/ipath/ipath_kernel.h	2008-10-28 12:27:04.000000000 -0700
++++ ofed_kernel-fixes/drivers/infiniband/hw/ipath/ipath_kernel.h	2008-10-28 12:29:17.000000000 -0700
+@@ -159,8 +159,8 @@ struct ipath_portdata {
+ 	/* saved total number of polled urgent packets for poll edge trigger */
+ 	u32 port_urgent_poll;
+ 	/* pid of process using this port */
+-	struct pid *port_pid;
+-	struct pid *port_subpid[INFINIPATH_MAX_SUBPORT];
++	pid_t port_pid;
++	pid_t port_subpid[INFINIPATH_MAX_SUBPORT];
+ 	/* same size as task_struct .comm[] */
+ 	char port_comm[16];
+ 	/* pkeys set by this use of this port */
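
This revert trades the reference-counted struct pid bookkeeping that newer kernels expect for the plain pid_t plus kill_proc() style still available on the 2.6.18 base. Compressed into a sketch (fragments only, using the struct ipath_portdata fields from the patch; "sig" stands for whatever signal the caller sends):

    /* Newer kernels: hold a counted reference to the opener's struct pid. */
    pd->port_pid = get_pid(task_pid(current));      /* at open */
    kill_pid(pd->port_pid, sig, 1);                 /* on reset/unload */
    put_pid(pd->port_pid);                          /* at close */
    pd->port_pid = NULL;

    /* 2.6.18 and older (what this backport restores): store the raw pid_t. */
    pd->port_pid = current->pid;
    kill_proc(pd->port_pid, sig, 1);
    pd->port_pid = 0;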

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0020_class_dev_to_device.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0020_class_dev_to_device.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0020_class_dev_to_device.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,207 @@
+BACKPORT - revert struct class_dev to device
+
+This patch reverts commit f4e91eb4a81559da87a3843758a641b5cc590b65 in 2.6.26.
+
+Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
+
+---
+ drivers/infiniband/hw/ipath/ipath_diag.c     |   10 +++---
+ drivers/infiniband/hw/ipath/ipath_file_ops.c |   44 +++++++++++++--------------
+ drivers/infiniband/hw/ipath/ipath_kernel.h   |    8 ++--
+ 3 files changed, 31 insertions(+), 31 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_diag.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_diag.c
++++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_diag.c
+@@ -79,7 +79,7 @@ static const struct file_operations diag
+ 
+ static atomic_t diagpkt_count = ATOMIC_INIT(0);
+ static struct cdev *diagpkt_cdev;
+-static struct device *diagpkt_dev;
++static struct class_device *diagpkt_class_dev;
+ 
+ int ipath_diag_add(struct ipath_devdata *dd)
+ {
+@@ -89,7 +89,7 @@ int ipath_diag_add(struct ipath_devdata 
+ 	if (atomic_inc_return(&diagpkt_count) == 1) {
+ 		ret = ipath_cdev_init(IPATH_DIAGPKT_MINOR,
+ 				      "ipath_diagpkt", &diagpkt_file_ops,
+-				      &diagpkt_cdev, &diagpkt_dev);
++				      &diagpkt_cdev, &diagpkt_class_dev);
+ 
+ 		if (ret) {
+ 			ipath_dev_err(dd, "Couldn't create ipath_diagpkt "
+@@ -102,7 +102,7 @@ int ipath_diag_add(struct ipath_devdata 
+ 
+ 	ret = ipath_cdev_init(IPATH_DIAG_MINOR_BASE + dd->ipath_unit, name,
+ 			      &diag_file_ops, &dd->diag_cdev,
+-			      &dd->diag_dev);
++			      &dd->diag_class_dev);
+ 	if (ret)
+ 		ipath_dev_err(dd, "Couldn't create %s device: %d",
+ 			      name, ret);
+@@ -114,9 +114,9 @@ done:
+ void ipath_diag_remove(struct ipath_devdata *dd)
+ {
+ 	if (atomic_dec_and_test(&diagpkt_count))
+-		ipath_cdev_cleanup(&diagpkt_cdev, &diagpkt_dev);
++		ipath_cdev_cleanup(&diagpkt_cdev, &diagpkt_class_dev);
+ 
+-	ipath_cdev_cleanup(&dd->diag_cdev, &dd->diag_dev);
++	ipath_cdev_cleanup(&dd->diag_cdev, &dd->diag_class_dev);
+ }
+ 
+ /**
+Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_file_ops.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_file_ops.c
++++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_file_ops.c
+@@ -2424,11 +2424,11 @@ static ssize_t ipath_writev(struct kiocb
+ static struct class *ipath_class;
+ 
+ static int init_cdev(int minor, char *name, const struct file_operations *fops,
+-		     struct cdev **cdevp, struct device **devp)
++		     struct cdev **cdevp, struct class_device **class_devp)
+ {
+ 	const dev_t dev = MKDEV(IPATH_MAJOR, minor);
+ 	struct cdev *cdev = NULL;
+-	struct device *device = NULL;
++	struct class_device *class_dev = NULL;
+ 	int ret;
+ 
+ 	cdev = cdev_alloc();
+@@ -2452,12 +2452,12 @@ static int init_cdev(int minor, char *na
+ 		goto err_cdev;
+ 	}
+ 
+-	device = device_create_drvdata(ipath_class, NULL, dev, NULL, name);
++	class_dev = class_device_create(ipath_class, NULL, dev, NULL, name);
+ 
+-	if (IS_ERR(device)) {
+-		ret = PTR_ERR(device);
++	if (IS_ERR(class_dev)) {
++		ret = PTR_ERR(class_dev);
+ 		printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
+-		       "device for minor %d, %s (err %d)\n",
++		       "class_dev for minor %d, %s (err %d)\n",
+ 		       minor, name, -ret);
+ 		goto err_cdev;
+ 	}
+@@ -2471,29 +2471,29 @@ err_cdev:
+ done:
+ 	if (ret >= 0) {
+ 		*cdevp = cdev;
+-		*devp = device;
++		*class_devp = class_dev;
+ 	} else {
+ 		*cdevp = NULL;
+-		*devp = NULL;
++		*class_devp = NULL;
+ 	}
+ 
+ 	return ret;
+ }
+ 
+ int ipath_cdev_init(int minor, char *name, const struct file_operations *fops,
+-		    struct cdev **cdevp, struct device **devp)
++		    struct cdev **cdevp, struct class_device **class_devp)
+ {
+-	return init_cdev(minor, name, fops, cdevp, devp);
++	return init_cdev(minor, name, fops, cdevp, class_devp);
+ }
+ 
+ static void cleanup_cdev(struct cdev **cdevp,
+-			 struct device **devp)
++			 struct class_device **class_devp)
+ {
+-	struct device *dev = *devp;
++	struct class_device *class_dev = *class_devp;
+ 
+-	if (dev) {
+-		device_unregister(dev);
+-		*devp = NULL;
++	if (class_dev) {
++		class_device_unregister(class_dev);
++		*class_devp = NULL;
+ 	}
+ 
+ 	if (*cdevp) {
+@@ -2503,13 +2503,13 @@ static void cleanup_cdev(struct cdev **c
+ }
+ 
+ void ipath_cdev_cleanup(struct cdev **cdevp,
+-			struct device **devp)
++			struct class_device **class_devp)
+ {
+-	cleanup_cdev(cdevp, devp);
++	cleanup_cdev(cdevp, class_devp);
+ }
+ 
+ static struct cdev *wildcard_cdev;
+-static struct device *wildcard_dev;
++static struct class_device *wildcard_class_dev;
+ 
+ static const dev_t dev = MKDEV(IPATH_MAJOR, 0);
+ 
+@@ -2566,7 +2566,7 @@ int ipath_user_add(struct ipath_devdata 
+ 			goto bail;
+ 		}
+ 		ret = init_cdev(0, "ipath", &ipath_file_ops, &wildcard_cdev,
+-				&wildcard_dev);
++				&wildcard_class_dev);
+ 		if (ret < 0) {
+ 			ipath_dev_err(dd, "Could not create wildcard "
+ 				      "minor: error %d\n", -ret);
+@@ -2579,7 +2579,7 @@ int ipath_user_add(struct ipath_devdata 
+ 	snprintf(name, sizeof(name), "ipath%d", dd->ipath_unit);
+ 
+ 	ret = init_cdev(dd->ipath_unit + 1, name, &ipath_file_ops,
+-			&dd->user_cdev, &dd->user_dev);
++			&dd->user_cdev, &dd->user_class_dev);
+ 	if (ret < 0)
+ 		ipath_dev_err(dd, "Could not create user minor %d, %s\n",
+ 			      dd->ipath_unit + 1, name);
+@@ -2594,13 +2594,13 @@ bail:
+ 
+ void ipath_user_remove(struct ipath_devdata *dd)
+ {
+-	cleanup_cdev(&dd->user_cdev, &dd->user_dev);
++	cleanup_cdev(&dd->user_cdev, &dd->user_class_dev);
+ 
+ 	if (atomic_dec_return(&user_count) == 0) {
+ 		if (atomic_read(&user_setup) == 0)
+ 			goto bail;
+ 
+-		cleanup_cdev(&wildcard_cdev, &wildcard_dev);
++		cleanup_cdev(&wildcard_cdev, &wildcard_class_dev);
+ 		user_cleanup();
+ 
+ 		atomic_set(&user_setup, 0);
+Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_kernel.h
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_kernel.h
++++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_kernel.h
+@@ -477,8 +477,8 @@ struct ipath_devdata {
+ 	struct pci_dev *pcidev;
+ 	struct cdev *user_cdev;
+ 	struct cdev *diag_cdev;
+-	struct device *user_dev;
+-	struct device *diag_dev;
++	struct class_device *user_class_dev;
++	struct class_device *diag_class_dev;
+ 	/* timer used to prevent stats overflow, error throttling, etc. */
+ 	struct timer_list ipath_stats_timer;
+ 	/* timer to verify interrupts work, and fallback if possible */
+@@ -865,9 +865,9 @@ void ipath_clear_freeze(struct ipath_dev
+ 
+ struct file_operations;
+ int ipath_cdev_init(int minor, char *name, const struct file_operations *fops,
+-		    struct cdev **cdevp, struct device **devp);
++		    struct cdev **cdevp, struct class_device **class_devp);
+ void ipath_cdev_cleanup(struct cdev **cdevp,
+-			struct device **devp);
++			struct class_device **class_devp);
+ 
+ int ipath_diag_add(struct ipath_devdata *);
+ void ipath_diag_remove(struct ipath_devdata *);
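
The reverted 2.6.26 commit had moved these char-device nodes from struct class_device to struct device; 2.6.18 only offers the class_device API, hence class_device_create()/class_device_unregister() here. A rough sketch of the pairing (ipath_class and devt come from the driver; the version boundary follows the reverted commit):

    /* Newer kernels (around 2.6.26, per the reverted commit): */
    struct device *d;
    d = device_create_drvdata(ipath_class, NULL, devt, NULL, "ipath");
    if (!IS_ERR(d))
            device_unregister(d);

    /* 2.6.18-era kernels (this backport): */
    struct class_device *cd;
    cd = class_device_create(ipath_class, NULL, devt, NULL, "ipath");
    if (!IS_ERR(cd))
            class_device_unregister(cd);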

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0030_revert_sysfs.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0030_revert_sysfs.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0030_revert_sysfs.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,113 @@
+BACKPORT - revert to older code for creating sysfs driver groups
+
+This reverts commit 23b9c1ab5baf368a32b7242bf110ef1f48700d04 in 2.6.25
+    
+Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
+
+---
+ drivers/infiniband/hw/ipath/ipath_driver.c |   17 +++++++++++++----
+ drivers/infiniband/hw/ipath/ipath_kernel.h |    3 ++-
+ drivers/infiniband/hw/ipath/ipath_sysfs.c  |   19 ++++++++++++++-----
+ 3 files changed, 29 insertions(+), 10 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_driver.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_driver.c
++++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_driver.c
+@@ -151,9 +151,6 @@ static struct pci_driver ipath_driver = 
+ 	.probe = ipath_init_one,
+ 	.remove = __devexit_p(ipath_remove_one),
+ 	.id_table = ipath_pci_tbl,
+-	.driver = {
+-		.groups = ipath_driver_attr_groups,
+-	},
+ };
+ 
+ static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
+@@ -2544,15 +2541,25 @@ static int __init infinipath_init(void)
+ 		goto bail_unit;
+ 	}
+ 
++	ret = ipath_driver_create_group(&ipath_driver.driver);
++	if (ret < 0) {
++		printk(KERN_ERR IPATH_DRV_NAME ": Unable to create driver "
++		       "sysfs entries: error %d\n", -ret);
++		goto bail_pci;
++	}
++
+ 	ret = ipath_init_ipathfs();
+ 	if (ret < 0) {
+ 		printk(KERN_ERR IPATH_DRV_NAME ": Unable to create "
+ 		       "ipathfs: error %d\n", -ret);
+-		goto bail_pci;
++		goto bail_group;
+ 	}
+ 
+ 	goto bail;
+ 
++bail_group:
++	ipath_driver_remove_group(&ipath_driver.driver);
++
+ bail_pci:
+ 	pci_unregister_driver(&ipath_driver);
+ 
+@@ -2567,6 +2574,8 @@ static void __exit infinipath_cleanup(vo
+ {
+ 	ipath_exit_ipathfs();
+ 
++	ipath_driver_remove_group(&ipath_driver.driver);
++
+ 	ipath_cdbg(VERBOSE, "Unregistering pci driver\n");
+ 	pci_unregister_driver(&ipath_driver);
+ 
+Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_kernel.h
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_kernel.h
++++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_kernel.h
+@@ -1271,7 +1271,8 @@ struct device_driver;
+ 
+ extern const char ib_ipath_version[];
+ 
+-extern struct attribute_group *ipath_driver_attr_groups[];
++int ipath_driver_create_group(struct device_driver *);
++void ipath_driver_remove_group(struct device_driver *);
+ 
+ int ipath_device_create_group(struct device *, struct ipath_devdata *);
+ void ipath_device_remove_group(struct device *, struct ipath_devdata *);
+Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_sysfs.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_sysfs.c
++++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_sysfs.c
+@@ -1069,11 +1069,6 @@ static ssize_t show_tempsense(struct dev
+ 	return ret;
+ }
+ 
+-struct attribute_group *ipath_driver_attr_groups[] = {
+-	&driver_attr_group,
+-	NULL,
+-};
+-
+ static DEVICE_ATTR(guid, S_IWUSR | S_IRUGO, show_guid, store_guid);
+ static DEVICE_ATTR(lmc, S_IWUSR | S_IRUGO, show_lmc, store_lmc);
+ static DEVICE_ATTR(lid, S_IWUSR | S_IRUGO, show_lid, store_lid);
+@@ -1181,6 +1176,20 @@ int ipath_expose_reset(struct device *de
+ 	return ret;
+ }
+ 
++int ipath_driver_create_group(struct device_driver *drv)
++{
++	int ret;
++
++	ret = sysfs_create_group(&drv->kobj, &driver_attr_group);
++
++	return ret;
++}
++
++void ipath_driver_remove_group(struct device_driver *drv)
++{
++	sysfs_remove_group(&drv->kobj, &driver_attr_group);
++}
++
+ int ipath_device_create_group(struct device *dev, struct ipath_devdata *dd)
+ {
+ 	int ret;
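
Same idea as the ehca driver-attribute change earlier in this commit: the reverted 2.6.25 commit published the driver attributes declaratively through the .groups pointer of struct device_driver, which 2.6.18 lacks, so the group is created and removed by hand around driver registration instead. A minimal sketch with stand-in names:

    /* Newer kernels: attributes hang off the driver declaratively. */
    static struct attribute_group *my_attr_groups[] = { &my_attr_group, NULL };
    static struct pci_driver my_pci_driver = {
            .name   = "mydrv",
            .driver = { .groups = my_attr_groups },
    };

    /* 2.6.18: no .groups field, so create/remove the group by hand,
     * after pci_register_driver() and before pci_unregister_driver(). */
    ret = sysfs_create_group(&my_pci_driver.driver.kobj, &my_attr_group);
    sysfs_remove_group(&my_pci_driver.driver.kobj, &my_attr_group);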

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0040_nopage_to_fault.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0040_nopage_to_fault.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0040_nopage_to_fault.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,81 @@
+IB/ipath - revert struct vm_operations_struct nopage to fault rename
+
+This patch reverts commit 3c8450860ba9d6279dbc969633eacf99161860d9 in 2.6.25
+
+Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
+
+---
+ drivers/infiniband/hw/ipath/ipath_debug.h    |    4 +--
+ drivers/infiniband/hw/ipath/ipath_file_ops.c |   29 +++++++++++++++++----------
+ 2 files changed, 21 insertions(+), 12 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_debug.h
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_debug.h
++++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_debug.h
+@@ -55,7 +55,7 @@
+ #define __IPATH_PKTDBG      0x80	/* print packet data */
+ /* print process startup (init)/exit messages */
+ #define __IPATH_PROCDBG     0x100
+-/* print mmap/fault stuff, not using VDBG any more */
++/* print mmap/nopage stuff, not using VDBG any more */
+ #define __IPATH_MMDBG       0x200
+ #define __IPATH_ERRPKTDBG   0x400
+ #define __IPATH_USER_SEND   0x1000	/* use user mode send */
+@@ -82,7 +82,7 @@
+ #define __IPATH_VERBDBG   0x0	/* very verbose debug */
+ #define __IPATH_PKTDBG    0x0	/* print packet data */
+ #define __IPATH_PROCDBG   0x0	/* process startup (init)/exit messages */
+-/* print mmap/fault stuff, not using VDBG any more */
++/* print mmap/nopage stuff, not using VDBG any more */
+ #define __IPATH_MMDBG     0x0
+ #define __IPATH_EPKTDBG   0x0	/* print ethernet packet data */
+ #define __IPATH_IPATHDBG  0x0	/* Ethernet (IPATH) table dump on */
+Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_file_ops.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_file_ops.c
++++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_file_ops.c
+@@ -1130,24 +1130,33 @@ bail:
+ }
+ 
+ /*
+- * ipath_file_vma_fault - handle a VMA page fault.
++ * ipath_file_vma_nopage - handle a VMA page fault.
+  */
+-static int ipath_file_vma_fault(struct vm_area_struct *vma,
+-					struct vm_fault *vmf)
++static struct page *ipath_file_vma_nopage(struct vm_area_struct *vma,
++					  unsigned long address, int *type)
+ {
+-	struct page *page;
++	unsigned long offset = address - vma->vm_start;
++	struct page *page = NOPAGE_SIGBUS;
++	void *pageptr;
+ 
+-	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
++	/*
++	 * Convert the vmalloc address into a struct page.
++	 */
++	pageptr = (void *)(offset + (vma->vm_pgoff << PAGE_SHIFT));
++	page = vmalloc_to_page(pageptr);
+ 	if (!page)
+-		return VM_FAULT_SIGBUS;
+-	get_page(page);
+-	vmf->page = page;
++		goto out;
+ 
+-	return 0;
++	/* Increment the reference count. */
++	get_page(page);
++	if (type)
++		*type = VM_FAULT_MINOR;
++out:
++	return page;
+ }
+ 
+ static struct vm_operations_struct ipath_file_vm_ops = {
+-	.fault = ipath_file_vma_fault,
++	.nopage = ipath_file_vma_nopage,
+ };
+ 
+ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
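
The revert above follows the usual recipe for kernels whose struct vm_operations_struct has no .fault method: the same page lookup is expressed as a .nopage handler, which returns the page (or NOPAGE_SIGBUS) directly and reports VM_FAULT_MINOR through the type pointer. Roughly, for a vmalloc()ed buffer buf (a char * stand-in; bounds checks omitted and the mapping assumed to start at file offset zero):

    /* Newer kernels: .fault fills in vmf->page and returns a VM_FAULT_* code. */
    static int my_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
            struct page *page = vmalloc_to_page(buf + (vmf->pgoff << PAGE_SHIFT));

            if (!page)
                    return VM_FAULT_SIGBUS;
            get_page(page);
            vmf->page = page;
            return 0;
    }

    /* 2.6.18-era kernels: .nopage returns the page itself. */
    static struct page *my_vma_nopage(struct vm_area_struct *vma,
                                      unsigned long address, int *type)
    {
            struct page *page = vmalloc_to_page(buf + (address - vma->vm_start));

            if (!page)
                    return NOPAGE_SIGBUS;
            get_page(page);
            if (type)
                    *type = VM_FAULT_MINOR;
            return page;
    }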

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0050_aio_write.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0050_aio_write.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0050_aio_write.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,44 @@
+BACKPORT - Use writev instead of aio_write in 2.6.18 and older kernels
+
+Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
+
+---
+ drivers/infiniband/hw/ipath/ipath_file_ops.c |   11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_file_ops.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_file_ops.c
++++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_file_ops.c
+@@ -50,15 +50,15 @@ static int ipath_open(struct inode *, st
+ static int ipath_close(struct inode *, struct file *);
+ static ssize_t ipath_write(struct file *, const char __user *, size_t,
+ 			   loff_t *);
+-static ssize_t ipath_writev(struct kiocb *, const struct iovec *,
+-			    unsigned long , loff_t);
++static ssize_t ipath_writev(struct file *, const struct iovec *,
++			    unsigned long , loff_t *);
+ static unsigned int ipath_poll(struct file *, struct poll_table_struct *);
+ static int ipath_mmap(struct file *, struct vm_area_struct *);
+ 
+ static const struct file_operations ipath_file_ops = {
+ 	.owner = THIS_MODULE,
+ 	.write = ipath_write,
+-	.aio_write = ipath_writev,
++	.writev = ipath_writev,
+ 	.open = ipath_open,
+ 	.release = ipath_close,
+ 	.poll = ipath_poll,
+@@ -2416,10 +2416,9 @@ bail:
+ 	return ret;
+ }
+ 
+-static ssize_t ipath_writev(struct kiocb *iocb, const struct iovec *iov,
+-			    unsigned long dim, loff_t off)
++static ssize_t ipath_writev(struct file *filp, const struct iovec *iov,
++			    unsigned long dim, loff_t *off)
+ {
+-	struct file *filp = iocb->ki_filp;
+ 	struct ipath_filedata *fp = filp->private_data;
+ 	struct ipath_portdata *pd = port_fp(filp);
+ 	struct ipath_user_sdma_queue *pq = fp->pq;
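
On 2.6.18 and older, vectored writes from user space still arrive through the .writev file operation; the iovec-based .aio_write entry point is a later addition, which is why both the prototype and the file_operations hookup change above. Side by side (my_writev_* are stand-in names):

    /* Newer kernels: wired into file_operations.aio_write; the struct file
     * is recovered via iocb->ki_filp inside the handler. */
    ssize_t my_writev_aio(struct kiocb *iocb, const struct iovec *iov,
                          unsigned long nr_segs, loff_t pos);

    /* 2.6.18 and older: wired into file_operations.writev, which passes
     * the struct file and a loff_t pointer directly. */
    ssize_t my_writev_old(struct file *filp, const struct iovec *iov,
                          unsigned long nr_segs, loff_t *off);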

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0060_htirq.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0060_htirq.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0060_htirq.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,164 @@
+BACKPORT - use old HT IRQ infrastructure on 2.6.18 and earlier
+
+Signed-off-by: Ralph Campbell <ralph.campbell at qlogic.com>
+
+---
+ drivers/infiniband/hw/ipath/Makefile        |    2 
+ drivers/infiniband/hw/ipath/ipath_driver.c  |    2 
+ drivers/infiniband/hw/ipath/ipath_iba6110.c |   82 ++++++++++++++++++----------
+ 3 files changed, 57 insertions(+), 29 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/hw/ipath/Makefile
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/ipath/Makefile
++++ ofed_kernel/drivers/infiniband/hw/ipath/Makefile
+@@ -34,7 +34,7 @@ ib_ipath-y := \
+ 	ipath_sd7220.o \
+ 	ipath_sd7220_img.o
+ 
+-ib_ipath-$(CONFIG_HT_IRQ) += ipath_iba6110.o
++ib_ipath-y += ipath_iba6110.o
+ ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba6120.o
+ 
+ ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
+Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_driver.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_driver.c
++++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_driver.c
+@@ -42,6 +42,8 @@
+ #include "ipath_kernel.h"
+ #include "ipath_verbs.h"
+ 
++#define CONFIG_HT_IRQ
++
+ static void ipath_update_pio_bufs(struct ipath_devdata *);
+ 
+ const char *ipath_get_unit_name(int unit)
+Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_iba6110.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_iba6110.c
++++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_iba6110.c
+@@ -39,7 +39,6 @@
+ #include <linux/vmalloc.h>
+ #include <linux/pci.h>
+ #include <linux/delay.h>
+-#include <linux/htirq.h>
+ #include <rdma/ib_verbs.h>
+ 
+ #include "ipath_kernel.h"
+@@ -986,23 +985,50 @@ static int ipath_ht_intconfig(struct ipa
+ 	return ret;
+ }
+ 
+-static void ipath_ht_irq_update(struct pci_dev *dev, int irq,
+-				struct ht_irq_msg *msg)
++static int set_int_handler(struct ipath_devdata *dd, struct pci_dev *pdev,
++			    int pos)
+ {
+-	struct ipath_devdata *dd = pci_get_drvdata(dev);
+-	u64 prev_intconfig = dd->ipath_intconfig;
++	u32 int_handler_addr_lower;
++	u32 int_handler_addr_upper;
++	u64 ihandler;
++	u32 intvec;
++
++	/* use indirection register to get the intr handler */
++	pci_write_config_byte(pdev, pos + HT_INTR_REG_INDEX, 0x10);
++	pci_read_config_dword(pdev, pos + 4, &int_handler_addr_lower);
++	pci_write_config_byte(pdev, pos + HT_INTR_REG_INDEX, 0x11);
++	pci_read_config_dword(pdev, pos + 4, &int_handler_addr_upper);
++
++	ihandler = (u64) int_handler_addr_lower |
++		((u64) int_handler_addr_upper << 32);
++
++	/*
++	 * kernels with CONFIG_PCI_MSI set the vector in the irq field of
++	 * struct pci_device, so we use that to program the internal
++	 * interrupt register (not config space) with that value. The BIOS
++	 * must still have done the basic MSI setup.
++	 */
++	intvec = pdev->irq;
++	/*
++	 * clear any vector bits there; normally not set but we'll overload
++	 * this for some debug purposes (setting the HTC debug register
++	 * value from software, rather than GPIOs), so it might be set on a
++	 * driver reload.
++	 */
++	ihandler &= ~0xff0000;
++	/* x86 vector goes in intrinfo[23:16] */
++	ihandler |= intvec << 16;
++	ipath_cdbg(VERBOSE, "ihandler lower %x, upper %x, intvec %x, "
++		   "interruptconfig %llx\n", int_handler_addr_lower,
++		   int_handler_addr_upper, intvec,
++		   (unsigned long long) ihandler);
++
++	/* can't program yet, so save for interrupt setup */
++	dd->ipath_intconfig = ihandler;
++	dd->ipath_irq = intvec;
++	/* keep going, so we find link control stuff also */
+ 
+-	dd->ipath_intconfig = msg->address_lo;
+-	dd->ipath_intconfig |= ((u64) msg->address_hi) << 32;
+-
+-	/*
+-	 * If the previous value of dd->ipath_intconfig is zero, we're
+-	 * getting configured for the first time, and must not program the
+-	 * intconfig register here (it will be programmed later, when the
+-	 * hardware is ready).  Otherwise, we should.
+-	 */
+-	if (prev_intconfig)
+-		ipath_ht_intconfig(dd);
++	return ihandler != 0;
+ }
+ 
+ /**
+@@ -1018,19 +1044,12 @@ static void ipath_ht_irq_update(struct p
+ static int ipath_setup_ht_config(struct ipath_devdata *dd,
+ 				 struct pci_dev *pdev)
+ {
+-	int pos, ret;
+-
+-	ret = __ht_create_irq(pdev, 0, ipath_ht_irq_update);
+-	if (ret < 0) {
+-		ipath_dev_err(dd, "Couldn't create interrupt handler: "
+-			      "err %d\n", ret);
+-		goto bail;
+-	}
+-	dd->ipath_irq = ret;
+-	ret = 0;
++	int pos, ret = 0;
++	int ihandler = 0;
+ 
+ 	/*
+-	 * Handle clearing CRC errors in linkctrl register if necessary.  We
++	 * Read the capability info to find the interrupt info, and also
++	 * handle clearing CRC errors in linkctrl register if necessary.  We
+ 	 * do this early, before we ever enable errors or hardware errors,
+ 	 * mostly to avoid causing the chip to enter freeze mode.
+ 	 */
+@@ -1055,9 +1074,17 @@ static int ipath_setup_ht_config(struct 
+ 		}
+ 		if (!(cap_type & 0xE0))
+ 			slave_or_pri_blk(dd, pdev, pos, cap_type);
++		else if (cap_type == HT_INTR_DISC_CONFIG)
++			ihandler = set_int_handler(dd, pdev, pos);
+ 	} while ((pos = pci_find_next_capability(pdev, pos,
+ 						 PCI_CAP_ID_HT)));
+ 
++	if (!ihandler) {
++		ipath_dev_err(dd, "Couldn't find interrupt handler in "
++			      "config space\n");
++		ret = -ENODEV;
++	}
++
+ 	dd->ipath_flags |= IPATH_SWAP_PIOBUFS;
+ 
+ bail:
+@@ -1690,7 +1717,6 @@ static int ipath_ht_get_base_info(struct
+ static void ipath_ht_free_irq(struct ipath_devdata *dd)
+ {
+ 	free_irq(dd->ipath_irq, dd);
+-	ht_destroy_irq(dd->ipath_irq);
+ 	dd->ipath_irq = 0;
+ 	dd->ipath_intconfig = 0;
+ }

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0070_vmalloc_user-2.6.18.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0070_vmalloc_user-2.6.18.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0070_vmalloc_user-2.6.18.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,143 @@
+BACKPORT - avoid using vmalloc_user() in 2.6.18 due to bugs.
+
+Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
+
+diff -up a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
+--- a/drivers/infiniband/hw/ipath/ipath_cq.c	2008-10-24 10:40:30.000000000 -0700
++++ b/drivers/infiniband/hw/ipath/ipath_cq.c	2008-10-24 10:45:56.000000000 -0700
+@@ -230,11 +230,12 @@ struct ib_cq *ipath_create_cq(struct ib_
+ 		sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
+ 	else
+ 		sz += sizeof(struct ib_wc) * (entries + 1);
+-	wc = vmalloc_user(sz);
++	wc = vmalloc(sz);
+ 	if (!wc) {
+ 		ret = ERR_PTR(-ENOMEM);
+ 		goto bail_cq;
+ 	}
++	memset(wc, 0, sz);
+ 
+ 	/*
+ 	 * Return the address of the WC as the offset to mmap.
+@@ -389,11 +390,12 @@ int ipath_resize_cq(struct ib_cq *ibcq, 
+ 		sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
+ 	else
+ 		sz += sizeof(struct ib_wc) * (cqe + 1);
+-	wc = vmalloc_user(sz);
++	wc = vmalloc(sz);
+ 	if (!wc) {
+ 		ret = -ENOMEM;
+ 		goto bail;
+ 	}
++	memset(wc, 0, sz);
+ 
+ 	/* Check that we can write the offset to mmap. */
+ 	if (udata && udata->outlen >= sizeof(__u64)) {
+diff -up a/drivers/infiniband/hw/ipath/ipath_mmap.c b/drivers/infiniband/hw/ipath/ipath_mmap.c
+--- a/drivers/infiniband/hw/ipath/ipath_mmap.c	2008-10-24 10:40:30.000000000 -0700
++++ b/drivers/infiniband/hw/ipath/ipath_mmap.c	2008-10-24 10:45:56.000000000 -0700
+@@ -74,9 +74,40 @@ static void ipath_vma_close(struct vm_ar
+ 	kref_put(&ip->ref, ipath_release_mmap_info);
+ }
+ 
++/*
++ * ipath_vma_nopage - handle a VMA page fault.
++ */
++static struct page *ipath_vma_nopage(struct vm_area_struct *vma,
++				     unsigned long address, int *type)
++{
++	struct ipath_mmap_info *ip = vma->vm_private_data;
++	unsigned long offset = address - vma->vm_start;
++	struct page *page = NOPAGE_SIGBUS;
++	void *pageptr;
++
++	if (offset >= ip->size)
++		goto out; /* out of range */
++
++	/*
++	 * Convert the vmalloc address into a struct page.
++	 */
++	pageptr = (void *)(offset + ip->obj);
++	page = vmalloc_to_page(pageptr);
++	if (!page)
++		goto out;
++
++	/* Increment the reference count. */
++	get_page(page);
++	if (type)
++		*type = VM_FAULT_MINOR;
++out:
++	return page;
++}
++
+ static struct vm_operations_struct ipath_vm_ops = {
+ 	.open =     ipath_vma_open,
+ 	.close =    ipath_vma_close,
++	.nopage =   ipath_vma_nopage,
+ };
+ 
+ /**
+@@ -111,10 +142,10 @@ int ipath_mmap(struct ib_ucontext *conte
+ 		list_del_init(&ip->pending_mmaps);
+ 		spin_unlock_irq(&dev->pending_lock);
+ 
+-		ret = remap_vmalloc_range(vma, ip->obj, 0);
+-		if (ret)
+-			goto done;
++		ret = 0;
++
+ 		vma->vm_ops = &ipath_vm_ops;
++		vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
+ 		vma->vm_private_data = ip;
+ 		ipath_vma_open(vma);
+ 		goto done;
+diff -up a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
+--- a/drivers/infiniband/hw/ipath/ipath_qp.c	2008-10-24 10:40:33.000000000 -0700
++++ b/drivers/infiniband/hw/ipath/ipath_qp.c	2008-10-24 10:47:56.000000000 -0700
+@@ -827,12 +827,14 @@ struct ib_qp *ipath_create_qp(struct ib_
+ 			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
+ 			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
+ 				sizeof(struct ipath_rwqe);
+-			qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
++			qp->r_rq.wq = vmalloc(sizeof(struct ipath_rwq) +
+ 					      qp->r_rq.size * sz);
+ 			if (!qp->r_rq.wq) {
+ 				ret = ERR_PTR(-ENOMEM);
+ 				goto bail_sg_list;
+ 			}
++			memset(qp->r_rq.wq, 0,
++			       sizeof(struct ipath_rwq) + qp->r_rq.size * sz);
+ 		}
+ 
+ 		/*
+diff -up a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c
+--- a/drivers/infiniband/hw/ipath/ipath_srq.c	2008-10-24 10:40:30.000000000 -0700
++++ b/drivers/infiniband/hw/ipath/ipath_srq.c	2008-10-24 10:45:56.000000000 -0700
+@@ -130,11 +130,12 @@ struct ib_srq *ipath_create_srq(struct i
+ 	srq->rq.max_sge = srq_init_attr->attr.max_sge;
+ 	sz = sizeof(struct ib_sge) * srq->rq.max_sge +
+ 		sizeof(struct ipath_rwqe);
+-	srq->rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + srq->rq.size * sz);
++	srq->rq.wq = vmalloc(sizeof(struct ipath_rwq) + srq->rq.size * sz);
+ 	if (!srq->rq.wq) {
+ 		ret = ERR_PTR(-ENOMEM);
+ 		goto bail_srq;
+ 	}
++	memset(srq->rq.wq, 0, sizeof(struct ipath_rwq) + srq->rq.size * sz);
+ 
+ 	/*
+ 	 * Return the address of the RWQ as the offset to mmap.
+@@ -230,11 +231,12 @@ int ipath_modify_srq(struct ib_srq *ibsr
+ 		sz = sizeof(struct ipath_rwqe) +
+ 			srq->rq.max_sge * sizeof(struct ib_sge);
+ 		size = attr->max_wr + 1;
+-		wq = vmalloc_user(sizeof(struct ipath_rwq) + size * sz);
++		wq = vmalloc(sizeof(struct ipath_rwq) + size * sz);
+ 		if (!wq) {
+ 			ret = -ENOMEM;
+ 			goto bail;
+ 		}
++		memset(wq, 0, sizeof(struct ipath_rwq) + size * sz);
+ 
+ 		/* Check that we can write the offset to mmap. */
+ 		if (udata && udata->inlen >= sizeof(__u64)) {
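
The substitution itself is small: vmalloc_user() returns zeroed memory that is also flagged as safe for remap_vmalloc_range(), and since the 2.6.18 implementation is treated as buggy here, the patch open-codes the zeroing and, in ipath_mmap.c, maps the buffer page by page through a .nopage handler instead. The allocation side reduces to:

    /* Upstream (newer kernels): zeroed, user-mappable memory in one call. */
    wq = vmalloc_user(size);

    /* 2.6.18 fallback used by this patch: plain vmalloc() plus memset();
     * the user mapping then goes through the .nopage handler rather than
     * remap_vmalloc_range(). */
    wq = vmalloc(size);
    if (wq)
            memset(wq, 0, size);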

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0080_sysfs.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0080_sysfs.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0080_sysfs.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,85 @@
+---
+ drivers/infiniband/hw/ipath/ipath_verbs.c |   39 +++++++++++++-----------------
+ 1 file changed, 18 insertions(+), 21 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_verbs.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_verbs.c
++++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_verbs.c
+@@ -2235,20 +2235,18 @@ void ipath_unregister_ib_device(struct i
+ 	ib_dealloc_device(ibdev);
+ }
+ 
+-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
+-			char *buf)
++static ssize_t show_rev(struct class_device *cdev, char *buf)
+ {
+ 	struct ipath_ibdev *dev =
+-		container_of(device, struct ipath_ibdev, ibdev.dev);
++		container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
+ 
+ 	return sprintf(buf, "%x\n", dev->dd->ipath_pcirev);
+ }
+ 
+-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
+-			char *buf)
++static ssize_t show_hca(struct class_device *cdev, char *buf)
+ {
+ 	struct ipath_ibdev *dev =
+-		container_of(device, struct ipath_ibdev, ibdev.dev);
++		container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
+ 	int ret;
+ 
+ 	ret = dev->dd->ipath_f_get_boardname(dev->dd, buf, 128);
+@@ -2261,11 +2259,10 @@ bail:
+ 	return ret;
+ }
+ 
+-static ssize_t show_stats(struct device *device, struct device_attribute *attr,
+-			  char *buf)
++static ssize_t show_stats(struct class_device *cdev, char *buf)
+ {
+ 	struct ipath_ibdev *dev =
+-		container_of(device, struct ipath_ibdev, ibdev.dev);
++		container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
+ 	int i;
+ 	int len;
+ 
+@@ -2300,16 +2297,16 @@ static ssize_t show_stats(struct device 
+ 	return len;
+ }
+ 
+-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
+-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
+-static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
+-static DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL);
+-
+-static struct device_attribute *ipath_class_attributes[] = {
+-	&dev_attr_hw_rev,
+-	&dev_attr_hca_type,
+-	&dev_attr_board_id,
+-	&dev_attr_stats
++static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
++static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
++static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
++static CLASS_DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL);
++
++static struct class_device_attribute *ipath_class_attributes[] = {
++	&class_device_attr_hw_rev,
++	&class_device_attr_hca_type,
++	&class_device_attr_board_id,
++	&class_device_attr_stats
+ };
+ 
+ static int ipath_verbs_register_sysfs(struct ib_device *dev)
+@@ -2318,8 +2315,8 @@ static int ipath_verbs_register_sysfs(st
+ 	int ret;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
+-		if (device_create_file(&dev->dev,
+-				       ipath_class_attributes[i])) {
++		if (class_device_create_file(&dev->class_dev,
++					       ipath_class_attributes[i])) {
+ 			ret = 1;
+ 			goto bail;
+ 		}
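
This is the same struct device to struct class_device translation as the char-device patch above, applied to the IB device's sysfs attributes: on 2.6.18 the ib_device still exposes a class_dev member, so the show() prototypes and the attribute macros change accordingly. A prototype-level sketch (show_hw_rev_* are stand-in names):

    /* Newer kernels: registered with device_create_file(&ibdev->dev, ...). */
    static ssize_t show_hw_rev_new(struct device *dev,
                                   struct device_attribute *attr, char *buf);
    static DEVICE_ATTR(hw_rev, S_IRUGO, show_hw_rev_new, NULL);

    /* 2.6.18-era kernels: registered with
     * class_device_create_file(&ibdev->class_dev, ...). */
    static ssize_t show_hw_rev_old(struct class_device *cdev, char *buf);
    static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_hw_rev_old, NULL);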

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0095_pat.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0095_pat.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipath_0095_pat.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,77 @@
+--- a/drivers/infiniband/hw/ipath/ipath_wc_pat.c.orig	2008-11-04 13:46:56.557658000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_wc_pat.c	2008-11-04 13:47:50.521639000 -0800
+@@ -182,7 +182,7 @@
+ 	preempt_disable();
+ 	rd_old_pat(&ret);
+ 	if (!ret)
+-		smp_call_function(rd_old_pat, &ret, 1);
++		smp_call_function(rd_old_pat, &ret, 1, 1);
+ 	if (ret)
+ 		goto out;
+ 
+@@ -193,7 +193,7 @@
+ 	if (ret)
+ 		goto out;
+ 
+-	smp_call_function(wr_new_pat, &ret, 1);
++	smp_call_function(wr_new_pat, &ret, 1, 1);
+ 	BUG_ON(ret); /* have inconsistent PAT state */
+ out:
+ 	preempt_enable();
+@@ -207,7 +207,7 @@
+ 	preempt_disable();
+ 	wr_old_pat(&ret);
+ 	if (!ret) {
+-		smp_call_function(wr_old_pat, &ret, 1);
++		smp_call_function(wr_old_pat, &ret, 1, 1);
+ 		BUG_ON(ret); /* have inconsistent PAT state */
+ 	}
+ 
+diff --git a/drivers/infiniband/hw/ipath/ipath_wc_pat.c b/drivers/infiniband/hw/ipath/ipath_wc_pat.c
+index 8edf2fb..5233eac 100644
+--- a/drivers/infiniband/hw/ipath/ipath_wc_pat.c
++++ b/drivers/infiniband/hw/ipath/ipath_wc_pat.c
+@@ -217,7 +217,7 @@
+ 
+ int ipath_enable_wc_pat(void)
+ {
+-	struct cpuinfo_x86 *c = &cpu_data(0);
++	struct cpuinfo_x86 *c = &(cpu_data)[0];
+ 	int ret;
+ 
+ 	if (wc_enabled)
+@@ -257,6 +257,11 @@ pgprot_t pgprot_wc(pgprot_t _prot)
+ 		pgprot_noncached(_prot);
+ }
+ 
++void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
++{
++	return __ioremap(phys_addr, size, IPATH_WC_FLAGS);
++}
++
+ int ipath_wc_pat_enabled(void)
+ {
+ 	return wc_enabled;
+@@ -272,6 +277,11 @@ pgprot_t pgprot_wc(pgprot_t _prot)
+ 	return pgprot_noncached(_prot);
+ }
+ 
++void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
++{
++	return ioremap_nocache(phys_addr, size);
++}
++
+ int ipath_wc_pat_enabled(void)
+ {
+ 	return 0;
+diff --git a/drivers/infiniband/hw/ipath/ipath_wc_pat.h b/drivers/infiniband/hw/ipath/ipath_wc_pat.h
+index 28ba52f..1b17661 100644
+--- a/drivers/infiniband/hw/ipath/ipath_wc_pat.h
++++ b/drivers/infiniband/hw/ipath/ipath_wc_pat.h
+@@ -42,5 +42,6 @@ int ipath_enable_wc_pat(void);
+ void ipath_disable_wc_pat(void);
+ int ipath_wc_pat_enabled(void);
+ pgprot_t pgprot_wc(pgprot_t _prot);
++void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size);
+ 
+ #endif
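
The write-combining PAT helpers get three adjustments for the 2.6.18 base: smp_call_function() is called with the extra "nonatomic" argument that was only dropped in 2.6.27, cpu_data goes back to being indexed as an array rather than through the cpu_data() macro, and an ioremap_wc() fallback is supplied because older kernels do not provide one. The first of those, in isolation:

    /* Newer kernels (2.6.27 and later): smp_call_function(func, info, wait). */
    smp_call_function(wr_new_pat, &ret, 1);

    /* 2.6.18-era kernels (this backport):
     * smp_call_function(func, info, nonatomic, wait). */
    smp_call_function(wr_new_pat, &ret, 1, 1);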

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_0100_to_2.6.21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_0100_to_2.6.21.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_0100_to_2.6.21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,458 @@
+Backport IPOIB to kernel 2.6.23
+
+Signed-off-by: Eli Cohen <eli@mellanox.co.il>
+
+---
+
+---
+ drivers/infiniband/ulp/ipoib/ipoib.h           |    6 +-
+ drivers/infiniband/ulp/ipoib/ipoib_cm.c        |   20 ++++-----
+ drivers/infiniband/ulp/ipoib/ipoib_ib.c        |   55 +++++++++++++------------
+ drivers/infiniband/ulp/ipoib/ipoib_main.c      |   34 +++++----------
+ drivers/infiniband/ulp/ipoib/ipoib_multicast.c |   10 ++--
+ 5 files changed, 61 insertions(+), 64 deletions(-)
+
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h
+===================================================================
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:46:20.000000000 +0300
+@@ -277,8 +277,6 @@ struct ipoib_dev_priv {
+ 
+ 	struct net_device *dev;
+ 
+-	struct napi_struct napi;
+-
+ 	unsigned long flags;
+ 
+ 	struct mutex vlan_mutex;
+@@ -336,6 +334,8 @@ struct ipoib_dev_priv {
+ 
+ 	struct ib_event_handler event_handler;
+ 
++	struct net_device_stats stats;
++
+ 	struct net_device *parent;
+ 	struct list_head child_intfs;
+ 	struct list_head list;
+@@ -423,7 +423,7 @@ extern struct workqueue_struct *ipoib_wo
+ 
+ /* functions */
+ 
+-int ipoib_poll(struct napi_struct *napi, int budget);
++int ipoib_poll(struct net_device *dev, int *budget);
+ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
+ void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
+ 
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+===================================================================
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:46:20.000000000 +0300
+@@ -593,7 +593,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
+ 		ipoib_dbg(priv, "cm recv error "
+ 			   "(status=%d, wrid=%d vend_err %x)\n",
+ 			   wc->status, wr_id, wc->vendor_err);
+-		++dev->stats.rx_dropped;
++		++priv->stats.rx_dropped;
+ 		if (has_srq)
+ 			goto repost;
+ 		else {
+@@ -646,7 +646,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
+ 		 * this packet and reuse the old buffer.
+ 		 */
+ 		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
+-		++dev->stats.rx_dropped;
++		++priv->stats.rx_dropped;
+ 		goto repost;
+ 	}
+ 
+@@ -664,8 +664,8 @@ copied:
+ 	skb_pull(skb, IPOIB_ENCAP_LEN);
+ 
+ 	dev->last_rx = jiffies;
+-	++dev->stats.rx_packets;
+-	dev->stats.rx_bytes += skb->len;
++	++priv->stats.rx_packets;
++	priv->stats.rx_bytes += skb->len;
+ 
+ 	skb->dev = dev;
+ 	/* XXX get correct PACKET_ type here */
+@@ -714,8 +714,8 @@ void ipoib_cm_send(struct net_device *de
+ 	if (unlikely(skb->len > tx->mtu)) {
+ 		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
+ 			   skb->len, tx->mtu);
+-		++dev->stats.tx_dropped;
+-		++dev->stats.tx_errors;
++		++priv->stats.tx_dropped;
++		++priv->stats.tx_errors;
+ 		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
+ 		return;
+ 	}
+@@ -734,7 +734,7 @@ void ipoib_cm_send(struct net_device *de
+ 	tx_req->skb = skb;
+ 	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
+ 	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
+-		++dev->stats.tx_errors;
++		++priv->stats.tx_errors;
+ 		dev_kfree_skb_any(skb);
+ 		return;
+ 	}
+@@ -744,7 +744,7 @@ void ipoib_cm_send(struct net_device *de
+ 	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
+ 			       addr, skb->len))) {
+ 		ipoib_warn(priv, "post_send failed\n");
+-		++dev->stats.tx_errors;
++		++priv->stats.tx_errors;
+ 		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
+ 		dev_kfree_skb_any(skb);
+ 	} else {
+@@ -781,8 +781,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
+ 	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
+ 
+ 	/* FIXME: is this right? Shouldn't we only increment on success? */
+-	++dev->stats.tx_packets;
+-	dev->stats.tx_bytes += tx_req->skb->len;
++	++priv->stats.tx_packets;
++	priv->stats.tx_bytes += tx_req->skb->len;
+ 
+ 	dev_kfree_skb_any(tx_req->skb);
+ 
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+===================================================================
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:43:16.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:49:04.000000000 +0300
+@@ -261,7 +261,7 @@ static void ipoib_ib_handle_rx_wc(struct
+ 	 * this packet and reuse the old buffer.
+ 	 */
+ 	if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
+-		++dev->stats.rx_dropped;
++		++priv->stats.rx_dropped;
+ 		goto repost;
+ 	}
+ 
+@@ -278,8 +278,8 @@ static void ipoib_ib_handle_rx_wc(struct
+ 	skb_pull(skb, IPOIB_ENCAP_LEN);
+ 
+ 	dev->last_rx = jiffies;
+-	++dev->stats.rx_packets;
+-	dev->stats.rx_bytes += skb->len;
++	++priv->stats.rx_packets;
++	priv->stats.rx_bytes += skb->len;
+ 
+ 	skb->dev = dev;
+ 	/* XXX get correct PACKET_ type here */
+@@ -379,8 +379,8 @@ static void ipoib_ib_handle_tx_wc(struct
+ 
+ 	ipoib_dma_unmap_tx(priv->ca, tx_req);
+ 
+-	++dev->stats.tx_packets;
+-	dev->stats.tx_bytes += tx_req->skb->len;
++	++priv->stats.tx_packets;
++	priv->stats.tx_bytes += tx_req->skb->len;
+ 
+ 	dev_kfree_skb_any(tx_req->skb);
+ 
+@@ -408,19 +408,19 @@ static int poll_tx(struct ipoib_dev_priv
+ 	return n == MAX_SEND_CQE;
+ }
+ 
+-int ipoib_poll(struct napi_struct *napi, int budget)
++int ipoib_poll(struct net_device *dev, int *budget)
+ {
+-	struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
+-	struct net_device *dev = priv->dev;
++	struct ipoib_dev_priv *priv = netdev_priv(dev);
++	int max = min(*budget, dev->quota);
+ 	int done;
+ 	int t;
+ 	int n, i;
++	int ret;
+ 
+ 	done  = 0;
+ 
+ poll_more:
+-	while (done < budget) {
+-		int max = (budget - done);
++	while (max) {
+ 
+ 		t = min(IPOIB_NUM_WC, max);
+ 		n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);
+@@ -430,6 +430,7 @@ poll_more:
+ 
+ 			if (wc->wr_id & IPOIB_OP_RECV) {
+ 				++done;
++				--max;
+ 				if (wc->wr_id & IPOIB_OP_CM)
+ 					ipoib_cm_handle_rx_wc(dev, wc);
+ 				else
+@@ -442,27 +443,29 @@ poll_more:
+ 			break;
+ 	}
+ 
+-	if (done < budget) {
++	if (max) {
+ 		if (dev->features & NETIF_F_LRO)
+ 			lro_flush_all(&priv->lro.lro_mgr);
+ 
+-		netif_rx_complete(dev, napi);
++		netif_rx_complete(dev);
+ 		if (unlikely(ib_req_notify_cq(priv->recv_cq,
+ 					      IB_CQ_NEXT_COMP |
+ 					      IB_CQ_REPORT_MISSED_EVENTS)) &&
+-		    netif_rx_reschedule(dev, napi))
++					      netif_rx_reschedule(dev, 0))
+ 			goto poll_more;
+-	}
++		ret = 0;
++	} else
++		ret = 1;
++
++	dev->quota -= done;
++	*budget    -= done;
+ 
+-	return done;
++	return ret;
+ }
+ 
+ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
+ {
+-	struct net_device *dev = dev_ptr;
+-	struct ipoib_dev_priv *priv = netdev_priv(dev);
+-
+-	netif_rx_schedule(dev, &priv->napi);
++	netif_rx_schedule(dev_ptr);
+ }
+ 
+ static void drain_tx_cq(struct net_device *dev)
+@@ -539,8 +542,8 @@ void ipoib_send(struct net_device *dev, 
+ 		phead = skb->data;
+ 		if (unlikely(!skb_pull(skb, hlen))) {
+ 			ipoib_warn(priv, "linear data too small\n");
+-			++dev->stats.tx_dropped;
+-			++dev->stats.tx_errors;
++			++priv->stats.tx_dropped;
++			++priv->stats.tx_errors;
+ 			dev_kfree_skb_any(skb);
+ 			return;
+ 		}
+@@ -548,8 +551,8 @@ void ipoib_send(struct net_device *dev, 
+ 		if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
+ 			ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
+ 				   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
+-			++dev->stats.tx_dropped;
+-			++dev->stats.tx_errors;
++			++priv->stats.tx_dropped;
++			++priv->stats.tx_errors;
+ 			ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
+ 			return;
+ 		}
+@@ -570,7 +573,7 @@ void ipoib_send(struct net_device *dev, 
+ 	tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
+ 	tx_req->skb = skb;
+ 	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
+-		++dev->stats.tx_errors;
++		++priv->stats.tx_errors;
+ 		dev_kfree_skb_any(skb);
+ 		return;
+ 	}
+@@ -590,7 +593,7 @@ void ipoib_send(struct net_device *dev, 
+ 	if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
+ 			       address->ah, qpn, tx_req, phead, hlen))) {
+ 		ipoib_warn(priv, "post_send failed\n");
+-		++dev->stats.tx_errors;
++		++priv->stats.tx_errors;
+ 		--priv->tx_outstanding;
+ 		ipoib_dma_unmap_tx(priv->ca, tx_req);
+ 		dev_kfree_skb_any(skb);
+@@ -708,8 +711,7 @@ int ipoib_ib_dev_open(struct net_device 
+ 	priv->poll_timer.function = ipoib_ib_tx_timer_func;
+ 	priv->poll_timer.data = (unsigned long)dev;
+ 
+-	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_enable(&priv->napi);
++	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+ 
+ 	return 0;
+ }
+@@ -828,8 +830,8 @@ int ipoib_ib_dev_stop(struct net_device 
+ 	struct ipoib_tx_buf *tx_req;
+ 	int i;
+ 
+-	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_disable(&priv->napi);
++	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
++	netif_poll_disable(dev);
+ 
+ 	ipoib_cm_dev_stop(dev);
+ 
+@@ -903,6 +905,7 @@ timeout:
+ 
+ 	ipoib_ah_dev_cleanup(dev);
+ 
++	netif_poll_enable(dev);
+ 	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
+ 
+ 	return 0;
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c
+===================================================================
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:44:02.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:46:20.000000000 +0300
+@@ -585,7 +585,7 @@ static void neigh_add_path(struct sk_buf
+ 
+ 	neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
+ 	if (!neigh) {
+-		++dev->stats.tx_dropped;
++		++priv->stats.tx_dropped;
+ 		dev_kfree_skb_any(skb);
+ 		return;
+ 	}
+@@ -646,7 +646,7 @@ err_list:
+ err_path:
+ 	ipoib_neigh_free(dev, neigh);
+ err_drop:
+-	++dev->stats.tx_dropped;
++	++priv->stats.tx_dropped;
+ 	dev_kfree_skb_any(skb);
+ 
+ 	spin_unlock_irqrestore(&priv->lock, flags);
+@@ -697,7 +697,7 @@ static void unicast_arp_send(struct sk_b
+ 			} else
+ 				__path_add(dev, path);
+ 		} else {
+-			++dev->stats.tx_dropped;
++			++priv->stats.tx_dropped;
+ 			dev_kfree_skb_any(skb);
+ 		}
+ 
+@@ -716,7 +716,7 @@ static void unicast_arp_send(struct sk_b
+ 		skb_push(skb, sizeof *phdr);
+ 		__skb_queue_tail(&path->queue, skb);
+ 	} else {
+-		++dev->stats.tx_dropped;
++		++priv->stats.tx_dropped;
+ 		dev_kfree_skb_any(skb);
+ 	}
+ 
+@@ -773,7 +773,7 @@ static int ipoib_start_xmit(struct sk_bu
+ 			__skb_queue_tail(&neigh->queue, skb);
+ 			spin_unlock_irqrestore(&priv->lock, flags);
+ 		} else {
+-			++dev->stats.tx_dropped;
++			++priv->stats.tx_dropped;
+ 			dev_kfree_skb_any(skb);
+ 		}
+ 	} else {
+@@ -799,7 +799,7 @@ static int ipoib_start_xmit(struct sk_bu
+ 					   IPOIB_QPN(phdr->hwaddr),
+ 					   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
+ 				dev_kfree_skb_any(skb);
+-				++dev->stats.tx_dropped;
++				++priv->stats.tx_dropped;
+ 				return NETDEV_TX_OK;
+ 			}
+ 
+@@ -825,7 +825,7 @@ static void ipoib_timeout(struct net_dev
+ static int ipoib_hard_header(struct sk_buff *skb,
+ 			     struct net_device *dev,
+ 			     unsigned short type,
+-			     const void *daddr, const void *saddr, unsigned len)
++			     void *daddr, void *saddr, unsigned len)
+ {
+ 	struct ipoib_header *header;
+ 
+@@ -914,9 +914,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
+ void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
+ {
+ 	struct sk_buff *skb;
++	struct ipoib_dev_priv *priv = netdev_priv(dev);
+ 	*to_ipoib_neigh(neigh->neighbour) = NULL;
+ 	while ((skb = __skb_dequeue(&neigh->queue))) {
+-		++dev->stats.tx_dropped;
++		++priv->stats.tx_dropped;
+ 		dev_kfree_skb_any(skb);
+ 	}
+ 	if (ipoib_cm_get(neigh))
+@@ -991,10 +992,6 @@ void ipoib_dev_cleanup(struct net_device
+ 	priv->tx_ring = NULL;
+ }
+ 
+-static const struct header_ops ipoib_header_ops = {
+-	.create	= ipoib_hard_header,
+-};
+-
+ static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
+ 		       void **tcph, u64 *hdr_flags, void *priv)
+ {
+@@ -1052,13 +1049,13 @@ static void ipoib_setup(struct net_devic
+ 	dev->change_mtu		 = ipoib_change_mtu;
+ 	dev->hard_start_xmit	 = ipoib_start_xmit;
+ 	dev->tx_timeout		 = ipoib_timeout;
+-	dev->header_ops		 = &ipoib_header_ops;
++	dev->hard_header         = ipoib_hard_header;
+ 	dev->set_multicast_list	 = ipoib_set_mcast_list;
+ 	dev->neigh_setup	 = ipoib_neigh_setup_dev;
+-
+ 	ipoib_set_ethtool_ops(dev);
++	dev->poll                = ipoib_poll;
++	dev->weight              = 100;
+ 
+-	netif_napi_add(dev, &priv->napi, ipoib_poll, 100);
+ 
+ 	dev->watchdog_timeo	 = HZ;
+ 
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+===================================================================
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:46:20.000000000 +0300
+@@ -100,7 +100,7 @@ static void ipoib_mcast_free(struct ipoi
+ 	}
+ 
+ 	netif_tx_lock_bh(dev);
+-	dev->stats.tx_dropped += tx_dropped;
++	priv->stats.tx_dropped += tx_dropped;
+ 	netif_tx_unlock_bh(dev);
+ 
+ 	kfree(mcast);
+@@ -285,6 +285,7 @@ ipoib_mcast_sendonly_join_complete(int s
+ {
+ 	struct ipoib_mcast *mcast = multicast->context;
+ 	struct net_device *dev = mcast->dev;
++	struct ipoib_dev_priv *priv = netdev_priv(dev);
+ 
+ 	/* We trap for port events ourselves. */
+ 	if (status == -ENETRESET)
+@@ -302,7 +303,7 @@ ipoib_mcast_sendonly_join_complete(int s
+ 		/* Flush out any queued packets */
+ 		netif_tx_lock_bh(dev);
+ 		while (!skb_queue_empty(&mcast->pkt_queue)) {
+-			++dev->stats.tx_dropped;
++			++priv->stats.tx_dropped;
+ 			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
+ 		}
+ 		netif_tx_unlock_bh(dev);
+@@ -677,7 +678,7 @@ void ipoib_mcast_send(struct net_device 
+ 	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)		||
+ 	    !priv->broadcast					||
+ 	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
+-		++dev->stats.tx_dropped;
++		++priv->stats.tx_dropped;
+ 		dev_kfree_skb_any(skb);
+ 		goto unlock;
+ 	}
+@@ -692,7 +693,7 @@ void ipoib_mcast_send(struct net_device 
+ 		if (!mcast) {
+ 			ipoib_warn(priv, "unable to allocate memory for "
+ 				   "multicast structure\n");
+-			++dev->stats.tx_dropped;
++			++priv->stats.tx_dropped;
+ 			dev_kfree_skb_any(skb);
+ 			goto out;
+ 		}
+@@ -707,7 +708,7 @@ void ipoib_mcast_send(struct net_device 
+ 		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
+ 			skb_queue_tail(&mcast->pkt_queue, skb);
+ 		else {
+-			++dev->stats.tx_dropped;
++			++priv->stats.tx_dropped;
+ 			dev_kfree_skb_any(skb);
+ 		}
+ 

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_0110_restore_get_stats.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_0110_restore_get_stats.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_0110_restore_get_stats.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,30 @@
+---
+ drivers/infiniband/ulp/ipoib/ipoib_main.c |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -813,6 +813,13 @@ out:
+ 	return NETDEV_TX_OK;
+ }
+ 
++static struct net_device_stats *ipoib_get_stats(struct net_device *dev)
++{
++	struct ipoib_dev_priv *priv = netdev_priv(dev);
++
++	return &priv->stats;
++}
++
+ static void ipoib_timeout(struct net_device *dev)
+ {
+ 	struct ipoib_dev_priv *priv = netdev_priv(dev);
+@@ -1051,6 +1058,7 @@ static void ipoib_setup(struct net_devic
+ 	dev->stop		 = ipoib_stop;
+ 	dev->change_mtu		 = ipoib_change_mtu;
+ 	dev->hard_start_xmit	 = ipoib_start_xmit;
++	dev->get_stats 		 = ipoib_get_stats;
+ 	dev->tx_timeout		 = ipoib_timeout;
+ 	dev->hard_header         = ipoib_hard_header;
+ 	dev->set_multicast_list	 = ipoib_set_mcast_list;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,26 @@
+---
+ drivers/infiniband/ulp/ipoib/ipoib_fs.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+===================================================================
+--- ofa_kernel-1.4.orig/drivers/infiniband/ulp/ipoib/ipoib_fs.c
++++ ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+@@ -134,7 +134,7 @@ static int ipoib_mcg_open(struct inode *
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_mcg_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_mcg_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -240,7 +240,7 @@ static int ipoib_path_open(struct inode 
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_path_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_path_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_0900_netif_lock_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_0900_netif_lock_to_2_6_26.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_0900_netif_lock_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,26 @@
+---
+ drivers/infiniband/ulp/ipoib/ipoib_multicast.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
++++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+@@ -777,7 +777,7 @@ void ipoib_mcast_restart_task(struct wor
+ 	ipoib_mcast_stop_thread(dev, 0);
+ 
+ 	local_irq_save(flags);
+-	netif_addr_lock(dev);
++	netif_tx_lock(dev);
+ 	spin_lock(&priv->lock);
+ 
+ 	/*
+@@ -854,7 +854,7 @@ void ipoib_mcast_restart_task(struct wor
+ 	}
+ 
+ 	spin_unlock(&priv->lock);
+-	netif_addr_unlock(dev);
++	netif_tx_unlock(dev);
+ 	local_irq_restore(flags);
+ 
+ 	/* We have to cancel outside of the spinlock */

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_class_device_to_2_6_20.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_class_device_to_2_6_20.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_class_device_to_2_6_20.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,186 @@
+Revert the following patch:
+
+commit 43cb76d91ee85f579a69d42bc8efc08bac560278
+Author: Greg Kroah-Hartman <gregkh at suse.de>
+Date:   Tue Apr 9 12:14:34 2002 -0700
+
+    Network: convert network devices to use struct device instead of class_device
+
+    This lets the network core have the ability to handle suspend/resume
+    issues, if it wants to.
+
+    Thanks to Frederik Deweerdt <frederik.deweerdt at gmail.com> for the arm
+    driver fixes.
+
+    Signed-off-by: Greg Kroah-Hartman <gregkh at suse.de>
+
+---
+ drivers/infiniband/ulp/ipoib/ipoib_cm.c   |   13 ++++++-------
+ drivers/infiniband/ulp/ipoib/ipoib_main.c |   26 ++++++++++++++------------
+ drivers/infiniband/ulp/ipoib/ipoib_vlan.c |   10 ++++++----
+ 3 files changed, 26 insertions(+), 23 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c
++++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+@@ -47,6 +47,8 @@ MODULE_PARM_DESC(max_nonsrq_conn_qp,
+ 		 "Max number of connected-mode QPs per interface "
+ 		 "(applied only if shared receive queue is not available)");
+ 
++#define to_net_dev(class) container_of(class, struct net_device, class_dev)
++
+ #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
+ static int data_debug_level;
+ 
+@@ -1433,9 +1435,7 @@ static void ipoib_cm_stale_task(struct w
+ 	spin_unlock_irq(&priv->lock);
+ }
+ 
+-
+-static ssize_t show_mode(struct device *d, struct device_attribute *attr,
+-			 char *buf)
++static ssize_t show_mode(struct class_device *d, char *buf)
+ {
+ 	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d));
+ 
+@@ -1445,8 +1445,7 @@ static ssize_t show_mode(struct device *
+ 		return sprintf(buf, "datagram\n");
+ }
+ 
+-static ssize_t set_mode(struct device *d, struct device_attribute *attr,
+-			const char *buf, size_t count)
++static ssize_t set_mode(struct class_device *d, const char *buf, size_t count)
+ {
+ 	struct net_device *dev = to_net_dev(d);
+ 	struct ipoib_dev_priv *priv = netdev_priv(dev);
+@@ -1490,11 +1489,11 @@ static ssize_t set_mode(struct device *d
+ 	return -EINVAL;
+ }
+ 
+-static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);
++static CLASS_DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);
+ 
+ int ipoib_cm_add_mode_attr(struct net_device *dev)
+ {
+-	return device_create_file(&dev->dev, &dev_attr_mode);
++	return class_device_create_file(&dev->class_dev, &class_device_attr_mode);
+ }
+ 
+ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
+Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -100,6 +100,8 @@ static struct ib_client ipoib_client = {
+ 	.remove = ipoib_remove_one
+ };
+ 
++#define to_net_dev(class) container_of(class, struct net_device, class_dev)
++
+ int ipoib_open(struct net_device *dev)
+ {
+ 	struct ipoib_dev_priv *priv = netdev_priv(dev);
+@@ -1123,14 +1125,13 @@ struct ipoib_dev_priv *ipoib_intf_alloc(
+ 	return netdev_priv(dev);
+ }
+ 
+-static ssize_t show_pkey(struct device *dev,
+-			 struct device_attribute *attr, char *buf)
++static ssize_t show_pkey(struct class_device *dev, char *buf)
+ {
+ 	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));
+ 
+ 	return sprintf(buf, "0x%04x\n", priv->pkey);
+ }
+-static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
++static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
+ 
+ static ssize_t show_umcast(struct device *dev,
+ 			   struct device_attribute *attr, char *buf)
+@@ -1163,8 +1164,7 @@ int ipoib_add_umcast_attr(struct net_dev
+ 	return device_create_file(&dev->dev, &dev_attr_umcast);
+ }
+ 
+-static ssize_t create_child(struct device *dev,
+-			    struct device_attribute *attr,
++static ssize_t create_child(struct class_device *dev,
+ 			    const char *buf, size_t count)
+ {
+ 	int pkey;
+@@ -1186,10 +1186,9 @@ static ssize_t create_child(struct devic
+ 
+ 	return ret ? ret : count;
+ }
+-static DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);
++static CLASS_DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);
+ 
+-static ssize_t delete_child(struct device *dev,
+-			    struct device_attribute *attr,
++static ssize_t delete_child(struct class_device *dev,
+ 			    const char *buf, size_t count)
+ {
+ 	int pkey;
+@@ -1206,11 +1205,12 @@ static ssize_t delete_child(struct devic
+ 	return ret ? ret : count;
+ 
+ }
+-static DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);
++static CLASS_DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);
+ 
+ int ipoib_add_pkey_attr(struct net_device *dev)
+ {
+-	return device_create_file(&dev->dev, &dev_attr_pkey);
++	return class_device_create_file(&dev->class_dev,
++					&class_device_attr_pkey);
+ }
+ 
+ static struct net_device *ipoib_add_port(const char *format,
+@@ -1324,9 +1324,11 @@ static struct net_device *ipoib_add_port
+ 		goto sysfs_failed;
+ 	if (ipoib_add_umcast_attr(priv->dev))
+ 		goto sysfs_failed;
+-	if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
++	if (class_device_create_file(&priv->dev->class_dev,
++				     &class_device_attr_create_child))
+ 		goto sysfs_failed;
+-	if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
++	if (class_device_create_file(&priv->dev->class_dev,
++				     &class_device_attr_delete_child))
+ 		goto sysfs_failed;
+ 
+ 	return priv->dev;
+Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
++++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+@@ -40,15 +40,16 @@
+ 
+ #include "ipoib.h"
+ 
+-static ssize_t show_parent(struct device *d, struct device_attribute *attr,
+-			   char *buf)
++#define to_net_dev(class) container_of(class, struct net_device, class_dev)
++
++static ssize_t show_parent(struct class_device *d, char *buf)
+ {
+ 	struct net_device *dev = to_net_dev(d);
+ 	struct ipoib_dev_priv *priv = netdev_priv(dev);
+ 
+ 	return sprintf(buf, "%s\n", priv->parent->name);
+ }
+-static DEVICE_ATTR(parent, S_IRUGO, show_parent, NULL);
++static CLASS_DEVICE_ATTR(parent, S_IRUGO, show_parent, NULL);
+ 
+ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
+ {
+@@ -124,7 +125,8 @@ int ipoib_vlan_add(struct net_device *pd
+ 	if (ipoib_add_umcast_attr(priv->dev))
+ 		goto sysfs_failed;
+ 
+-	if (device_create_file(&priv->dev->dev, &dev_attr_parent))
++	if (class_device_create_file(&priv->dev->class_dev,
++				     &class_device_attr_parent))
+ 		goto sysfs_failed;
+ 
+ 	list_add_tail(&priv->list, &ppriv->child_intfs);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_class_device_to_2_6_20_umcast.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_class_device_to_2_6_20_umcast.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_class_device_to_2_6_20_umcast.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,54 @@
+This patch is the buddy of ipoib_class_device_to_2_6_20.patch: it handles
+the same issue, in the same way, for the /sys/class/net/$dev/umcast sysfs entry.
+
+Hence it needs to go to all the directories under kernel_patches/backport that
+contain ipoib_class_device_to_2_6_20.patch.
+
+---
+
+backport kernel_patches/fixes/zzz_ipoib_allow_umcast.patch to older kernels
+
+Signed-off-by: Or Gerlitz <ogerlitz at voltaire.com>
+
+---
+ drivers/infiniband/ulp/ipoib/ipoib_main.c |   11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -1133,16 +1133,14 @@ static ssize_t show_pkey(struct class_de
+ }
+ static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
+ 
+-static ssize_t show_umcast(struct device *dev,
+-			   struct device_attribute *attr, char *buf)
++static ssize_t show_umcast(struct class_device *dev, char *buf)
+ {
+ 	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));
+ 
+ 	return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
+ }
+ 
+-static ssize_t set_umcast(struct device *dev,
+-			  struct device_attribute *attr,
++static ssize_t set_umcast(struct class_device *dev,
+ 			  const char *buf, size_t count)
+ {
+ 	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));
+@@ -1157,11 +1155,12 @@ static ssize_t set_umcast(struct device 
+ 
+ 	return count;
+ }
+-static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);
++static CLASS_DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);
+ 
+ int ipoib_add_umcast_attr(struct net_device *dev)
+ {
+-	return device_create_file(&dev->dev, &dev_attr_umcast);
++	return class_device_create_file(&dev->class_dev,
++					&class_device_attr_umcast);
+ }
+ 
+ static ssize_t create_child(struct class_device *dev,

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_lro_to_2.6.23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_lro_to_2.6.23.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_lro_to_2.6.23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,33 @@
+Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
++++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+@@ -100,14 +100,9 @@ static void ipoib_get_strings(struct net
+ 	}
+ }
+ 
+-static int ipoib_get_sset_count(struct net_device *dev, int sset)
++static int ipoib_get_stats_count(struct net_device *dev)
+ {
+-	switch (sset) {
+-	case ETH_SS_STATS:
+-		return ARRAY_SIZE(ipoib_stats_keys);
+-	default:
+-		return -EOPNOTSUPP;
+-	}
++	return ARRAY_SIZE(ipoib_stats_keys);
+ }
+ 
+ static void ipoib_get_ethtool_stats(struct net_device *dev,
+@@ -132,10 +127,8 @@ static const struct ethtool_ops ipoib_et
+ 	.get_tso		= ethtool_op_get_tso,
+ 	.get_coalesce		= ipoib_get_coalesce,
+ 	.set_coalesce		= ipoib_set_coalesce,
+-	.get_flags		= ethtool_op_get_flags,
+-	.set_flags		= ethtool_op_set_flags,
+ 	.get_strings		= ipoib_get_strings,
+-	.get_sset_count		= ipoib_get_sset_count,
++	.get_stats_count 	= ipoib_get_stats_count,
+ 	.get_ethtool_stats	= ipoib_get_ethtool_stats,
+ };
+ 

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_mcast_set_pkey_to_2_6_24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_mcast_set_pkey_to_2_6_24.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_mcast_set_pkey_to_2_6_24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,19 @@
+---
+ drivers/infiniband/ulp/ipoib/ipoib_multicast.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
++++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+@@ -796,6 +796,10 @@ void ipoib_mcast_restart_task(struct wor
+ 
+ 		memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid);
+ 
++		/* Add in the P_Key */
++		mgid.raw[4] = (priv->pkey >> 8) & 0xff;
++		mgid.raw[5] = priv->pkey & 0xff;
++
+ 		mcast = __ipoib_mcast_find(dev, &mgid);
+ 		if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
+ 			struct ipoib_mcast *nmcast;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_skb_to_2_6_20.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_skb_to_2_6_20.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_skb_to_2_6_20.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,16 @@
+---
+ drivers/infiniband/ulp/ipoib/ipoib_ib.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c
++++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+@@ -35,6 +35,7 @@
+ 
+ #include <linux/delay.h>
+ #include <linux/dma-mapping.h>
++#include <linux/skbuff.h>
+ 
+ #include <rdma/ib_cache.h>
+ #include <linux/ip.h>

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_x_neigh_cleanup.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_x_neigh_cleanup.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/ipoib_x_neigh_cleanup.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,182 @@
+IB/ipoib: Fix neigh destructor oops
+
+For kernels 2.6.20 and older, the pointer to ipoib_neigh_cleanup()
+may still be called after IPoIB has been unloaded,
+causing a kernel oops. This problem has been fixed for 2.6.21 with
+the following commit: ecbb416939da77c0d107409976499724baddce7b
+
+The idea with this patch is to have a helper module which remains
+always loaded; this module provides the destructor for
+neighbours, which in turn calls IPoIB's destructor through a function pointer.
+When IPoIB is unloaded, the function pointer is cleared, so subsequent
+calls to a neighbour destructor still go to a valid address but
+IPoIB's destructor is no longer invoked.
+
+Signed-off-by: Eli Cohen <eli at mellanox.co.il>
+---
+
+Index: ofa_1_3_dev_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+===================================================================
+--- ofa_1_3_dev_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-05-14 12:49:11.000000000 +0300
++++ ofa_1_3_dev_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-05-14 12:49:32.000000000 +0300
+@@ -49,6 +49,7 @@
+ 
+ #include <net/dst.h>
+ #include <linux/vmalloc.h>
++#include <linux/delay.h>
+ 
+ MODULE_AUTHOR("Roland Dreier");
+ MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
+@@ -916,7 +917,7 @@ void ipoib_neigh_free(struct net_device 
+ 
+ static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
+ {
+-	parms->neigh_cleanup = ipoib_neigh_cleanup;
++	parms->neigh_cleanup = ipoib_neigh_cleanup_container;
+ 
+ 	return 0;
+ }
+@@ -1383,9 +1384,13 @@ static int __init ipoib_init_module(void
+ 	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
+ #endif
+ 
++
++	ipoib_set_cleanup_function(ipoib_neigh_cleanup);
+ 	ret = ipoib_register_debugfs();
+-	if (ret)
++	if (ret) {
++		ipoib_set_cleanup_function(NULL);
+ 		return ret;
++	}
+ 
+ 	/*
+ 	 * We create our own workqueue mainly because we want to be
+@@ -1397,6 +1402,7 @@ static int __init ipoib_init_module(void
+ 	 */
+ 	ipoib_workqueue = create_singlethread_workqueue("ipoib");
+ 	if (!ipoib_workqueue) {
++		ipoib_set_cleanup_function(NULL);
+ 		ret = -ENOMEM;
+ 		goto err_fs;
+ 	}
+@@ -1404,8 +1410,10 @@ static int __init ipoib_init_module(void
+ 	ib_sa_register_client(&ipoib_sa_client);
+ 
+ 	ret = ib_register_client(&ipoib_client);
+-	if (ret)
++	if (ret) {
++		ipoib_set_cleanup_function(NULL);
+ 		goto err_sa;
++	}
+ 
+ 	return 0;
+ 
+@@ -1421,7 +1429,16 @@ err_fs:
+ 
+ static void __exit ipoib_cleanup_module(void)
+ {
++	int ret;
++
+ 	ib_unregister_client(&ipoib_client);
++
++	do {
++		ret = ipoib_set_cleanup_function(NULL);
++		if (ret)
++			msleep(10);
++	} while(ret);
++
+ 	ib_sa_unregister_client(&ipoib_sa_client);
+ 	ipoib_unregister_debugfs();
+ 	destroy_workqueue(ipoib_workqueue);
+Index: ofa_1_3_dev_kernel/drivers/infiniband/ulp/ipoib/Makefile
+===================================================================
+--- ofa_1_3_dev_kernel.orig/drivers/infiniband/ulp/ipoib/Makefile	2008-05-14 12:49:11.000000000 +0300
++++ ofa_1_3_dev_kernel/drivers/infiniband/ulp/ipoib/Makefile	2008-05-14 12:49:32.000000000 +0300
+@@ -1,4 +1,4 @@
+-obj-$(CONFIG_INFINIBAND_IPOIB)			+= ib_ipoib.o
++obj-$(CONFIG_INFINIBAND_IPOIB)			+= ib_ipoib.o ipoib_helper.o
+ 
+ ib_ipoib-y					:= ipoib_main.o \
+ 						   ipoib_ib.o \
+Index: ofa_1_3_dev_kernel/drivers/infiniband/ulp/ipoib/ipoib.h
+===================================================================
+--- ofa_1_3_dev_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2008-05-14 12:49:11.000000000 +0300
++++ ofa_1_3_dev_kernel/drivers/infiniband/ulp/ipoib/ipoib.h	2008-05-14 12:49:32.000000000 +0300
+@@ -554,6 +554,9 @@ int ipoib_mcast_stop_thread(struct net_d
+ void ipoib_mcast_dev_down(struct net_device *dev);
+ void ipoib_mcast_dev_flush(struct net_device *dev);
+ 
++int ipoib_set_cleanup_function(void (*func)(struct neighbour *n));
++void ipoib_neigh_cleanup_container(struct neighbour *n);
++
+ #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+ struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev);
+ int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter);
+Index: ofa_1_3_dev_kernel/drivers/infiniband/ulp/ipoib/ipoib_helper.c
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ ofa_1_3_dev_kernel/drivers/infiniband/ulp/ipoib/ipoib_helper.c	2008-05-14 12:49:32.000000000 +0300
+@@ -0,0 +1,63 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <net/neighbour.h>
++
++MODULE_AUTHOR("Eli Cohen");
++MODULE_DESCRIPTION("container for ipoib neighbour destructor");
++MODULE_LICENSE("Dual BSD/GPL");
++
++DEFINE_SPINLOCK(spl);
++static int busy;
++
++static void (*cleanup_func)(struct neighbour *n);
++
++static int ipoib_set_cleanup_function(void (*func)(struct neighbour *n))
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&spl, flags);
++	if (busy) {
++		spin_unlock_irqrestore(&spl, flags);
++		return -EBUSY;
++	}
++	cleanup_func = func;
++	spin_unlock_irqrestore(&spl, flags);
++
++	return 0;
++}
++
++static void ipoib_neigh_cleanup_container(struct neighbour *n)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&spl, flags);
++	busy = 1;
++	spin_unlock_irqrestore(&spl, flags);
++	if (cleanup_func)
++		cleanup_func(n);
++
++	spin_lock_irqsave(&spl, flags);
++	busy = 0;
++	spin_unlock_irqrestore(&spl, flags);
++}
++
++
++EXPORT_SYMBOL(ipoib_set_cleanup_function);
++EXPORT_SYMBOL(ipoib_neigh_cleanup_container);
++
++
++static int __init ipoib_helper_init(void)
++{
++	if (!try_module_get(THIS_MODULE))
++		return -1;
++
++	return 0;
++}
++
++
++static void __exit ipoib_helper_cleanup(void)
++{
++}
++
++module_init(ipoib_helper_init);
++module_exit(ipoib_helper_cleanup);
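
The helper-module approach described in the patch header above reduces to a
guarded function pointer: a component that is never unloaded owns the pointer
plus a busy flag, and the unloadable component installs its destructor on load
and clears it on unload, retrying while a call is in flight. The following is
only an illustrative user-space sketch of that pattern with made-up names; it
is not the ipoib_helper.c code itself, which uses a kernel spinlock,
EXPORT_SYMBOL and module init/exit hooks instead.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static int slot_busy;                      /* a callback call is in flight */
static void (*cleanup_cb)(void *obj);      /* installed by the unloadable side */

/* Install or clear the callback; refuse to clear while a call is running. */
static int set_cleanup_cb(void (*cb)(void *obj))
{
	int ret = 0;

	pthread_mutex_lock(&slot_lock);
	if (slot_busy)
		ret = -1;                  /* like -EBUSY: caller retries */
	else
		cleanup_cb = cb;
	pthread_mutex_unlock(&slot_lock);
	return ret;
}

/* Stable entry point handed out to the rest of the system. */
static void cleanup_container(void *obj)
{
	pthread_mutex_lock(&slot_lock);
	slot_busy = 1;
	pthread_mutex_unlock(&slot_lock);

	if (cleanup_cb)                    /* NULL after "unload": harmless no-op */
		cleanup_cb(obj);

	pthread_mutex_lock(&slot_lock);
	slot_busy = 0;
	pthread_mutex_unlock(&slot_lock);
}

static void real_cleanup(void *obj)
{
	printf("destructor called for %p\n", obj);
}

int main(void)
{
	int neigh;

	set_cleanup_cb(real_cleanup);      /* "module load" */
	cleanup_container(&neigh);         /* destructor fires */
	while (set_cleanup_cb(NULL))       /* "module unload": wait out in-flight calls */
		;
	cleanup_container(&neigh);         /* still a valid address, now does nothing */
	return 0;
}

The kernel version in the patch does the same thing with a spinlock, and the
msleep(10) retry loop in ipoib_cleanup_module() plays the role of the spin
loop above.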

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,4742 @@
+From 89ac09ec66db75fbda1bd77918066fb2ddebac38 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Mon, 25 Aug 2008 16:16:26 +0300
+Subject: [PATCH] iscsi_01_sync_kernel_code_with_release_2.0-869.2
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/scsi/iscsi_tcp.c            |  529 ++++++-------
+ drivers/scsi/iscsi_tcp.h            |    7 
+ drivers/scsi/libiscsi.c             | 1455 +++++++++++++++---------------------
+ drivers/scsi/scsi_transport_iscsi.c |  500 +++---------
+ include/scsi/libiscsi.h             |  108 +-
+ include/scsi/scsi_transport_iscsi.h |   93 +-
+ 6 files changed, 1119 insertions(+), 1573 deletions(-)
+
+Index: ofed_kernel/drivers/scsi/iscsi_tcp.c
+===================================================================
+--- ofed_kernel.orig/drivers/scsi/iscsi_tcp.c
++++ ofed_kernel/drivers/scsi/iscsi_tcp.c
+@@ -64,10 +64,6 @@ MODULE_LICENSE("GPL");
+ #define BUG_ON(expr)
+ #endif
+ 
+-static struct scsi_transport_template *iscsi_tcp_scsi_transport;
+-static struct scsi_host_template iscsi_sht;
+-static struct iscsi_transport iscsi_tcp_transport;
+-
+ static unsigned int iscsi_max_lun = 512;
+ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+ 
+@@ -498,63 +494,58 @@ iscsi_tcp_data_recv_prep(struct iscsi_tc
+  * must be called with session lock
+  */
+ static void
+-iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+-	struct iscsi_tcp_task *tcp_task = task->dd_data;
++	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ 	struct iscsi_r2t_info *r2t;
+ 
+-	/* nothing to do for mgmt tasks */
+-	if (!task->sc)
+-		return;
+-
+-	/* flush task's r2t queues */
+-	while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
+-		__kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++	/* flush ctask's r2t queues */
++	while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
++		__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ 			    sizeof(void*));
+-		debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n");
++		debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
+ 	}
+ 
+-	r2t = tcp_task->r2t;
++	r2t = tcp_ctask->r2t;
+ 	if (r2t != NULL) {
+-		__kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++		__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ 			    sizeof(void*));
+-		tcp_task->r2t = NULL;
++		tcp_ctask->r2t = NULL;
+ 	}
+ }
+ 
+ /**
+  * iscsi_data_rsp - SCSI Data-In Response processing
+  * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+  **/
+ static int
+-iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+ 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+-	struct iscsi_tcp_task *tcp_task = task->dd_data;
++	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ 	struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
+ 	struct iscsi_session *session = conn->session;
+-	struct scsi_cmnd *sc = task->sc;
++	struct scsi_cmnd *sc = ctask->sc;
+ 	int datasn = be32_to_cpu(rhdr->datasn);
+-	unsigned total_in_length = scsi_in(sc)->length;
+ 
+ 	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+ 	if (tcp_conn->in.datalen == 0)
+ 		return 0;
+ 
+-	if (tcp_task->exp_datasn != datasn) {
+-		debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n",
+-		          __func__, tcp_task->exp_datasn, datasn);
++	if (tcp_ctask->exp_datasn != datasn) {
++		debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->datasn(%d)\n",
++		          __FUNCTION__, tcp_ctask->exp_datasn, datasn);
+ 		return ISCSI_ERR_DATASN;
+ 	}
+ 
+-	tcp_task->exp_datasn++;
++	tcp_ctask->exp_datasn++;
+ 
+-	tcp_task->data_offset = be32_to_cpu(rhdr->offset);
+-	if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
++	tcp_ctask->data_offset = be32_to_cpu(rhdr->offset);
++	if (tcp_ctask->data_offset + tcp_conn->in.datalen > scsi_bufflen(sc)) {
+ 		debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
+-		          __func__, tcp_task->data_offset,
+-		          tcp_conn->in.datalen, total_in_length);
++		          __FUNCTION__, tcp_ctask->data_offset,
++		          tcp_conn->in.datalen, scsi_bufflen(sc));
+ 		return ISCSI_ERR_DATA_OFFSET;
+ 	}
+ 
+@@ -567,8 +558,8 @@ iscsi_data_rsp(struct iscsi_conn *conn, 
+ 
+ 			if (res_count > 0 &&
+ 			    (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+-			     res_count <= total_in_length))
+-				scsi_in(sc)->resid = res_count;
++			     res_count <= scsi_bufflen(sc)))
++				scsi_set_resid(sc, res_count);
+ 			else
+ 				sc->result = (DID_BAD_TARGET << 16) |
+ 					rhdr->cmd_status;
+@@ -582,7 +573,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, 
+ /**
+  * iscsi_solicit_data_init - initialize first Data-Out
+  * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+  * @r2t: R2T info
+  *
+  * Notes:
+@@ -592,7 +583,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, 
+  *	This function is called with connection lock taken.
+  **/
+ static void
+-iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
++iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+ 			struct iscsi_r2t_info *r2t)
+ {
+ 	struct iscsi_data *hdr;
+@@ -603,8 +594,8 @@ iscsi_solicit_data_init(struct iscsi_con
+ 	hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
+ 	r2t->solicit_datasn++;
+ 	hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+-	memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+-	hdr->itt = task->hdr->itt;
++	memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++	hdr->itt = ctask->hdr->itt;
+ 	hdr->exp_statsn = r2t->exp_statsn;
+ 	hdr->offset = cpu_to_be32(r2t->data_offset);
+ 	if (r2t->data_length > conn->max_xmit_dlength) {
+@@ -624,14 +615,14 @@ iscsi_solicit_data_init(struct iscsi_con
+ /**
+  * iscsi_r2t_rsp - iSCSI R2T Response processing
+  * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+  **/
+ static int
+-iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+ 	struct iscsi_r2t_info *r2t;
+ 	struct iscsi_session *session = conn->session;
+-	struct iscsi_tcp_task *tcp_task = task->dd_data;
++	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ 	struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
+ 	int r2tsn = be32_to_cpu(rhdr->r2tsn);
+@@ -644,23 +635,23 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, s
+ 		return ISCSI_ERR_DATALEN;
+ 	}
+ 
+-	if (tcp_task->exp_datasn != r2tsn){
+-		debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
+-		          __func__, tcp_task->exp_datasn, r2tsn);
++	if (tcp_ctask->exp_datasn != r2tsn){
++		debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
++		          __FUNCTION__, tcp_ctask->exp_datasn, r2tsn);
+ 		return ISCSI_ERR_R2TSN;
+ 	}
+ 
+ 	/* fill-in new R2T associated with the task */
+ 	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+ 
+-	if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
++	if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) {
+ 		iscsi_conn_printk(KERN_INFO, conn,
+ 				  "dropping R2T itt %d in recovery.\n",
+-				  task->itt);
++				  ctask->itt);
+ 		return 0;
+ 	}
+ 
+-	rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
++	rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
+ 	BUG_ON(!rc);
+ 
+ 	r2t->exp_statsn = rhdr->statsn;
+@@ -668,7 +659,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, s
+ 	if (r2t->data_length == 0) {
+ 		iscsi_conn_printk(KERN_ERR, conn,
+ 				  "invalid R2T with zero data len\n");
+-		__kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++		__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ 			    sizeof(void*));
+ 		return ISCSI_ERR_DATALEN;
+ 	}
+@@ -679,12 +670,12 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, s
+ 			    r2t->data_length, session->max_burst);
+ 
+ 	r2t->data_offset = be32_to_cpu(rhdr->data_offset);
+-	if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
++	if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) {
+ 		iscsi_conn_printk(KERN_ERR, conn,
+ 				  "invalid R2T with data len %u at offset %u "
+ 				  "and total length %d\n", r2t->data_length,
+-				  r2t->data_offset, scsi_out(task->sc)->length);
+-		__kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++				  r2t->data_offset, scsi_bufflen(ctask->sc));
++		__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ 			    sizeof(void*));
+ 		return ISCSI_ERR_DATALEN;
+ 	}
+@@ -692,13 +683,13 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, s
+ 	r2t->ttt = rhdr->ttt; /* no flip */
+ 	r2t->solicit_datasn = 0;
+ 
+-	iscsi_solicit_data_init(conn, task, r2t);
++	iscsi_solicit_data_init(conn, ctask, r2t);
+ 
+-	tcp_task->exp_datasn = r2tsn + 1;
+-	__kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
++	tcp_ctask->exp_datasn = r2tsn + 1;
++	__kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
+ 	conn->r2t_pdus_cnt++;
+ 
+-	iscsi_requeue_task(task);
++	iscsi_requeue_ctask(ctask);
+ 	return 0;
+ }
+ 
+@@ -741,8 +732,10 @@ static int
+ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ {
+ 	int rc = 0, opcode, ahslen;
++	struct iscsi_session *session = conn->session;
+ 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+-	struct iscsi_task *task;
++	struct iscsi_cmd_task *ctask;
++	uint32_t itt;
+ 
+ 	/* verify PDU length */
+ 	tcp_conn->in.datalen = ntoh24(hdr->dlength);
+@@ -760,7 +753,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn 
+ 
+ 	opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+ 	/* verify itt (itt encoding: age+cid+itt) */
+-	rc = iscsi_verify_itt(conn, hdr->itt);
++	rc = iscsi_verify_itt(conn, hdr, &itt);
+ 	if (rc)
+ 		return rc;
+ 
+@@ -769,21 +762,15 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn 
+ 
+ 	switch(opcode) {
+ 	case ISCSI_OP_SCSI_DATA_IN:
++		ctask = session->cmds[itt];
+ 		spin_lock(&conn->session->lock);
+-		task = iscsi_itt_to_ctask(conn, hdr->itt);
+-		if (!task)
+-			rc = ISCSI_ERR_BAD_ITT;
+-		else
+-			rc = iscsi_data_rsp(conn, task);
+-		if (rc) {
+-			spin_unlock(&conn->session->lock);
+-			break;
+-		}
+-
++		rc = iscsi_data_rsp(conn, ctask);
++		spin_unlock(&conn->session->lock);
++		if (rc)
++			return rc;
+ 		if (tcp_conn->in.datalen) {
+-			struct iscsi_tcp_task *tcp_task = task->dd_data;
++			struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ 			struct hash_desc *rx_hash = NULL;
+-			struct scsi_data_buffer *sdb = scsi_in(task->sc);
+ 
+ 			/*
+ 			 * Setup copy of Data-In into the Scsi_Cmnd
+@@ -798,21 +785,17 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn 
+ 
+ 			debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
+ 				  "datalen=%d)\n", tcp_conn,
+-				  tcp_task->data_offset,
++				  tcp_ctask->data_offset,
+ 				  tcp_conn->in.datalen);
+-			rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
+-						   sdb->table.sgl,
+-						   sdb->table.nents,
+-						   tcp_task->data_offset,
+-						   tcp_conn->in.datalen,
+-						   iscsi_tcp_process_data_in,
+-						   rx_hash);
+-			spin_unlock(&conn->session->lock);
+-			return rc;
++			return iscsi_segment_seek_sg(&tcp_conn->in.segment,
++						     scsi_sglist(ctask->sc),
++						     scsi_sg_count(ctask->sc),
++						     tcp_ctask->data_offset,
++						     tcp_conn->in.datalen,
++						     iscsi_tcp_process_data_in,
++						     rx_hash);
+ 		}
+-		rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
+-		spin_unlock(&conn->session->lock);
+-		break;
++		/* fall through */
+ 	case ISCSI_OP_SCSI_CMD_RSP:
+ 		if (tcp_conn->in.datalen) {
+ 			iscsi_tcp_data_recv_prep(tcp_conn);
+@@ -821,17 +804,15 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn 
+ 		rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
+ 		break;
+ 	case ISCSI_OP_R2T:
+-		spin_lock(&conn->session->lock);
+-		task = iscsi_itt_to_ctask(conn, hdr->itt);
+-		if (!task)
+-			rc = ISCSI_ERR_BAD_ITT;
+-		else if (ahslen)
++		ctask = session->cmds[itt];
++		if (ahslen)
+ 			rc = ISCSI_ERR_AHSLEN;
+-		else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
+-			rc = iscsi_r2t_rsp(conn, task);
+-		else
++		else if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
++			spin_lock(&session->lock);
++			rc = iscsi_r2t_rsp(conn, ctask);
++			spin_unlock(&session->lock);
++		} else
+ 			rc = ISCSI_ERR_PROTO;
+-		spin_unlock(&conn->session->lock);
+ 		break;
+ 	case ISCSI_OP_LOGIN_RSP:
+ 	case ISCSI_OP_TEXT_RSP:
+@@ -1193,7 +1174,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_con
+ {
+ 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ 
+-	debug_tcp("%s(%p%s)\n", __func__, tcp_conn,
++	debug_tcp("%s(%p%s)\n", __FUNCTION__, tcp_conn,
+ 			conn->hdrdgst_en? ", digest enabled" : "");
+ 
+ 	/* Clear the data segment - needs to be filled in by the
+@@ -1202,7 +1183,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_con
+ 
+ 	/* If header digest is enabled, compute the CRC and
+ 	 * place the digest into the same buffer. We make
+-	 * sure that both iscsi_tcp_task and mtask have
++	 * sure that both iscsi_tcp_ctask and mtask have
+ 	 * sufficient room.
+ 	 */
+ 	if (conn->hdrdgst_en) {
+@@ -1234,7 +1215,7 @@ iscsi_tcp_send_data_prep(struct iscsi_co
+ 	struct hash_desc *tx_hash = NULL;
+ 	unsigned int hdr_spec_len;
+ 
+-	debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __func__,
++	debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __FUNCTION__,
+ 			tcp_conn, offset, len,
+ 			conn->datadgst_en? ", digest enabled" : "");
+ 
+@@ -1259,7 +1240,7 @@ iscsi_tcp_send_linear_data_prepare(struc
+ 	struct hash_desc *tx_hash = NULL;
+ 	unsigned int hdr_spec_len;
+ 
+-	debug_tcp("%s(%p, datalen=%d%s)\n", __func__, tcp_conn, len,
++	debug_tcp("%s(%p, datalen=%d%s)\n", __FUNCTION__, tcp_conn, len,
+ 		  conn->datadgst_en? ", digest enabled" : "");
+ 
+ 	/* Make sure the datalen matches what the caller
+@@ -1277,7 +1258,7 @@ iscsi_tcp_send_linear_data_prepare(struc
+ /**
+  * iscsi_solicit_data_cont - initialize next Data-Out
+  * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+  * @r2t: R2T info
+  * @left: bytes left to transfer
+  *
+@@ -1288,7 +1269,7 @@ iscsi_tcp_send_linear_data_prepare(struc
+  *	Called under connection lock.
+  **/
+ static int
+-iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
++iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+ 			struct iscsi_r2t_info *r2t)
+ {
+ 	struct iscsi_data *hdr;
+@@ -1305,8 +1286,8 @@ iscsi_solicit_data_cont(struct iscsi_con
+ 	hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
+ 	r2t->solicit_datasn++;
+ 	hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+-	memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+-	hdr->itt = task->hdr->itt;
++	memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++	hdr->itt = ctask->hdr->itt;
+ 	hdr->exp_statsn = r2t->exp_statsn;
+ 	new_offset = r2t->data_offset + r2t->sent;
+ 	hdr->offset = cpu_to_be32(new_offset);
+@@ -1324,76 +1305,87 @@ iscsi_solicit_data_cont(struct iscsi_con
+ }
+ 
+ /**
+- * iscsi_tcp_task - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
++ * iscsi_tcp_ctask - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+  * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+  * @sc: scsi command
+  **/
+ static int
+-iscsi_tcp_task_init(struct iscsi_task *task)
++iscsi_tcp_ctask_init(struct iscsi_cmd_task *ctask)
+ {
+-	struct iscsi_tcp_task *tcp_task = task->dd_data;
+-	struct iscsi_conn *conn = task->conn;
+-	struct scsi_cmnd *sc = task->sc;
++	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++	struct iscsi_conn *conn = ctask->conn;
++	struct scsi_cmnd *sc = ctask->sc;
+ 	int err;
+ 
+-	if (!sc) {
+-		/*
+-		 * mgmt tasks do not have a scatterlist since they come
+-		 * in from the iscsi interface.
+-		 */
+-		debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id,
+-			   task->itt);
+-
+-		/* Prepare PDU, optionally w/ immediate data */
+-		iscsi_tcp_send_hdr_prep(conn, task->hdr, sizeof(*task->hdr));
+-
+-		/* If we have immediate data, attach a payload */
+-		if (task->data_count)
+-			iscsi_tcp_send_linear_data_prepare(conn, task->data,
+-							   task->data_count);
+-		return 0;
+-	}
+-
+-	BUG_ON(__kfifo_len(tcp_task->r2tqueue));
+-	tcp_task->sent = 0;
+-	tcp_task->exp_datasn = 0;
++	BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
++	tcp_ctask->sent = 0;
++	tcp_ctask->exp_datasn = 0;
+ 
+ 	/* Prepare PDU, optionally w/ immediate data */
+-	debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n",
+-		    conn->id, task->itt, task->imm_count,
+-		    task->unsol_count);
+-	iscsi_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
++	debug_scsi("ctask deq [cid %d itt 0x%x imm %d unsol %d]\n",
++		    conn->id, ctask->itt, ctask->imm_count,
++		    ctask->unsol_count);
++	iscsi_tcp_send_hdr_prep(conn, ctask->hdr, ctask->hdr_len);
+ 
+-	if (!task->imm_count)
++	if (!ctask->imm_count)
+ 		return 0;
+ 
+ 	/* If we have immediate data, attach a payload */
+-	err = iscsi_tcp_send_data_prep(conn, scsi_out(sc)->table.sgl,
+-				       scsi_out(sc)->table.nents,
+-				       0, task->imm_count);
++	err = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc), scsi_sg_count(sc),
++				       0, ctask->imm_count);
+ 	if (err)
+ 		return err;
+-	tcp_task->sent += task->imm_count;
+-	task->imm_count = 0;
++	tcp_ctask->sent += ctask->imm_count;
++	ctask->imm_count = 0;
++	return 0;
++}
++
++/**
++ * iscsi_tcp_mtask_xmit - xmit management(immediate) task
++ * @conn: iscsi connection
++ * @mtask: task management task
++ *
++ * Notes:
++ *	The function can return -EAGAIN in which case caller must
++ *	call it again later, or recover. '0' return code means successful
++ *	xmit.
++ **/
++static int
++iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
++{
++	int rc;
++
++	/* Flush any pending data first. */
++	rc = iscsi_tcp_flush(conn);
++	if (rc < 0)
++		return rc;
++
++	if (mtask->hdr->itt == RESERVED_ITT) {
++		struct iscsi_session *session = conn->session;
++
++		spin_lock_bh(&session->lock);
++		iscsi_free_mgmt_task(conn, mtask);
++		spin_unlock_bh(&session->lock);
++	}
++
+ 	return 0;
+ }
+ 
+ /*
+- * iscsi_tcp_task_xmit - xmit normal PDU task
+- * @task: iscsi command task
++ * iscsi_tcp_ctask_xmit - xmit normal PDU task
++ * @conn: iscsi connection
++ * @ctask: iscsi command task
+  *
+  * We're expected to return 0 when everything was transmitted succesfully,
+  * -EAGAIN if there's still data in the queue, or != 0 for any other kind
+  * of error.
+  */
+ static int
+-iscsi_tcp_task_xmit(struct iscsi_task *task)
++iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+-	struct iscsi_conn *conn = task->conn;
+-	struct iscsi_tcp_task *tcp_task = task->dd_data;
+-	struct scsi_cmnd *sc = task->sc;
+-	struct scsi_data_buffer *sdb;
++	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++	struct scsi_cmnd *sc = ctask->sc;
+ 	int rc = 0;
+ 
+ flush:
+@@ -1402,39 +1394,32 @@ flush:
+ 	if (rc < 0)
+ 		return rc;
+ 
+-	/* mgmt command */
+-	if (!sc) {
+-		if (task->hdr->itt == RESERVED_ITT)
+-			iscsi_put_task(task);
+-		return 0;
+-	}
+-
+ 	/* Are we done already? */
+ 	if (sc->sc_data_direction != DMA_TO_DEVICE)
+ 		return 0;
+ 
+-	sdb = scsi_out(sc);
+-	if (task->unsol_count != 0) {
+-		struct iscsi_data *hdr = &tcp_task->unsol_dtask.hdr;
++	if (ctask->unsol_count != 0) {
++		struct iscsi_data *hdr = &tcp_ctask->unsol_dtask.hdr;
+ 
+ 		/* Prepare a header for the unsolicited PDU.
+ 		 * The amount of data we want to send will be
+-		 * in task->data_count.
++		 * in ctask->data_count.
+ 		 * FIXME: return the data count instead.
+ 		 */
+-		iscsi_prep_unsolicit_data_pdu(task, hdr);
++		iscsi_prep_unsolicit_data_pdu(ctask, hdr);
+ 
+ 		debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n",
+-				task->itt, tcp_task->sent, task->data_count);
++				ctask->itt, tcp_ctask->sent, ctask->data_count);
+ 
+ 		iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr));
+-		rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
+-					      sdb->table.nents, tcp_task->sent,
+-					      task->data_count);
++		rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc),
++					      scsi_sg_count(sc),
++					      tcp_ctask->sent,
++					      ctask->data_count);
+ 		if (rc)
+ 			goto fail;
+-		tcp_task->sent += task->data_count;
+-		task->unsol_count -= task->data_count;
++		tcp_ctask->sent += ctask->data_count;
++		ctask->unsol_count -= ctask->data_count;
+ 		goto flush;
+ 	} else {
+ 		struct iscsi_session *session = conn->session;
+@@ -1443,22 +1428,22 @@ flush:
+ 		/* All unsolicited PDUs sent. Check for solicited PDUs.
+ 		 */
+ 		spin_lock_bh(&session->lock);
+-		r2t = tcp_task->r2t;
++		r2t = tcp_ctask->r2t;
+ 		if (r2t != NULL) {
+ 			/* Continue with this R2T? */
+-			if (!iscsi_solicit_data_cont(conn, task, r2t)) {
++			if (!iscsi_solicit_data_cont(conn, ctask, r2t)) {
+ 				debug_scsi("  done with r2t %p\n", r2t);
+ 
+-				__kfifo_put(tcp_task->r2tpool.queue,
++				__kfifo_put(tcp_ctask->r2tpool.queue,
+ 					    (void*)&r2t, sizeof(void*));
+-				tcp_task->r2t = r2t = NULL;
++				tcp_ctask->r2t = r2t = NULL;
+ 			}
+ 		}
+ 
+ 		if (r2t == NULL) {
+-			__kfifo_get(tcp_task->r2tqueue, (void*)&tcp_task->r2t,
++			__kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
+ 				    sizeof(void*));
+-			r2t = tcp_task->r2t;
++			r2t = tcp_ctask->r2t;
+ 		}
+ 		spin_unlock_bh(&session->lock);
+ 
+@@ -1469,19 +1454,19 @@ flush:
+ 		}
+ 
+ 		debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
+-			r2t, r2t->solicit_datasn - 1, task->itt,
++			r2t, r2t->solicit_datasn - 1, ctask->itt,
+ 			r2t->data_offset + r2t->sent, r2t->data_count);
+ 
+ 		iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr,
+ 					sizeof(struct iscsi_hdr));
+ 
+-		rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
+-					      sdb->table.nents,
++		rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc),
++					      scsi_sg_count(sc),
+ 					      r2t->data_offset + r2t->sent,
+ 					      r2t->data_count);
+ 		if (rc)
+ 			goto fail;
+-		tcp_task->sent += r2t->data_count;
++		tcp_ctask->sent += r2t->data_count;
+ 		r2t->sent += r2t->data_count;
+ 		goto flush;
+ 	}
+@@ -1498,7 +1483,7 @@ iscsi_tcp_conn_create(struct iscsi_cls_s
+ 	struct iscsi_cls_conn *cls_conn;
+ 	struct iscsi_tcp_conn *tcp_conn;
+ 
+-	cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
++	cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+ 	if (!cls_conn)
+ 		return NULL;
+ 	conn = cls_conn->dd_data;
+@@ -1508,14 +1493,18 @@ iscsi_tcp_conn_create(struct iscsi_cls_s
+ 	 */
+ 	conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
+ 
+-	tcp_conn = conn->dd_data;
++	tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL);
++	if (!tcp_conn)
++		goto tcp_conn_alloc_fail;
++
++	conn->dd_data = tcp_conn;
+ 	tcp_conn->iscsi_conn = conn;
+ 
+ 	tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ 						  CRYPTO_ALG_ASYNC);
+ 	tcp_conn->tx_hash.flags = 0;
+ 	if (IS_ERR(tcp_conn->tx_hash.tfm))
+-		goto free_conn;
++		goto free_tcp_conn;
+ 
+ 	tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ 						  CRYPTO_ALG_ASYNC);
+@@ -1527,12 +1516,14 @@ iscsi_tcp_conn_create(struct iscsi_cls_s
+ 
+ free_tx_tfm:
+ 	crypto_free_hash(tcp_conn->tx_hash.tfm);
+-free_conn:
++free_tcp_conn:
+ 	iscsi_conn_printk(KERN_ERR, conn,
+ 			  "Could not create connection due to crc32c "
+ 			  "loading error. Make sure the crc32c "
+ 			  "module is built as a module or into the "
+ 			  "kernel\n");
++	kfree(tcp_conn);
++tcp_conn_alloc_fail:
+ 	iscsi_conn_teardown(cls_conn);
+ 	return NULL;
+ }
+@@ -1553,6 +1544,7 @@ iscsi_tcp_release_conn(struct iscsi_conn
+ 
+ 	spin_lock_bh(&session->lock);
+ 	tcp_conn->sock = NULL;
++	conn->recv_lock = NULL;
+ 	spin_unlock_bh(&session->lock);
+ 	sockfd_put(sock);
+ }
+@@ -1564,32 +1556,20 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_
+ 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ 
+ 	iscsi_tcp_release_conn(conn);
++	iscsi_conn_teardown(cls_conn);
+ 
+ 	if (tcp_conn->tx_hash.tfm)
+ 		crypto_free_hash(tcp_conn->tx_hash.tfm);
+ 	if (tcp_conn->rx_hash.tfm)
+ 		crypto_free_hash(tcp_conn->rx_hash.tfm);
+ 
+-	iscsi_conn_teardown(cls_conn);
++	kfree(tcp_conn);
+ }
+ 
+ static void
+ iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+ {
+ 	struct iscsi_conn *conn = cls_conn->dd_data;
+-	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+-
+-	/* userspace may have goofed up and not bound us */
+-	if (!tcp_conn->sock)
+-		return;
+-	/*
+-	 * Make sure our recv side is stopped.
+-	 * Older tools called conn stop before ep_disconnect
+-	 * so IO could still be coming in.
+-	 */
+-	write_lock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+-	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+-	write_unlock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+ 
+ 	iscsi_conn_stop(cls_conn, flag);
+ 	iscsi_tcp_release_conn(conn);
+@@ -1640,8 +1620,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_ses
+ 		    struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
+ 		    int is_leading)
+ {
+-	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+-	struct iscsi_host *ihost = shost_priv(shost);
+ 	struct iscsi_conn *conn = cls_conn->dd_data;
+ 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ 	struct sock *sk;
+@@ -1665,8 +1643,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_ses
+ 	if (err)
+ 		goto free_socket;
+ 
+-	err = iscsi_tcp_get_addr(conn, sock, ihost->local_address,
+-				&ihost->local_port, kernel_getsockname);
++	err = iscsi_tcp_get_addr(conn, sock, conn->local_address,
++				&conn->local_port, kernel_getsockname);
+ 	if (err)
+ 		goto free_socket;
+ 
+@@ -1683,6 +1661,13 @@ iscsi_tcp_conn_bind(struct iscsi_cls_ses
+ 	sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
+ 	sk->sk_allocation = GFP_ATOMIC;
+ 
++	/* FIXME: disable Nagle's algorithm */
++
++	/*
++	 * Intercept TCP callbacks for sendfile like receive
++	 * processing.
++	 */
++	conn->recv_lock = &sk->sk_callback_lock;
+ 	iscsi_conn_set_callbacks(conn);
+ 	tcp_conn->sendpage = tcp_conn->sock->ops->sendpage;
+ 	/*
+@@ -1696,6 +1681,21 @@ free_socket:
+ 	return err;
+ }
+ 
++/* called with host lock */
++static void
++iscsi_tcp_mtask_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
++{
++	debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
++
++	/* Prepare PDU, optionally w/ immediate data */
++	iscsi_tcp_send_hdr_prep(conn, mtask->hdr, sizeof(*mtask->hdr));
++
++	/* If we have immediate data, attach a payload */
++	if (mtask->data_count)
++		iscsi_tcp_send_linear_data_prepare(conn, mtask->data,
++						   mtask->data_count);
++}
++
+ static int
+ iscsi_r2tpool_alloc(struct iscsi_session *session)
+ {
+@@ -1706,8 +1706,8 @@ iscsi_r2tpool_alloc(struct iscsi_session
+ 	 * initialize per-task: R2T pool and xmit queue
+ 	 */
+ 	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+-	        struct iscsi_task *task = session->cmds[cmd_i];
+-		struct iscsi_tcp_task *tcp_task = task->dd_data;
++	        struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
++		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ 
+ 		/*
+ 		 * pre-allocated x4 as much r2ts to handle race when
+@@ -1716,16 +1716,16 @@ iscsi_r2tpool_alloc(struct iscsi_session
+ 		 */
+ 
+ 		/* R2T pool */
+-		if (iscsi_pool_init(&tcp_task->r2tpool, session->max_r2t * 4, NULL,
++		if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4, NULL,
+ 				    sizeof(struct iscsi_r2t_info))) {
+ 			goto r2t_alloc_fail;
+ 		}
+ 
+ 		/* R2T xmit queue */
+-		tcp_task->r2tqueue = kfifo_alloc(
++		tcp_ctask->r2tqueue = kfifo_alloc(
+ 		      session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
+-		if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
+-			iscsi_pool_free(&tcp_task->r2tpool);
++		if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
++			iscsi_pool_free(&tcp_ctask->r2tpool);
+ 			goto r2t_alloc_fail;
+ 		}
+ 	}
+@@ -1734,11 +1734,11 @@ iscsi_r2tpool_alloc(struct iscsi_session
+ 
+ r2t_alloc_fail:
+ 	for (i = 0; i < cmd_i; i++) {
+-		struct iscsi_task *task = session->cmds[i];
+-		struct iscsi_tcp_task *tcp_task = task->dd_data;
++		struct iscsi_cmd_task *ctask = session->cmds[i];
++		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ 
+-		kfifo_free(tcp_task->r2tqueue);
+-		iscsi_pool_free(&tcp_task->r2tpool);
++		kfifo_free(tcp_ctask->r2tqueue);
++		iscsi_pool_free(&tcp_ctask->r2tpool);
+ 	}
+ 	return -ENOMEM;
+ }
+@@ -1749,11 +1749,11 @@ iscsi_r2tpool_free(struct iscsi_session 
+ 	int i;
+ 
+ 	for (i = 0; i < session->cmds_max; i++) {
+-		struct iscsi_task *task = session->cmds[i];
+-		struct iscsi_tcp_task *tcp_task = task->dd_data;
++		struct iscsi_cmd_task *ctask = session->cmds[i];
++		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ 
+-		kfifo_free(tcp_task->r2tqueue);
+-		iscsi_pool_free(&tcp_task->r2tpool);
++		kfifo_free(tcp_ctask->r2tqueue);
++		iscsi_pool_free(&tcp_ctask->r2tpool);
+ 	}
+ }
+ 
+@@ -1818,6 +1818,29 @@ iscsi_tcp_conn_get_param(struct iscsi_cl
+ 	return len;
+ }
+ 
++static int
++iscsi_tcp_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
++			 char *buf)
++{
++        struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
++	int len;
++
++	switch (param) {
++	case ISCSI_HOST_PARAM_IPADDRESS:
++		spin_lock_bh(&session->lock);
++		if (!session->leadconn)
++			len = -ENODEV;
++		else
++			len = sprintf(buf, "%s\n",
++				     session->leadconn->local_address);
++		spin_unlock_bh(&session->lock);
++		break;
++	default:
++		return iscsi_host_get_param(shost, param, buf);
++	}
++	return len;
++}
++
+ static void
+ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
+ {
+@@ -1843,70 +1866,54 @@ iscsi_conn_get_stats(struct iscsi_cls_co
+ }
+ 
+ static struct iscsi_cls_session *
+-iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
+-			 uint16_t qdepth, uint32_t initial_cmdsn,
+-			 uint32_t *hostno)
++iscsi_tcp_session_create(struct iscsi_transport *iscsit,
++			 struct scsi_transport_template *scsit,
++			 uint16_t cmds_max, uint16_t qdepth,
++			 uint32_t initial_cmdsn, uint32_t *hostno)
+ {
+ 	struct iscsi_cls_session *cls_session;
+ 	struct iscsi_session *session;
+-	struct Scsi_Host *shost;
++	uint32_t hn;
+ 	int cmd_i;
+ 
+-	if (ep) {
+-		printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
+-		return NULL;
+-	}
+-
+-	shost = iscsi_host_alloc(&iscsi_sht, 0, qdepth);
+-	if (!shost)
+-		return NULL;
+-	shost->transportt = iscsi_tcp_scsi_transport;
+-	shost->max_lun = iscsi_max_lun;
+-	shost->max_id = 0;
+-	shost->max_channel = 0;
+-	shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
+-
+-	if (iscsi_host_add(shost, NULL))
+-		goto free_host;
+-	*hostno = shost->host_no;
+-
+-	cls_session = iscsi_session_setup(&iscsi_tcp_transport, shost, cmds_max,
+-					  sizeof(struct iscsi_tcp_task),
+-					  initial_cmdsn, 0);
++	cls_session = iscsi_session_setup(iscsit, scsit, cmds_max, qdepth,
++					 sizeof(struct iscsi_tcp_cmd_task),
++					 sizeof(struct iscsi_tcp_mgmt_task),
++					 initial_cmdsn, &hn);
+ 	if (!cls_session)
+-		goto remove_host;
+-	session = cls_session->dd_data;
++		return NULL;
++	*hostno = hn;
+ 
+-	shost->can_queue = session->scsi_cmds_max;
++	session = class_to_transport_session(cls_session);
+ 	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+-		struct iscsi_task *task = session->cmds[cmd_i];
+-		struct iscsi_tcp_task *tcp_task = task->dd_data;
++		struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
++		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++
++		ctask->hdr = &tcp_ctask->hdr.cmd_hdr;
++		ctask->hdr_max = sizeof(tcp_ctask->hdr) - ISCSI_DIGEST_SIZE;
++	}
++
++	for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
++		struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
++		struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
+ 
+-		task->hdr = &tcp_task->hdr.cmd_hdr;
+-		task->hdr_max = sizeof(tcp_task->hdr) - ISCSI_DIGEST_SIZE;
++		mtask->hdr = (struct iscsi_hdr *) &tcp_mtask->hdr;
+ 	}
+ 
+-	if (iscsi_r2tpool_alloc(session))
+-		goto remove_session;
++	if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session)))
++		goto r2tpool_alloc_fail;
++
+ 	return cls_session;
+ 
+-remove_session:
++r2tpool_alloc_fail:
+ 	iscsi_session_teardown(cls_session);
+-remove_host:
+-	iscsi_host_remove(shost);
+-free_host:
+-	iscsi_host_free(shost);
+ 	return NULL;
+ }
+ 
+ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
+ {
+-	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+-
+-	iscsi_r2tpool_free(cls_session->dd_data);
+-
+-	iscsi_host_remove(shost);
+-	iscsi_host_free(shost);
++	iscsi_r2tpool_free(class_to_transport_session(cls_session));
++	iscsi_session_teardown(cls_session);
+ }
+ 
+ static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
+@@ -1961,11 +1968,14 @@ static struct iscsi_transport iscsi_tcp_
+ 				  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+ 				  ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+ 				  ISCSI_LU_RESET_TMO |
+-				  ISCSI_PING_TMO | ISCSI_RECV_TMO |
+-				  ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
++				  ISCSI_PING_TMO | ISCSI_RECV_TMO,
+ 	.host_param_mask	= ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
+ 				  ISCSI_HOST_INITIATOR_NAME |
+ 				  ISCSI_HOST_NETDEV_NAME,
++	.host_template		= &iscsi_sht,
++	.conndata_size		= sizeof(struct iscsi_conn),
++	.max_conn		= 1,
++	.max_cmd_len		= 16,
+ 	/* session management */
+ 	.create_session		= iscsi_tcp_session_create,
+ 	.destroy_session	= iscsi_tcp_session_destroy,
+@@ -1979,14 +1989,16 @@ static struct iscsi_transport iscsi_tcp_
+ 	.start_conn		= iscsi_conn_start,
+ 	.stop_conn		= iscsi_tcp_conn_stop,
+ 	/* iscsi host params */
+-	.get_host_param		= iscsi_host_get_param,
++	.get_host_param		= iscsi_tcp_host_get_param,
+ 	.set_host_param		= iscsi_host_set_param,
+ 	/* IO */
+ 	.send_pdu		= iscsi_conn_send_pdu,
+ 	.get_stats		= iscsi_conn_get_stats,
+-	.init_task		= iscsi_tcp_task_init,
+-	.xmit_task		= iscsi_tcp_task_xmit,
+-	.cleanup_task		= iscsi_tcp_cleanup_task,
++	.init_cmd_task		= iscsi_tcp_ctask_init,
++	.init_mgmt_task		= iscsi_tcp_mtask_init,
++	.xmit_cmd_task		= iscsi_tcp_ctask_xmit,
++	.xmit_mgmt_task		= iscsi_tcp_mtask_xmit,
++	.cleanup_cmd_task	= iscsi_tcp_cleanup_ctask,
+ 	/* recovery */
+ 	.session_recovery_timedout = iscsi_session_recovery_timedout,
+ };
+@@ -1999,10 +2011,9 @@ iscsi_tcp_init(void)
+ 		       iscsi_max_lun);
+ 		return -EINVAL;
+ 	}
++	iscsi_tcp_transport.max_lun = iscsi_max_lun;
+ 
+-	iscsi_tcp_scsi_transport = iscsi_register_transport(
+-							&iscsi_tcp_transport);
+-	if (!iscsi_tcp_scsi_transport)
++	if (!iscsi_register_transport(&iscsi_tcp_transport))
+ 		return -ENODEV;
+ 
+ 	return 0;
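One structural detail of the iscsi_tcp.c changes above: with dd_data no longer sized by iscsi_conn_setup(), iscsi_tcp_conn_create() has to kzalloc its private struct itself, which adds a kfree and the new tcp_conn_alloc_fail label to the error path. A minimal userspace sketch of that goto-unwind ordering, with made-up types and allocators standing in for the driver objects:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the driver's connection and crc32c-transform objects. */
struct fake_tfm { int id; };
struct fake_tcp_conn { struct fake_tfm *tx, *rx; };

static struct fake_tfm *alloc_tfm(int ok) { return ok ? calloc(1, sizeof(struct fake_tfm)) : NULL; }
static void free_tfm(struct fake_tfm *t) { free(t); }

/*
 * Same unwind order as the patched iscsi_tcp_conn_create(): allocate the
 * private struct first, then the tx and rx transforms, and on failure
 * release only what has been set up so far.
 */
static struct fake_tcp_conn *conn_create(int tx_ok, int rx_ok)
{
	struct fake_tcp_conn *c = calloc(1, sizeof(*c));
	if (!c)
		goto alloc_fail;

	c->tx = alloc_tfm(tx_ok);
	if (!c->tx)
		goto free_conn;

	c->rx = alloc_tfm(rx_ok);
	if (!c->rx)
		goto free_tx;

	return c;

free_tx:
	free_tfm(c->tx);
free_conn:
	free(c);
alloc_fail:
	return NULL;
}

int main(void)
{
	struct fake_tcp_conn *c = conn_create(1, 1);
	printf("create %s\n", c ? "succeeded" : "failed");
	if (c) { free_tfm(c->rx); free_tfm(c->tx); free(c); }
	return 0;
}

Each label releases exactly the resources acquired before the failing step, which is why the new kfree sits between the crc32c error message and iscsi_conn_teardown() in the patched function.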
+Index: ofed_kernel/drivers/scsi/iscsi_tcp.h
+===================================================================
+--- ofed_kernel.orig/drivers/scsi/iscsi_tcp.h
++++ ofed_kernel/drivers/scsi/iscsi_tcp.h
+@@ -103,6 +103,11 @@ struct iscsi_data_task {
+ 	char			hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */
+ };
+ 
++struct iscsi_tcp_mgmt_task {
++	struct iscsi_hdr	hdr;
++	char			hdrext[ISCSI_DIGEST_SIZE]; /* Header-Digest */
++};
++
+ struct iscsi_r2t_info {
+ 	__be32			ttt;		/* copied from R2T */
+ 	__be32			exp_statsn;	/* copied from R2T */
+@@ -114,7 +119,7 @@ struct iscsi_r2t_info {
+ 	struct iscsi_data_task	dtask;		/* Data-Out header buf */
+ };
+ 
+-struct iscsi_tcp_task {
++struct iscsi_tcp_cmd_task {
+ 	struct iscsi_hdr_buff {
+ 		struct iscsi_cmd	cmd_hdr;
+ 		char			hdrextbuf[ISCSI_MAX_AHS_SIZE +
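The header now carries two task-descriptor types again: iscsi_tcp_cmd_task keeps a command BHS plus room for AHSs and a header digest, while the new iscsi_tcp_mgmt_task only needs a BHS plus digest. The session_create hunk earlier derives hdr_max from that buffer, and iscsi_add_hdr() in libiscsi.c below enforces it. A small sketch of that bound check, using illustrative sizes rather than the real ISCSI_* constants:

#include <stdio.h>

/* Illustrative sizes only -- the real values come from the iSCSI headers. */
#define BHS_SIZE      48   /* basic header segment (struct iscsi_cmd)      */
#define MAX_AHS_SIZE  256  /* room reserved for additional header segments */
#define DIGEST_SIZE   4    /* optional CRC32C header digest                */

/* Per-command header buffer, as in struct iscsi_tcp_cmd_task::hdr. */
struct hdr_buff {
	unsigned char cmd_hdr[BHS_SIZE];
	unsigned char hdrextbuf[MAX_AHS_SIZE + DIGEST_SIZE];
};

/* Mirrors iscsi_add_hdr(): grow hdr_len but never past hdr_max. */
static int add_hdr(unsigned *hdr_len, unsigned hdr_max, unsigned len)
{
	if (*hdr_len + len > hdr_max)
		return -1;	/* would overrun the preallocated buffer */
	*hdr_len += len;
	return 0;
}

int main(void)
{
	/* hdr_max = sizeof(tcp_ctask->hdr) - ISCSI_DIGEST_SIZE in the patch. */
	unsigned hdr_max = sizeof(struct hdr_buff) - DIGEST_SIZE;
	unsigned hdr_len = 0;

	printf("BHS ok:     %d\n", add_hdr(&hdr_len, hdr_max, BHS_SIZE));
	printf("big AHS ok: %d\n", add_hdr(&hdr_len, hdr_max, MAX_AHS_SIZE));
	printf("overflow:   %d\n", add_hdr(&hdr_len, hdr_max, 8));
	return 0;
}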
+Index: ofed_kernel/drivers/scsi/libiscsi.c
+===================================================================
+--- ofed_kernel.orig/drivers/scsi/libiscsi.c
++++ ofed_kernel/drivers/scsi/libiscsi.c
+@@ -38,6 +38,14 @@
+ #include <scsi/scsi_transport_iscsi.h>
+ #include <scsi/libiscsi.h>
+ 
++struct iscsi_session *
++class_to_transport_session(struct iscsi_cls_session *cls_session)
++{
++	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++	return iscsi_hostdata(shost->hostdata);
++}
++EXPORT_SYMBOL_GPL(class_to_transport_session);
++
+ /* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
+ #define SNA32_CHECK 2147483648UL
+ 
+@@ -79,170 +87,91 @@ iscsi_update_cmdsn(struct iscsi_session 
+ 		 * xmit thread
+ 		 */
+ 		if (!list_empty(&session->leadconn->xmitqueue) ||
+-		    !list_empty(&session->leadconn->mgmtqueue)) {
+-			if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+-				scsi_queue_work(session->host,
+-						&session->leadconn->xmitwork);
+-		}
++		    !list_empty(&session->leadconn->mgmtqueue))
++			scsi_queue_work(session->host,
++					&session->leadconn->xmitwork);
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
+ 
+-void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *task,
++void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
+ 				   struct iscsi_data *hdr)
+ {
+-	struct iscsi_conn *conn = task->conn;
++	struct iscsi_conn *conn = ctask->conn;
+ 
+ 	memset(hdr, 0, sizeof(struct iscsi_data));
+ 	hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
+-	hdr->datasn = cpu_to_be32(task->unsol_datasn);
+-	task->unsol_datasn++;
++	hdr->datasn = cpu_to_be32(ctask->unsol_datasn);
++	ctask->unsol_datasn++;
+ 	hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+-	memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
++	memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
+ 
+-	hdr->itt = task->hdr->itt;
++	hdr->itt = ctask->hdr->itt;
+ 	hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
+-	hdr->offset = cpu_to_be32(task->unsol_offset);
++	hdr->offset = cpu_to_be32(ctask->unsol_offset);
+ 
+-	if (task->unsol_count > conn->max_xmit_dlength) {
++	if (ctask->unsol_count > conn->max_xmit_dlength) {
+ 		hton24(hdr->dlength, conn->max_xmit_dlength);
+-		task->data_count = conn->max_xmit_dlength;
+-		task->unsol_offset += task->data_count;
++		ctask->data_count = conn->max_xmit_dlength;
++		ctask->unsol_offset += ctask->data_count;
+ 		hdr->flags = 0;
+ 	} else {
+-		hton24(hdr->dlength, task->unsol_count);
+-		task->data_count = task->unsol_count;
++		hton24(hdr->dlength, ctask->unsol_count);
++		ctask->data_count = ctask->unsol_count;
+ 		hdr->flags = ISCSI_FLAG_CMD_FINAL;
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu);
+ 
+-static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
++static int iscsi_add_hdr(struct iscsi_cmd_task *ctask, unsigned len)
+ {
+-	unsigned exp_len = task->hdr_len + len;
++	unsigned exp_len = ctask->hdr_len + len;
+ 
+-	if (exp_len > task->hdr_max) {
++	if (exp_len > ctask->hdr_max) {
+ 		WARN_ON(1);
+ 		return -EINVAL;
+ 	}
+ 
+ 	WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
+-	task->hdr_len = exp_len;
+-	return 0;
+-}
+-
+-/*
+- * make an extended cdb AHS
+- */
+-static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
+-{
+-	struct scsi_cmnd *cmd = task->sc;
+-	unsigned rlen, pad_len;
+-	unsigned short ahslength;
+-	struct iscsi_ecdb_ahdr *ecdb_ahdr;
+-	int rc;
+-
+-	ecdb_ahdr = iscsi_next_hdr(task);
+-	rlen = cmd->cmd_len - ISCSI_CDB_SIZE;
+-
+-	BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
+-	ahslength = rlen + sizeof(ecdb_ahdr->reserved);
+-
+-	pad_len = iscsi_padding(rlen);
+-
+-	rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) +
+-	                   sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
+-	if (rc)
+-		return rc;
+-
+-	if (pad_len)
+-		memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len);
+-
+-	ecdb_ahdr->ahslength = cpu_to_be16(ahslength);
+-	ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB;
+-	ecdb_ahdr->reserved = 0;
+-	memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen);
+-
+-	debug_scsi("iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
+-		   "rlen %d pad_len %d ahs_length %d iscsi_headers_size %u\n",
+-		   cmd->cmd_len, rlen, pad_len, ahslength, task->hdr_len);
+-
+-	return 0;
+-}
+-
+-static int iscsi_prep_bidi_ahs(struct iscsi_task *task)
+-{
+-	struct scsi_cmnd *sc = task->sc;
+-	struct iscsi_rlength_ahdr *rlen_ahdr;
+-	int rc;
+-
+-	rlen_ahdr = iscsi_next_hdr(task);
+-	rc = iscsi_add_hdr(task, sizeof(*rlen_ahdr));
+-	if (rc)
+-		return rc;
+-
+-	rlen_ahdr->ahslength =
+-		cpu_to_be16(sizeof(rlen_ahdr->read_length) +
+-						  sizeof(rlen_ahdr->reserved));
+-	rlen_ahdr->ahstype = ISCSI_AHSTYPE_RLENGTH;
+-	rlen_ahdr->reserved = 0;
+-	rlen_ahdr->read_length = cpu_to_be32(scsi_in(sc)->length);
+-
+-	debug_scsi("bidi-in rlen_ahdr->read_length(%d) "
+-		   "rlen_ahdr->ahslength(%d)\n",
+-		   be32_to_cpu(rlen_ahdr->read_length),
+-		   be16_to_cpu(rlen_ahdr->ahslength));
++	ctask->hdr_len = exp_len;
+ 	return 0;
+ }
+ 
+ /**
+  * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
+- * @task: iscsi task
++ * @ctask: iscsi cmd task
+  *
+  * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
+  * fields like dlength or final based on how much data it sends
+  */
+-static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
++static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
+ {
+-	struct iscsi_conn *conn = task->conn;
++	struct iscsi_conn *conn = ctask->conn;
+ 	struct iscsi_session *session = conn->session;
+-	struct iscsi_cmd *hdr = task->hdr;
+-	struct scsi_cmnd *sc = task->sc;
+-	unsigned hdrlength, cmd_len;
++	struct iscsi_cmd *hdr = ctask->hdr;
++	struct scsi_cmnd *sc = ctask->sc;
++	unsigned hdrlength;
+ 	int rc;
+ 
+-	task->hdr_len = 0;
+-	rc = iscsi_add_hdr(task, sizeof(*hdr));
++	ctask->hdr_len = 0;
++	rc = iscsi_add_hdr(ctask, sizeof(*hdr));
+ 	if (rc)
+ 		return rc;
+ 	hdr->opcode = ISCSI_OP_SCSI_CMD;
+ 	hdr->flags = ISCSI_ATTR_SIMPLE;
+ 	int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
+-	hdr->itt = build_itt(task->itt, session->age);
++	hdr->itt = build_itt(ctask->itt, session->age);
++	hdr->data_length = cpu_to_be32(scsi_bufflen(sc));
+ 	hdr->cmdsn = cpu_to_be32(session->cmdsn);
+ 	session->cmdsn++;
+ 	hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
+-	cmd_len = sc->cmd_len;
+-	if (cmd_len < ISCSI_CDB_SIZE)
+-		memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
+-	else if (cmd_len > ISCSI_CDB_SIZE) {
+-		rc = iscsi_prep_ecdb_ahs(task);
+-		if (rc)
+-			return rc;
+-		cmd_len = ISCSI_CDB_SIZE;
+-	}
+-	memcpy(hdr->cdb, sc->cmnd, cmd_len);
++	memcpy(hdr->cdb, sc->cmnd, sc->cmd_len);
++	if (sc->cmd_len < MAX_COMMAND_SIZE)
++		memset(&hdr->cdb[sc->cmd_len], 0,
++			MAX_COMMAND_SIZE - sc->cmd_len);
+ 
+-	task->imm_count = 0;
+-	if (scsi_bidi_cmnd(sc)) {
+-		hdr->flags |= ISCSI_FLAG_CMD_READ;
+-		rc = iscsi_prep_bidi_ahs(task);
+-		if (rc)
+-			return rc;
+-	}
++	ctask->imm_count = 0;
+ 	if (sc->sc_data_direction == DMA_TO_DEVICE) {
+-		unsigned out_len = scsi_out(sc)->length;
+-		hdr->data_length = cpu_to_be32(out_len);
+ 		hdr->flags |= ISCSI_FLAG_CMD_WRITE;
+ 		/*
+ 		 * Write counters:
+@@ -258,41 +187,40 @@ static int iscsi_prep_scsi_cmd_pdu(struc
+ 		 *
+ 		 *      pad_count       bytes to be sent as zero-padding
+ 		 */
+-		task->unsol_count = 0;
+-		task->unsol_offset = 0;
+-		task->unsol_datasn = 0;
++		ctask->unsol_count = 0;
++		ctask->unsol_offset = 0;
++		ctask->unsol_datasn = 0;
+ 
+ 		if (session->imm_data_en) {
+-			if (out_len >= session->first_burst)
+-				task->imm_count = min(session->first_burst,
++			if (scsi_bufflen(sc) >= session->first_burst)
++				ctask->imm_count = min(session->first_burst,
+ 							conn->max_xmit_dlength);
+ 			else
+-				task->imm_count = min(out_len,
++				ctask->imm_count = min(scsi_bufflen(sc),
+ 							conn->max_xmit_dlength);
+-			hton24(hdr->dlength, task->imm_count);
++			hton24(hdr->dlength, ctask->imm_count);
+ 		} else
+ 			zero_data(hdr->dlength);
+ 
+ 		if (!session->initial_r2t_en) {
+-			task->unsol_count = min(session->first_burst, out_len)
+-							     - task->imm_count;
+-			task->unsol_offset = task->imm_count;
++			ctask->unsol_count = min((session->first_burst),
++				(scsi_bufflen(sc))) - ctask->imm_count;
++			ctask->unsol_offset = ctask->imm_count;
+ 		}
+ 
+-		if (!task->unsol_count)
++		if (!ctask->unsol_count)
+ 			/* No unsolicit Data-Out's */
+ 			hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ 	} else {
+ 		hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ 		zero_data(hdr->dlength);
+-		hdr->data_length = cpu_to_be32(scsi_in(sc)->length);
+ 
+ 		if (sc->sc_data_direction == DMA_FROM_DEVICE)
+ 			hdr->flags |= ISCSI_FLAG_CMD_READ;
+ 	}
+ 
+ 	/* calculate size of additional header segments (AHSs) */
+-	hdrlength = task->hdr_len - sizeof(*hdr);
++	hdrlength = ctask->hdr_len - sizeof(*hdr);
+ 
+ 	WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
+ 	hdrlength /= ISCSI_PAD_LEN;
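The write-side accounting restored above is easier to see outside the driver: immediate data is capped by both FirstBurstLength and the connection's maximum transmit segment, and whatever remains of the first burst goes out as unsolicited Data-Out when InitialR2T is disabled. A compact restatement (a sketch, not the kernel code):

#include <stdio.h>

static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

struct write_counters { unsigned imm, unsol, unsol_off; };

/*
 * Same arithmetic as the write path of iscsi_prep_scsi_cmd_pdu() above:
 * imm_count is bounded by the buffer length, first_burst and max_xmit,
 * and the rest of the first burst becomes unsolicited Data-Out starting
 * right after the immediate portion.
 */
static struct write_counters prep_write(unsigned bufflen, unsigned first_burst,
					unsigned max_xmit, int imm_en, int initial_r2t)
{
	struct write_counters c = {0, 0, 0};

	if (imm_en)
		c.imm = min_u(min_u(bufflen, first_burst), max_xmit);
	if (!initial_r2t) {
		c.unsol = min_u(first_burst, bufflen) - c.imm;
		c.unsol_off = c.imm;
	}
	return c;
}

int main(void)
{
	/* 64 KiB write, 16 KiB first burst, 8 KiB max xmit segment. */
	struct write_counters c = prep_write(65536, 16384, 8192, 1, 0);
	printf("imm=%u unsol=%u unsol_off=%u\n", c.imm, c.unsol, c.unsol_off);
	return 0;
}

With the example numbers, an 8 KiB immediate segment plus 8 KiB of unsolicited data covers the 16 KiB first burst; the remaining 48 KiB waits for R2Ts.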
+@@ -300,180 +228,110 @@ static int iscsi_prep_scsi_cmd_pdu(struc
+ 	WARN_ON(hdrlength >= 256);
+ 	hdr->hlength = hdrlength & 0xFF;
+ 
+-	if (conn->session->tt->init_task &&
+-	    conn->session->tt->init_task(task))
+-		return -EIO;
+-
+-	task->state = ISCSI_TASK_RUNNING;
+-	list_move_tail(&task->running, &conn->run_list);
++	if (conn->session->tt->init_cmd_task(conn->ctask))
++		return EIO;
+ 
+ 	conn->scsicmd_pdus_cnt++;
+ 	debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d "
+-		   "bidi_len %d cmdsn %d win %d]\n", scsi_bidi_cmnd(sc) ?
+-		   "bidirectional" : sc->sc_data_direction == DMA_TO_DEVICE ?
+-		   "write" : "read", conn->id, sc, sc->cmnd[0], task->itt,
+-		   scsi_bufflen(sc),
+-		   scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
+-		   session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
++		"cmdsn %d win %d]\n",
++		sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
++		conn->id, sc, sc->cmnd[0], ctask->itt, scsi_bufflen(sc),
++		session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
+ 	return 0;
+ }
+ 
+ /**
+- * iscsi_complete_command - finish a task
+- * @task: iscsi cmd task
++ * iscsi_complete_command - return command back to scsi-ml
++ * @ctask: iscsi cmd task
+  *
+  * Must be called with session lock.
+- * This function returns the scsi command to scsi-ml or cleans
+- * up mgmt tasks then returns the task to the pool.
++ * This function returns the scsi command to scsi-ml and returns
++ * the cmd task to the pool of available cmd tasks.
+  */
+-static void iscsi_complete_command(struct iscsi_task *task)
++static void iscsi_complete_command(struct iscsi_cmd_task *ctask)
+ {
+-	struct iscsi_conn *conn = task->conn;
++	struct iscsi_conn *conn = ctask->conn;
+ 	struct iscsi_session *session = conn->session;
+-	struct scsi_cmnd *sc = task->sc;
+-
+-	list_del_init(&task->running);
+-	task->state = ISCSI_TASK_COMPLETED;
+-	task->sc = NULL;
+-
+-	if (conn->task == task)
+-		conn->task = NULL;
+-	/*
+-	 * login task is preallocated so do not free
+-	 */
+-	if (conn->login_task == task)
+-		return;
+-
+-	__kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
+-
+-	if (conn->ping_task == task)
+-		conn->ping_task = NULL;
++	struct scsi_cmnd *sc = ctask->sc;
+ 
+-	if (sc) {
+-		task->sc = NULL;
+-		/* SCSI eh reuses commands to verify us */
+-		sc->SCp.ptr = NULL;
+-		/*
+-		 * queue command may call this to free the task, but
+-		 * not have setup the sc callback
+-		 */
+-		if (sc->scsi_done)
+-			sc->scsi_done(sc);
+-	}
++	ctask->state = ISCSI_TASK_COMPLETED;
++	ctask->sc = NULL;
++	/* SCSI eh reuses commands to verify us */
++	sc->SCp.ptr = NULL;
++	if (conn->ctask == ctask)
++		conn->ctask = NULL;
++	list_del_init(&ctask->running);
++	__kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
++	sc->scsi_done(sc);
+ }
+ 
+-void __iscsi_get_task(struct iscsi_task *task)
++static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask)
+ {
+-	atomic_inc(&task->refcount);
++	atomic_inc(&ctask->refcount);
+ }
+-EXPORT_SYMBOL_GPL(__iscsi_get_task);
+ 
+-static void __iscsi_put_task(struct iscsi_task *task)
++static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask)
+ {
+-	if (atomic_dec_and_test(&task->refcount))
+-		iscsi_complete_command(task);
++	if (atomic_dec_and_test(&ctask->refcount))
++		iscsi_complete_command(ctask);
+ }
+ 
+-void iscsi_put_task(struct iscsi_task *task)
+-{
+-	struct iscsi_session *session = task->conn->session;
+-
+-	spin_lock_bh(&session->lock);
+-	__iscsi_put_task(task);
+-	spin_unlock_bh(&session->lock);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_put_task);
+-
+ /*
+  * session lock must be held
+  */
+-static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
++static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+ 			 int err)
+ {
+ 	struct scsi_cmnd *sc;
+ 
+-	sc = task->sc;
++	sc = ctask->sc;
+ 	if (!sc)
+ 		return;
+ 
+-	if (task->state == ISCSI_TASK_PENDING)
++	if (ctask->state == ISCSI_TASK_PENDING)
+ 		/*
+ 		 * cmd never made it to the xmit thread, so we should not count
+ 		 * the cmd in the sequencing
+ 		 */
+ 		conn->session->queued_cmdsn--;
+ 	else
+-		conn->session->tt->cleanup_task(conn, task);
+-	/*
+-	 * Check if cleanup_task dropped the lock and the command completed,
+-	 */
+-	if (!task->sc)
+-		return;
++		conn->session->tt->cleanup_cmd_task(conn, ctask);
+ 
+ 	sc->result = err;
+-	if (!scsi_bidi_cmnd(sc))
+-		scsi_set_resid(sc, scsi_bufflen(sc));
+-	else {
+-		scsi_out(sc)->resid = scsi_out(sc)->length;
+-		scsi_in(sc)->resid = scsi_in(sc)->length;
+-	}
+-
+-	if (conn->task == task)
+-		conn->task = NULL;
++	scsi_set_resid(sc, scsi_bufflen(sc));
++	if (conn->ctask == ctask)
++		conn->ctask = NULL;
+ 	/* release ref from queuecommand */
+-	__iscsi_put_task(task);
++	__iscsi_put_ctask(ctask);
+ }
+ 
+-static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
+-				struct iscsi_task *task)
++/**
++ * iscsi_free_mgmt_task - return mgmt task back to pool
++ * @conn: iscsi connection
++ * @mtask: mtask
++ *
++ * Must be called with session lock.
++ */
++void iscsi_free_mgmt_task(struct iscsi_conn *conn,
++			  struct iscsi_mgmt_task *mtask)
+ {
+-	struct iscsi_session *session = conn->session;
+-	struct iscsi_hdr *hdr = (struct iscsi_hdr *)task->hdr;
+-	struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
+-
+-	if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+-		return -ENOTCONN;
+-
+-	if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
+-	    hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
+-		nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
+-	/*
+-	 * pre-format CmdSN for outgoing PDU.
+-	 */
+-	nop->cmdsn = cpu_to_be32(session->cmdsn);
+-	if (hdr->itt != RESERVED_ITT) {
+-		hdr->itt = build_itt(task->itt, session->age);
+-		/*
+-		 * TODO: We always use immediate, so we never hit this.
+-		 * If we start to send tmfs or nops as non-immediate then
+-		 * we should start checking the cmdsn numbers for mgmt tasks.
+-		 */
+-		if (conn->c_stage == ISCSI_CONN_STARTED &&
+-		    !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+-			session->queued_cmdsn++;
+-			session->cmdsn++;
+-		}
+-	}
+-
+-	if (session->tt->init_task)
+-		session->tt->init_task(task);
+-
+-	if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
+-		session->state = ISCSI_STATE_LOGGING_OUT;
++	list_del_init(&mtask->running);
++	if (conn->login_mtask == mtask)
++		return;
+ 
+-	list_move_tail(&task->running, &conn->mgmt_run_list);
+-	debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
+-		   hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
+-		   task->data_count);
+-	return 0;
++	if (conn->ping_mtask == mtask)
++		conn->ping_mtask = NULL;
++	__kfifo_put(conn->session->mgmtpool.queue,
++		    (void*)&mtask, sizeof(void*));
+ }
++EXPORT_SYMBOL_GPL(iscsi_free_mgmt_task);
+ 
+-static struct iscsi_task *
++static struct iscsi_mgmt_task *
+ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ 		      char *data, uint32_t data_size)
+ {
+ 	struct iscsi_session *session = conn->session;
+-	struct iscsi_task *task;
++	struct iscsi_mgmt_task *mtask;
+ 
+ 	if (session->state == ISCSI_STATE_TERMINATE)
+ 		return NULL;
+@@ -483,56 +341,29 @@ __iscsi_conn_send_pdu(struct iscsi_conn 
+ 		/*
+ 		 * Login and Text are sent serially, in
+ 		 * request-followed-by-response sequence.
+-		 * Same task can be used. Same ITT must be used.
+-		 * Note that login_task is preallocated at conn_create().
++		 * Same mtask can be used. Same ITT must be used.
++		 * Note that login_mtask is preallocated at conn_create().
+ 		 */
+-		task = conn->login_task;
++		mtask = conn->login_mtask;
+ 	else {
+ 		BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
+ 		BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
+ 
+-		if (!__kfifo_get(session->cmdpool.queue,
+-				 (void*)&task, sizeof(void*)))
++		if (!__kfifo_get(session->mgmtpool.queue,
++				 (void*)&mtask, sizeof(void*)))
+ 			return NULL;
+-
+-		if ((hdr->opcode == (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE)) &&
+-		     hdr->ttt == RESERVED_ITT) {
+-			conn->ping_task = task;
+-			conn->last_ping = jiffies;
+-		}
+ 	}
+-	/*
+-	 * released in complete pdu for task we expect a response for, and
+-	 * released by the lld when it has transmitted the task for
+-	 * pdus we do not expect a response for.
+-	 */
+-	atomic_set(&task->refcount, 1);
+-	task->conn = conn;
+-	task->sc = NULL;
+ 
+ 	if (data_size) {
+-		memcpy(task->data, data, data_size);
+-		task->data_count = data_size;
++		memcpy(mtask->data, data, data_size);
++		mtask->data_count = data_size;
+ 	} else
+-		task->data_count = 0;
+-
+-	memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
+-	INIT_LIST_HEAD(&task->running);
+-	list_add_tail(&task->running, &conn->mgmtqueue);
+-
+-	if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+-		if (iscsi_prep_mgmt_task(conn, task)) {
+-			__iscsi_put_task(task);
+-			return NULL;
+-		}
+-
+-		if (session->tt->xmit_task(task))
+-			task = NULL;
++		mtask->data_count = 0;
+ 
+-	} else
+-		scsi_queue_work(conn->session->host, &conn->xmitwork);
+-
+-	return task;
++	memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr));
++	INIT_LIST_HEAD(&mtask->running);
++	list_add_tail(&mtask->running, &conn->mgmtqueue);
++	return mtask;
+ }
+ 
+ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
+@@ -546,6 +377,7 @@ int iscsi_conn_send_pdu(struct iscsi_cls
+ 	if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
+ 		err = -EPERM;
+ 	spin_unlock_bh(&session->lock);
++	scsi_queue_work(session->host, &conn->xmitwork);
+ 	return err;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
+@@ -554,7 +386,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
+  * iscsi_cmd_rsp - SCSI Command Response processing
+  * @conn: iscsi connection
+  * @hdr: iscsi header
+- * @task: scsi command task
++ * @ctask: scsi command task
+  * @data: cmd data buffer
+  * @datalen: len of buffer
+  *
+@@ -562,12 +394,12 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
+  * then completes the command and task.
+  **/
+ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+-			       struct iscsi_task *task, char *data,
++			       struct iscsi_cmd_task *ctask, char *data,
+ 			       int datalen)
+ {
+ 	struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
+ 	struct iscsi_session *session = conn->session;
+-	struct scsi_cmnd *sc = task->sc;
++	struct scsi_cmnd *sc = ctask->sc;
+ 
+ 	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+ 	conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
+@@ -591,7 +423,7 @@ invalid_datalen:
+ 			goto out;
+ 		}
+ 
+-		senselen = get_unaligned_be16(data);
++		senselen = be16_to_cpu(get_unaligned((__be16 *) data));
+ 		if (datalen < senselen)
+ 			goto invalid_datalen;
+ 
+@@ -601,18 +433,6 @@ invalid_datalen:
+ 			   min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
+ 	}
+ 
+-	if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
+-			   ISCSI_FLAG_CMD_BIDI_OVERFLOW)) {
+-		int res_count = be32_to_cpu(rhdr->bi_residual_count);
+-
+-		if (scsi_bidi_cmnd(sc) && res_count > 0 &&
+-				(rhdr->flags & ISCSI_FLAG_CMD_BIDI_OVERFLOW ||
+-				 res_count <= scsi_in(sc)->length))
+-			scsi_in(sc)->resid = res_count;
+-		else
+-			sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+-	}
+-
+ 	if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
+ 	                   ISCSI_FLAG_CMD_OVERFLOW)) {
+ 		int res_count = be32_to_cpu(rhdr->residual_count);
+@@ -620,17 +440,19 @@ invalid_datalen:
+ 		if (res_count > 0 &&
+ 		    (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+ 		     res_count <= scsi_bufflen(sc)))
+-			/* write side for bidi or uni-io set_resid */
+ 			scsi_set_resid(sc, res_count);
+ 		else
+ 			sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+-	}
++	} else if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
++	                          ISCSI_FLAG_CMD_BIDI_OVERFLOW))
++		sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
++
+ out:
+ 	debug_scsi("done [sc %lx res %d itt 0x%x]\n",
+-		   (long)sc, sc->result, task->itt);
++		   (long)sc, sc->result, ctask->itt);
+ 	conn->scsirsp_pdus_cnt++;
+ 
+-	__iscsi_put_task(task);
++	__iscsi_put_ctask(ctask);
+ }
+ 
+ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+@@ -655,9 +477,9 @@ static void iscsi_tmf_rsp(struct iscsi_c
+ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+ {
+         struct iscsi_nopout hdr;
+-	struct iscsi_task *task;
++	struct iscsi_mgmt_task *mtask;
+ 
+-	if (!rhdr && conn->ping_task)
++	if (!rhdr && conn->ping_mtask)
+ 		return;
+ 
+ 	memset(&hdr, 0, sizeof(struct iscsi_nopout));
+@@ -671,9 +493,18 @@ static void iscsi_send_nopout(struct isc
+ 	} else
+ 		hdr.ttt = RESERVED_ITT;
+ 
+-	task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
+-	if (!task)
++	mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
++	if (!mtask) {
+ 		iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
++		return;
++	}
++
++	/* only track our nops */
++	if (!rhdr) {
++		conn->ping_mtask = mtask;
++		conn->last_ping = jiffies;
++	}
++	scsi_queue_work(conn->session->host, &conn->xmitwork);
+ }
+ 
+ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
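With the ping bookkeeping moved into iscsi_send_nopout() above (and the matching mod_timer() in the completion path further down), the keepalive logic reduces to: send a NOP-Out after recv_timeout seconds of silence, and fail the connection if the NOP-In does not come back within ping_timeout. A self-contained model of that state machine, with invented field names and second-based times instead of jiffies:

#include <stdio.h>

/* Simplified keepalive state, mirroring conn->ping_mtask / conn->last_ping. */
struct keepalive {
	int   ping_outstanding;	/* stands in for conn->ping_mtask != NULL */
	long  last_recv;	/* time of the last PDU received           */
	long  last_ping;	/* time our NOP-Out went out               */
	long  recv_timeout;	/* seconds of silence before we ping       */
	long  ping_timeout;	/* seconds to wait for the NOP-In reply    */
};

/* Called from the periodic transport timer: ping on silence, fail on no reply. */
static const char *check(struct keepalive *k, long now)
{
	if (k->ping_outstanding && now >= k->last_ping + k->ping_timeout)
		return "ping timeout -> connection failure";
	if (!k->ping_outstanding && now >= k->last_recv + k->recv_timeout) {
		k->ping_outstanding = 1;	/* iscsi_send_nopout() */
		k->last_ping = now;
		return "silence -> send NOP-Out";
	}
	return "ok";
}

/* Called when a NOP-In answering our ping arrives (the mod_timer case). */
static void ping_answered(struct keepalive *k, long now)
{
	k->ping_outstanding = 0;
	k->last_recv = now;
}

int main(void)
{
	struct keepalive k = {0, 0, 0, 5, 10};
	printf("t=6:  %s\n", check(&k, 6));	/* sends the ping */
	ping_answered(&k, 8);
	printf("t=9:  %s\n", check(&k, 9));	/* ok again        */
	printf("t=20: %s\n", check(&k, 20));	/* silence -> ping */
	printf("t=31: %s\n", check(&k, 31));	/* ping timed out  */
	return 0;
}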
+@@ -702,31 +533,6 @@ static int iscsi_handle_reject(struct is
+ }
+ 
+ /**
+- * iscsi_itt_to_task - look up task by itt
+- * @conn: iscsi connection
+- * @itt: itt
+- *
+- * This should be used for mgmt tasks like login and nops, or if
+- * the LDD's itt space does not include the session age.
+- *
+- * The session lock must be held.
+- */
+-static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
+-{
+-	struct iscsi_session *session = conn->session;
+-	uint32_t i;
+-
+-	if (itt == RESERVED_ITT)
+-		return NULL;
+-
+-	i = get_itt(itt);
+-	if (i >= session->cmds_max)
+-		return NULL;
+-
+-	return session->cmds[i];
+-}
+-
+-/**
+  * __iscsi_complete_pdu - complete pdu
+  * @conn: iscsi conn
+  * @hdr: iscsi header
+@@ -737,28 +543,108 @@ static struct iscsi_task *iscsi_itt_to_t
+  * queuecommand or send generic. session lock must be held and verify
+  * itt must have been called.
+  */
+-int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+-			 char *data, int datalen)
++static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
++				char *data, int datalen)
+ {
+ 	struct iscsi_session *session = conn->session;
+ 	int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
+-	struct iscsi_task *task;
++	struct iscsi_cmd_task *ctask;
++	struct iscsi_mgmt_task *mtask;
+ 	uint32_t itt;
+ 
+ 	conn->last_recv = jiffies;
+-	rc = iscsi_verify_itt(conn, hdr->itt);
+-	if (rc)
+-		return rc;
+-
+ 	if (hdr->itt != RESERVED_ITT)
+ 		itt = get_itt(hdr->itt);
+ 	else
+ 		itt = ~0U;
+ 
+-	debug_scsi("[op 0x%x cid %d itt 0x%x len %d]\n",
+-		   opcode, conn->id, itt, datalen);
++	if (itt < session->cmds_max) {
++		ctask = session->cmds[itt];
++
++		debug_scsi("cmdrsp [op 0x%x cid %d itt 0x%x len %d]\n",
++			   opcode, conn->id, ctask->itt, datalen);
++
++		switch(opcode) {
++		case ISCSI_OP_SCSI_CMD_RSP:
++			BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
++			iscsi_scsi_cmd_rsp(conn, hdr, ctask, data,
++					   datalen);
++			break;
++		case ISCSI_OP_SCSI_DATA_IN:
++			BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
++			if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
++				conn->scsirsp_pdus_cnt++;
++				__iscsi_put_ctask(ctask);
++			}
++			break;
++		case ISCSI_OP_R2T:
++			/* LLD handles this for now */
++			break;
++		default:
++			rc = ISCSI_ERR_BAD_OPCODE;
++			break;
++		}
++	} else if (itt >= ISCSI_MGMT_ITT_OFFSET &&
++		   itt < ISCSI_MGMT_ITT_OFFSET + session->mgmtpool_max) {
++		mtask = session->mgmt_cmds[itt - ISCSI_MGMT_ITT_OFFSET];
++
++		debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
++			   opcode, conn->id, mtask->itt, datalen);
++
++		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
++		switch(opcode) {
++		case ISCSI_OP_LOGOUT_RSP:
++			if (datalen) {
++				rc = ISCSI_ERR_PROTO;
++				break;
++			}
++			conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
++			/* fall through */
++		case ISCSI_OP_LOGIN_RSP:
++		case ISCSI_OP_TEXT_RSP:
++			/*
++			 * login related PDU's exp_statsn is handled in
++			 * userspace
++			 */
++			if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
++				rc = ISCSI_ERR_CONN_FAILED;
++			iscsi_free_mgmt_task(conn, mtask);
++			break;
++		case ISCSI_OP_SCSI_TMFUNC_RSP:
++			if (datalen) {
++				rc = ISCSI_ERR_PROTO;
++				break;
++			}
++
++			iscsi_tmf_rsp(conn, hdr);
++			iscsi_free_mgmt_task(conn, mtask);
++			break;
++		case ISCSI_OP_NOOP_IN:
++			if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) ||
++			    datalen) {
++				rc = ISCSI_ERR_PROTO;
++				break;
++			}
++			conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+ 
+-	if (itt == ~0U) {
++			if (conn->ping_mtask != mtask) {
++				/*
++				 * If this is not in response to one of our
++				 * nops then it must be from userspace.
++				 */
++				if (iscsi_recv_pdu(conn->cls_conn, hdr, data,
++						   datalen))
++					rc = ISCSI_ERR_CONN_FAILED;
++			} else
++				mod_timer(&conn->transport_timer,
++					  jiffies + conn->recv_timeout);
++			iscsi_free_mgmt_task(conn, mtask);
++			break;
++		default:
++			rc = ISCSI_ERR_BAD_OPCODE;
++			break;
++		}
++	} else if (itt == ~0U) {
+ 		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+ 
+ 		switch(opcode) {
+@@ -785,104 +671,11 @@ int __iscsi_complete_pdu(struct iscsi_co
+ 			rc = ISCSI_ERR_BAD_OPCODE;
+ 			break;
+ 		}
+-		goto out;
+-	}
+-
+-	switch(opcode) {
+-	case ISCSI_OP_SCSI_CMD_RSP:
+-	case ISCSI_OP_SCSI_DATA_IN:
+-		task = iscsi_itt_to_ctask(conn, hdr->itt);
+-		if (!task)
+-			return ISCSI_ERR_BAD_ITT;
+-		break;
+-	case ISCSI_OP_R2T:
+-		/*
+-		 * LLD handles R2Ts if they need to.
+-		 */
+-		return 0;
+-	case ISCSI_OP_LOGOUT_RSP:
+-	case ISCSI_OP_LOGIN_RSP:
+-	case ISCSI_OP_TEXT_RSP:
+-	case ISCSI_OP_SCSI_TMFUNC_RSP:
+-	case ISCSI_OP_NOOP_IN:
+-		task = iscsi_itt_to_task(conn, hdr->itt);
+-		if (!task)
+-			return ISCSI_ERR_BAD_ITT;
+-		break;
+-	default:
+-		return ISCSI_ERR_BAD_OPCODE;
+-	}
+-
+-	switch(opcode) {
+-	case ISCSI_OP_SCSI_CMD_RSP:
+-		iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
+-		break;
+-	case ISCSI_OP_SCSI_DATA_IN:
+-		if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+-			conn->scsirsp_pdus_cnt++;
+-			iscsi_update_cmdsn(session,
+-					   (struct iscsi_nopin*) hdr);
+-			__iscsi_put_task(task);
+-		}
+-		break;
+-	case ISCSI_OP_LOGOUT_RSP:
+-		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+-		if (datalen) {
+-			rc = ISCSI_ERR_PROTO;
+-			break;
+-		}
+-		conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+-		goto recv_pdu;
+-	case ISCSI_OP_LOGIN_RSP:
+-	case ISCSI_OP_TEXT_RSP:
+-		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+-		/*
+-		 * login related PDU's exp_statsn is handled in
+-		 * userspace
+-		 */
+-		goto recv_pdu;
+-	case ISCSI_OP_SCSI_TMFUNC_RSP:
+-		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+-		if (datalen) {
+-			rc = ISCSI_ERR_PROTO;
+-			break;
+-		}
+-
+-		iscsi_tmf_rsp(conn, hdr);
+-		__iscsi_put_task(task);
+-		break;
+-	case ISCSI_OP_NOOP_IN:
+-		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+-		if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
+-			rc = ISCSI_ERR_PROTO;
+-			break;
+-		}
+-		conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+-
+-		if (conn->ping_task != task)
+-			/*
+-			 * If this is not in response to one of our
+-			 * nops then it must be from userspace.
+-			 */
+-			goto recv_pdu;
+-
+-		mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
+-		__iscsi_put_task(task);
+-		break;
+-	default:
+-		rc = ISCSI_ERR_BAD_OPCODE;
+-		break;
+-	}
++	} else
++		rc = ISCSI_ERR_BAD_ITT;
+ 
+-out:
+-	return rc;
+-recv_pdu:
+-	if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
+-		rc = ISCSI_ERR_CONN_FAILED;
+-	__iscsi_put_task(task);
+ 	return rc;
+ }
+-EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
+ 
+ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ 		       char *data, int datalen)
+@@ -896,63 +689,51 @@ int iscsi_complete_pdu(struct iscsi_conn
+ }
+ EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
+ 
+-int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
++/* verify itt (itt encoding: age+cid+itt) */
++int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
++		     uint32_t *ret_itt)
+ {
+ 	struct iscsi_session *session = conn->session;
+-	uint32_t i;
+-
+-	if (itt == RESERVED_ITT)
+-		return 0;
+-
+-	if (((__force u32)itt & ISCSI_AGE_MASK) !=
+-	    (session->age << ISCSI_AGE_SHIFT)) {
+-		iscsi_conn_printk(KERN_ERR, conn,
+-				  "received itt %x expected session age (%x)\n",
+-				  (__force u32)itt, session->age);
+-		return ISCSI_ERR_BAD_ITT;
+-	}
++	struct iscsi_cmd_task *ctask;
++	uint32_t itt;
+ 
+-	i = get_itt(itt);
+-	if (i >= session->cmds_max) {
+-		iscsi_conn_printk(KERN_ERR, conn,
+-				  "received invalid itt index %u (max cmds "
+-				   "%u.\n", i, session->cmds_max);
+-		return ISCSI_ERR_BAD_ITT;
+-	}
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_verify_itt);
++	if (hdr->itt != RESERVED_ITT) {
++		if (((__force u32)hdr->itt & ISCSI_AGE_MASK) !=
++		    (session->age << ISCSI_AGE_SHIFT)) {
++			iscsi_conn_printk(KERN_ERR, conn,
++					  "received itt %x expected session "
++					  "age (%x)\n", (__force u32)hdr->itt,
++					  session->age & ISCSI_AGE_MASK);
++			return ISCSI_ERR_BAD_ITT;
++		}
+ 
+-/**
+- * iscsi_itt_to_ctask - look up ctask by itt
+- * @conn: iscsi connection
+- * @itt: itt
+- *
+- * This should be used for cmd tasks.
+- *
+- * The session lock must be held.
+- */
+-struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
+-{
+-	struct iscsi_task *task;
++		itt = get_itt(hdr->itt);
++	} else
++		itt = ~0U;
+ 
+-	if (iscsi_verify_itt(conn, itt))
+-		return NULL;
++	if (itt < session->cmds_max) {
++		ctask = session->cmds[itt];
+ 
+-	task = iscsi_itt_to_task(conn, itt);
+-	if (!task || !task->sc)
+-		return NULL;
++		if (!ctask->sc) {
++			iscsi_conn_printk(KERN_INFO, conn, "dropping ctask "
++					  "with itt 0x%x\n", ctask->itt);
++			/* force drop */
++			return ISCSI_ERR_NO_SCSI_CMD;
++		}
+ 
+-	if (task->sc->SCp.phase != conn->session->age) {
+-		iscsi_session_printk(KERN_ERR, conn->session,
+-				  "task's session age %d, expected %d\n",
+-				  task->sc->SCp.phase, conn->session->age);
+-		return NULL;
++		if (ctask->sc->SCp.phase != session->age) {
++			iscsi_conn_printk(KERN_ERR, conn,
++					  "iscsi: ctask's session age %d, "
++					  "expected %d\n", ctask->sc->SCp.phase,
++					  session->age);
++			return ISCSI_ERR_SESSION_FAILED;
++		}
+ 	}
+ 
+-	return task;
++	*ret_itt = itt;
++	return 0;
+ }
+-EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
++EXPORT_SYMBOL_GPL(iscsi_verify_itt);
+ 
+ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+ {
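The reworked iscsi_verify_itt()/__iscsi_complete_pdu() above go back to demultiplexing purely by tag value: the initiator task tag embeds the session age in its high bits, and after masking, low indexes name SCSI command tasks while a separate offset range names mgmt tasks. The sketch below uses hypothetical mask and offset values (the real ones live in the iscsi headers) just to show the classification order:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical field layout -- the real masks come from the iscsi headers. */
#define AGE_SHIFT        28
#define AGE_MASK         (0xfu << AGE_SHIFT)
#define ITT_MASK         0x0fffffffu
#define MGMT_ITT_OFFSET  0x1000u

enum verdict { IS_CMD, IS_MGMT, IS_RESERVED, IS_BAD };

/*
 * Classification order restored by the patch: reserved tags first, then a
 * session-age check, then a range test deciding between the cmds[] and
 * mgmt_cmds[] arrays.
 */
static enum verdict classify_itt(uint32_t itt, uint32_t age,
				 uint32_t cmds_max, uint32_t mgmt_max)
{
	uint32_t idx;

	if (itt == 0xffffffffu)			/* RESERVED_ITT */
		return IS_RESERVED;
	if ((itt & AGE_MASK) != (age << AGE_SHIFT))
		return IS_BAD;			/* stale tag from an old session age */

	idx = itt & ITT_MASK;
	if (idx < cmds_max)
		return IS_CMD;
	if (idx >= MGMT_ITT_OFFSET && idx < MGMT_ITT_OFFSET + mgmt_max)
		return IS_MGMT;
	return IS_BAD;
}

int main(void)
{
	uint32_t age = 2;
	printf("%d %d %d\n",
	       classify_itt((age << AGE_SHIFT) | 5, age, 128, 32),	/* cmd  */
	       classify_itt((age << AGE_SHIFT) | 0x1003, age, 128, 32),	/* mgmt */
	       classify_itt((1u << AGE_SHIFT) | 5, age, 128, 32));	/* bad  */
	return 0;
}

Tags carrying a stale age are rejected outright, which is what protects the cmds[]/mgmt_cmds[] lookups from completions that belong to a previous session incarnation.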
+@@ -974,6 +755,61 @@ void iscsi_conn_failure(struct iscsi_con
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_failure);
+ 
++static void iscsi_prep_mtask(struct iscsi_conn *conn,
++			     struct iscsi_mgmt_task *mtask)
++{
++	struct iscsi_session *session = conn->session;
++	struct iscsi_hdr *hdr = mtask->hdr;
++	struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
++
++	if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
++	    hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
++		nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
++	/*
++	 * pre-format CmdSN for outgoing PDU.
++	 */
++	nop->cmdsn = cpu_to_be32(session->cmdsn);
++	if (hdr->itt != RESERVED_ITT) {
++		hdr->itt = build_itt(mtask->itt, session->age);
++		/*
++		 * TODO: We always use immediate, so we never hit this.
++		 * If we start to send tmfs or nops as non-immediate then
++		 * we should start checking the cmdsn numbers for mgmt tasks.
++		 */
++		if (conn->c_stage == ISCSI_CONN_STARTED &&
++		    !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
++			session->queued_cmdsn++;
++			session->cmdsn++;
++		}
++	}
++
++	if (session->tt->init_mgmt_task)
++		session->tt->init_mgmt_task(conn, mtask);
++
++	debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
++		   hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
++		   mtask->data_count);
++}
++
++static int iscsi_xmit_mtask(struct iscsi_conn *conn)
++{
++	struct iscsi_hdr *hdr = conn->mtask->hdr;
++	int rc;
++
++	if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
++		conn->session->state = ISCSI_STATE_LOGGING_OUT;
++	spin_unlock_bh(&conn->session->lock);
++
++	rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask);
++	spin_lock_bh(&conn->session->lock);
++	if (rc)
++		return rc;
++
++	/* done with this in-progress mtask */
++	conn->mtask = NULL;
++	return 0;
++}
++
+ static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
+ {
+ 	struct iscsi_session *session = conn->session;
+@@ -991,38 +827,37 @@ static int iscsi_check_cmdsn_window_clos
+ 	return 0;
+ }
+ 
+-static int iscsi_xmit_task(struct iscsi_conn *conn)
++static int iscsi_xmit_ctask(struct iscsi_conn *conn)
+ {
+-	struct iscsi_task *task = conn->task;
++	struct iscsi_cmd_task *ctask = conn->ctask;
+ 	int rc;
+ 
+-	__iscsi_get_task(task);
++	__iscsi_get_ctask(ctask);
+ 	spin_unlock_bh(&conn->session->lock);
+-	rc = conn->session->tt->xmit_task(task);
++	rc = conn->session->tt->xmit_cmd_task(conn, ctask);
+ 	spin_lock_bh(&conn->session->lock);
+-	__iscsi_put_task(task);
++	__iscsi_put_ctask(ctask);
+ 	if (!rc)
+-		/* done with this task */
+-		conn->task = NULL;
++		/* done with this ctask */
++		conn->ctask = NULL;
+ 	return rc;
+ }
+ 
+ /**
+- * iscsi_requeue_task - requeue task to run from session workqueue
+- * @task: task to requeue
++ * iscsi_requeue_ctask - requeue ctask to run from session workqueue
++ * @ctask: ctask to requeue
+  *
+- * LLDs that need to run a task from the session workqueue should call
+- * this. The session lock must be held. This should only be called
+- * by software drivers.
++ * LLDs that need to run a ctask from the session workqueue should call
++ * this. The session lock must be held.
+  */
+-void iscsi_requeue_task(struct iscsi_task *task)
++void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask)
+ {
+-	struct iscsi_conn *conn = task->conn;
++	struct iscsi_conn *conn = ctask->conn;
+ 
+-	list_move_tail(&task->running, &conn->requeue);
++	list_move_tail(&ctask->running, &conn->requeue);
+ 	scsi_queue_work(conn->session->host, &conn->xmitwork);
+ }
+-EXPORT_SYMBOL_GPL(iscsi_requeue_task);
++EXPORT_SYMBOL_GPL(iscsi_requeue_ctask);
+ 
+ /**
+  * iscsi_data_xmit - xmit any command into the scheduled connection
+@@ -1044,8 +879,14 @@ static int iscsi_data_xmit(struct iscsi_
+ 		return -ENODATA;
+ 	}
+ 
+-	if (conn->task) {
+-		rc = iscsi_xmit_task(conn);
++	if (conn->ctask) {
++		rc = iscsi_xmit_ctask(conn);
++		if (rc)
++			goto again;
++	}
++
++	if (conn->mtask) {
++		rc = iscsi_xmit_mtask(conn);
+ 	        if (rc)
+ 		        goto again;
+ 	}
+@@ -1057,14 +898,17 @@ static int iscsi_data_xmit(struct iscsi_
+ 	 */
+ check_mgmt:
+ 	while (!list_empty(&conn->mgmtqueue)) {
+-		conn->task = list_entry(conn->mgmtqueue.next,
+-					 struct iscsi_task, running);
+-		if (iscsi_prep_mgmt_task(conn, conn->task)) {
+-			__iscsi_put_task(conn->task);
+-			conn->task = NULL;
++		conn->mtask = list_entry(conn->mgmtqueue.next,
++					 struct iscsi_mgmt_task, running);
++		if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
++			iscsi_free_mgmt_task(conn, conn->mtask);
++			conn->mtask = NULL;
+ 			continue;
+ 		}
+-		rc = iscsi_xmit_task(conn);
++
++		iscsi_prep_mtask(conn, conn->mtask);
++		list_move_tail(conn->mgmtqueue.next, &conn->mgmt_run_list);
++		rc = iscsi_xmit_mtask(conn);
+ 		if (rc)
+ 			goto again;
+ 	}
+@@ -1074,21 +918,24 @@ check_mgmt:
+ 		if (conn->tmf_state == TMF_QUEUED)
+ 			break;
+ 
+-		conn->task = list_entry(conn->xmitqueue.next,
+-					 struct iscsi_task, running);
++		conn->ctask = list_entry(conn->xmitqueue.next,
++					 struct iscsi_cmd_task, running);
+ 		if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
+-			fail_command(conn, conn->task, DID_IMM_RETRY << 16);
++			fail_command(conn, conn->ctask, DID_IMM_RETRY << 16);
+ 			continue;
+ 		}
+-		if (iscsi_prep_scsi_cmd_pdu(conn->task)) {
+-			fail_command(conn, conn->task, DID_ABORT << 16);
++		if (iscsi_prep_scsi_cmd_pdu(conn->ctask)) {
++			fail_command(conn, conn->ctask, DID_ABORT << 16);
+ 			continue;
+ 		}
+-		rc = iscsi_xmit_task(conn);
++
++		conn->ctask->state = ISCSI_TASK_RUNNING;
++		list_move_tail(conn->xmitqueue.next, &conn->run_list);
++		rc = iscsi_xmit_ctask(conn);
+ 		if (rc)
+ 			goto again;
+ 		/*
+-		 * we could continuously get new task requests so
++		 * we could continuously get new ctask requests so
+ 		 * we need to check the mgmt queue for nops that need to
+ 		 * be sent to aviod starvation
+ 		 */
+@@ -1106,11 +953,11 @@ check_mgmt:
+ 		if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+ 			break;
+ 
+-		conn->task = list_entry(conn->requeue.next,
+-					 struct iscsi_task, running);
+-		conn->task->state = ISCSI_TASK_RUNNING;
++		conn->ctask = list_entry(conn->requeue.next,
++					 struct iscsi_cmd_task, running);
++		conn->ctask->state = ISCSI_TASK_RUNNING;
+ 		list_move_tail(conn->requeue.next, &conn->run_list);
+-		rc = iscsi_xmit_task(conn);
++		rc = iscsi_xmit_ctask(conn);
+ 		if (rc)
+ 			goto again;
+ 		if (!list_empty(&conn->mgmtqueue))
+@@ -1154,12 +1001,11 @@ enum {
+ 
+ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ {
+-	struct iscsi_cls_session *cls_session;
+ 	struct Scsi_Host *host;
+ 	int reason = 0;
+ 	struct iscsi_session *session;
+ 	struct iscsi_conn *conn;
+-	struct iscsi_task *task = NULL;
++	struct iscsi_cmd_task *ctask = NULL;
+ 
+ 	sc->scsi_done = done;
+ 	sc->result = 0;
+@@ -1168,11 +1014,10 @@ int iscsi_queuecommand(struct scsi_cmnd 
+ 	host = sc->device->host;
+ 	spin_unlock(host->host_lock);
+ 
+-	cls_session = starget_to_session(scsi_target(sc->device));
+-	session = cls_session->dd_data;
++	session = iscsi_hostdata(host->hostdata);
+ 	spin_lock(&session->lock);
+ 
+-	reason = iscsi_session_chkready(cls_session);
++	reason = iscsi_session_chkready(session_to_cls(session));
+ 	if (reason) {
+ 		sc->result = reason;
+ 		goto fault;
+@@ -1227,39 +1072,26 @@ int iscsi_queuecommand(struct scsi_cmnd 
+ 		goto reject;
+ 	}
+ 
+-	if (!__kfifo_get(session->cmdpool.queue, (void*)&task,
++	if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask,
+ 			 sizeof(void*))) {
+ 		reason = FAILURE_OOM;
+ 		goto reject;
+ 	}
++	session->queued_cmdsn++;
++
+ 	sc->SCp.phase = session->age;
+-	sc->SCp.ptr = (char *)task;
++	sc->SCp.ptr = (char *)ctask;
+ 
+-	atomic_set(&task->refcount, 1);
+-	task->state = ISCSI_TASK_PENDING;
+-	task->conn = conn;
+-	task->sc = sc;
+-	INIT_LIST_HEAD(&task->running);
+-	list_add_tail(&task->running, &conn->xmitqueue);
+-
+-	if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+-		if (iscsi_prep_scsi_cmd_pdu(task)) {
+-			sc->result = DID_ABORT << 16;
+-			sc->scsi_done = NULL;
+-			iscsi_complete_command(task);
+-			goto fault;
+-		}
+-		if (session->tt->xmit_task(task)) {
+-			sc->scsi_done = NULL;
+-			iscsi_complete_command(task);
+-			reason = FAILURE_SESSION_NOT_READY;
+-			goto reject;
+-		}
+-	} else
+-		scsi_queue_work(session->host, &conn->xmitwork);
++	atomic_set(&ctask->refcount, 1);
++	ctask->state = ISCSI_TASK_PENDING;
++	ctask->conn = conn;
++	ctask->sc = sc;
++	INIT_LIST_HEAD(&ctask->running);
+ 
+-	session->queued_cmdsn++;
++	list_add_tail(&ctask->running, &conn->xmitqueue);
+ 	spin_unlock(&session->lock);
++
++	scsi_queue_work(host, &conn->xmitwork);
+ 	spin_lock(host->host_lock);
+ 	return 0;
+ 
+@@ -1272,13 +1104,8 @@ reject:
+ fault:
+ 	spin_unlock(&session->lock);
+ 	debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason);
+-	if (!scsi_bidi_cmnd(sc))
+-		scsi_set_resid(sc, scsi_bufflen(sc));
+-	else {
+-		scsi_out(sc)->resid = scsi_out(sc)->length;
+-		scsi_in(sc)->resid = scsi_in(sc)->length;
+-	}
+-	done(sc);
++	scsi_set_resid(sc, scsi_bufflen(sc));
++	sc->scsi_done(sc);
+ 	spin_lock(host->host_lock);
+ 	return 0;
+ }
+@@ -1295,7 +1122,7 @@ EXPORT_SYMBOL_GPL(iscsi_change_queue_dep
+ 
+ void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
+ {
+-	struct iscsi_session *session = cls_session->dd_data;
++	struct iscsi_session *session = class_to_transport_session(cls_session);
+ 
+ 	spin_lock_bh(&session->lock);
+ 	if (session->state != ISCSI_STATE_LOGGED_IN) {
+@@ -1309,13 +1136,9 @@ EXPORT_SYMBOL_GPL(iscsi_session_recovery
+ 
+ int iscsi_eh_host_reset(struct scsi_cmnd *sc)
+ {
+-	struct iscsi_cls_session *cls_session;
+-	struct iscsi_session *session;
+-	struct iscsi_conn *conn;
+-
+-	cls_session = starget_to_session(scsi_target(sc->device));
+-	session = cls_session->dd_data;
+-	conn = session->leadconn;
++	struct Scsi_Host *host = sc->device->host;
++	struct iscsi_session *session = iscsi_hostdata(host->hostdata);
++	struct iscsi_conn *conn = session->leadconn;
+ 
+ 	mutex_lock(&session->eh_mutex);
+ 	spin_lock_bh(&session->lock);
+@@ -1377,11 +1200,11 @@ static int iscsi_exec_task_mgmt_fn(struc
+ 				   int timeout)
+ {
+ 	struct iscsi_session *session = conn->session;
+-	struct iscsi_task *task;
++	struct iscsi_mgmt_task *mtask;
+ 
+-	task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
++	mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
+ 				      NULL, 0);
+-	if (!task) {
++	if (!mtask) {
+ 		spin_unlock_bh(&session->lock);
+ 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ 		spin_lock_bh(&session->lock);
+@@ -1397,6 +1220,7 @@ static int iscsi_exec_task_mgmt_fn(struc
+ 
+ 	spin_unlock_bh(&session->lock);
+ 	mutex_unlock(&session->eh_mutex);
++	scsi_queue_work(session->host, &conn->xmitwork);
+ 
+ 	/*
+ 	 * block eh thread until:
+@@ -1415,7 +1239,7 @@ static int iscsi_exec_task_mgmt_fn(struc
+ 
+ 	mutex_lock(&session->eh_mutex);
+ 	spin_lock_bh(&session->lock);
+-	/* if the session drops it will clean up the task */
++	/* if the session drops it will clean up the mtask */
+ 	if (age != session->age ||
+ 	    session->state != ISCSI_STATE_LOGGED_IN)
+ 		return -ENOTCONN;
+@@ -1429,51 +1253,48 @@ static int iscsi_exec_task_mgmt_fn(struc
+ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
+ 			      int error)
+ {
+-	struct iscsi_task *task, *tmp;
++	struct iscsi_cmd_task *ctask, *tmp;
+ 
+-	if (conn->task && (conn->task->sc->device->lun == lun || lun == -1))
+-		conn->task = NULL;
++	if (conn->ctask && (conn->ctask->sc->device->lun == lun || lun == -1))
++		conn->ctask = NULL;
+ 
+ 	/* flush pending */
+-	list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) {
+-		if (lun == task->sc->device->lun || lun == -1) {
++	list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) {
++		if (lun == ctask->sc->device->lun || lun == -1) {
+ 			debug_scsi("failing pending sc %p itt 0x%x\n",
+-				   task->sc, task->itt);
+-			fail_command(conn, task, error << 16);
++				   ctask->sc, ctask->itt);
++			fail_command(conn, ctask, error << 16);
+ 		}
+ 	}
+ 
+-	list_for_each_entry_safe(task, tmp, &conn->requeue, running) {
+-		if (lun == task->sc->device->lun || lun == -1) {
++	list_for_each_entry_safe(ctask, tmp, &conn->requeue, running) {
++		if (lun == ctask->sc->device->lun || lun == -1) {
+ 			debug_scsi("failing requeued sc %p itt 0x%x\n",
+-				   task->sc, task->itt);
+-			fail_command(conn, task, error << 16);
++				   ctask->sc, ctask->itt);
++			fail_command(conn, ctask, error << 16);
+ 		}
+ 	}
+ 
+ 	/* fail all other running */
+-	list_for_each_entry_safe(task, tmp, &conn->run_list, running) {
+-		if (lun == task->sc->device->lun || lun == -1) {
++	list_for_each_entry_safe(ctask, tmp, &conn->run_list, running) {
++		if (lun == ctask->sc->device->lun || lun == -1) {
+ 			debug_scsi("failing in progress sc %p itt 0x%x\n",
+-				   task->sc, task->itt);
+-			fail_command(conn, task, DID_BUS_BUSY << 16);
++				   ctask->sc, ctask->itt);
++			fail_command(conn, ctask, DID_BUS_BUSY << 16);
+ 		}
+ 	}
+ }
+ 
+-void iscsi_suspend_tx(struct iscsi_conn *conn)
++static void iscsi_suspend_tx(struct iscsi_conn *conn)
+ {
+ 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+-	if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+-		scsi_flush_work(conn->session->host);
++	scsi_flush_work(conn->session->host);
+ }
+-EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
+ 
+ static void iscsi_start_tx(struct iscsi_conn *conn)
+ {
+ 	clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+-	if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+-		scsi_queue_work(conn->session->host, &conn->xmitwork);
++	scsi_queue_work(conn->session->host, &conn->xmitwork);
+ }
+ 
+ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+@@ -1484,7 +1305,7 @@ static enum scsi_eh_timer_return iscsi_e
+ 	enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
+ 
+ 	cls_session = starget_to_session(scsi_target(scmd->device));
+-	session = cls_session->dd_data;
++	session = class_to_transport_session(cls_session);
+ 
+ 	debug_scsi("scsi cmd %p timedout\n", scmd);
+ 
+@@ -1522,7 +1343,7 @@ static enum scsi_eh_timer_return iscsi_e
+ 			   jiffies))
+ 		rc = EH_RESET_TIMER;
+ 	/* if in the middle of checking the transport then give us more time */
+-	if (conn->ping_task)
++	if (conn->ping_mtask)
+ 		rc = EH_RESET_TIMER;
+ done:
+ 	spin_unlock(&session->lock);
+@@ -1546,7 +1367,7 @@ static void iscsi_check_transport_timeou
+ 
+ 	recv_timeout *= HZ;
+ 	last_recv = conn->last_recv;
+-	if (conn->ping_task &&
++	if (conn->ping_mtask &&
+ 	    time_before_eq(conn->last_ping + (conn->ping_timeout * HZ),
+ 			   jiffies)) {
+ 		iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
+@@ -1572,30 +1393,27 @@ done:
+ 	spin_unlock(&session->lock);
+ }
+ 
+-static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
++static void iscsi_prep_abort_task_pdu(struct iscsi_cmd_task *ctask,
+ 				      struct iscsi_tm *hdr)
+ {
+ 	memset(hdr, 0, sizeof(*hdr));
+ 	hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
+ 	hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
+ 	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+-	memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+-	hdr->rtt = task->hdr->itt;
+-	hdr->refcmdsn = task->hdr->cmdsn;
++	memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++	hdr->rtt = ctask->hdr->itt;
++	hdr->refcmdsn = ctask->hdr->cmdsn;
+ }
+ 
+ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ {
+-	struct iscsi_cls_session *cls_session;
+-	struct iscsi_session *session;
++	struct Scsi_Host *host = sc->device->host;
++	struct iscsi_session *session = iscsi_hostdata(host->hostdata);
+ 	struct iscsi_conn *conn;
+-	struct iscsi_task *task;
++	struct iscsi_cmd_task *ctask;
+ 	struct iscsi_tm *hdr;
+ 	int rc, age;
+ 
+-	cls_session = starget_to_session(scsi_target(sc->device));
+-	session = cls_session->dd_data;
+-
+ 	mutex_lock(&session->eh_mutex);
+ 	spin_lock_bh(&session->lock);
+ 	/*
+@@ -1624,17 +1442,17 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ 	conn->eh_abort_cnt++;
+ 	age = session->age;
+ 
+-	task = (struct iscsi_task *)sc->SCp.ptr;
+-	debug_scsi("aborting [sc %p itt 0x%x]\n", sc, task->itt);
++	ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
++	debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
+ 
+-	/* task completed before time out */
+-	if (!task->sc) {
++	/* ctask completed before time out */
++	if (!ctask->sc) {
+ 		debug_scsi("sc completed while abort in progress\n");
+ 		goto success;
+ 	}
+ 
+-	if (task->state == ISCSI_TASK_PENDING) {
+-		fail_command(conn, task, DID_ABORT << 16);
++	if (ctask->state == ISCSI_TASK_PENDING) {
++		fail_command(conn, ctask, DID_ABORT << 16);
+ 		goto success;
+ 	}
+ 
+@@ -1644,7 +1462,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ 	conn->tmf_state = TMF_QUEUED;
+ 
+ 	hdr = &conn->tmhdr;
+-	iscsi_prep_abort_task_pdu(task, hdr);
++	iscsi_prep_abort_task_pdu(ctask, hdr);
+ 
+ 	if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
+ 		rc = FAILED;
+@@ -1654,20 +1472,16 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ 	switch (conn->tmf_state) {
+ 	case TMF_SUCCESS:
+ 		spin_unlock_bh(&session->lock);
+-		/*
+-		 * stop tx side incase the target had sent a abort rsp but
+-		 * the initiator was still writing out data.
+-		 */
+ 		iscsi_suspend_tx(conn);
+ 		/*
+-		 * we do not stop the recv side because targets have been
+-		 * good and have never sent us a successful tmf response
+-		 * then sent more data for the cmd.
++		 * clean up task if aborted. grab the recv lock as a writer
+ 		 */
++		write_lock_bh(conn->recv_lock);
+ 		spin_lock(&session->lock);
+-		fail_command(conn, task, DID_ABORT << 16);
++		fail_command(conn, ctask, DID_ABORT << 16);
+ 		conn->tmf_state = TMF_INITIAL;
+ 		spin_unlock(&session->lock);
++		write_unlock_bh(conn->recv_lock);
+ 		iscsi_start_tx(conn);
+ 		goto success_unlocked;
+ 	case TMF_TIMEDOUT:
+@@ -1677,7 +1491,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ 	case TMF_NOT_FOUND:
+ 		if (!sc->SCp.ptr) {
+ 			conn->tmf_state = TMF_INITIAL;
+-			/* task completed before tmf abort response */
++			/* ctask completed before tmf abort response */
+ 			debug_scsi("sc completed while abort in progress\n");
+ 			goto success;
+ 		}
+@@ -1690,7 +1504,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ success:
+ 	spin_unlock_bh(&session->lock);
+ success_unlocked:
+-	debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, task->itt);
++	debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
+ 	mutex_unlock(&session->eh_mutex);
+ 	return SUCCESS;
+ 
+@@ -1698,7 +1512,7 @@ failed:
+ 	spin_unlock_bh(&session->lock);
+ failed_unlocked:
+ 	debug_scsi("abort failed [sc %p itt 0x%x]\n", sc,
+-		    task ? task->itt : 0);
++		    ctask ? ctask->itt : 0);
+ 	mutex_unlock(&session->eh_mutex);
+ 	return FAILED;
+ }
+@@ -1716,15 +1530,12 @@ static void iscsi_prep_lun_reset_pdu(str
+ 
+ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
+ {
+-	struct iscsi_cls_session *cls_session;
+-	struct iscsi_session *session;
++	struct Scsi_Host *host = sc->device->host;
++	struct iscsi_session *session = iscsi_hostdata(host->hostdata);
+ 	struct iscsi_conn *conn;
+ 	struct iscsi_tm *hdr;
+ 	int rc = FAILED;
+ 
+-	cls_session = starget_to_session(scsi_target(sc->device));
+-	session = cls_session->dd_data;
+-
+ 	debug_scsi("LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
+ 
+ 	mutex_lock(&session->eh_mutex);
+@@ -1767,11 +1578,13 @@ int iscsi_eh_device_reset(struct scsi_cm
+ 	spin_unlock_bh(&session->lock);
+ 
+ 	iscsi_suspend_tx(conn);
+-
++	/* need to grab the recv lock then session lock */
++	write_lock_bh(conn->recv_lock);
+ 	spin_lock(&session->lock);
+ 	fail_all_commands(conn, sc->device->lun, DID_ERROR);
+ 	conn->tmf_state = TMF_INITIAL;
+ 	spin_unlock(&session->lock);
++	write_unlock_bh(conn->recv_lock);
+ 
+ 	iscsi_start_tx(conn);
+ 	goto done;
+@@ -1847,203 +1660,177 @@ void iscsi_pool_free(struct iscsi_pool *
+ }
+ EXPORT_SYMBOL_GPL(iscsi_pool_free);
+ 
+-/**
+- * iscsi_host_add - add host to system
+- * @shost: scsi host
+- * @pdev: parent device
+- *
+- * This should be called by partial offload and software iscsi drivers
+- * to add a host to the system.
+- */
+-int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
+-{
+-	if (!shost->can_queue)
+-		shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX;
+-
+-	return scsi_add_host(shost, pdev);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_add);
+-
+-/**
+- * iscsi_host_alloc - allocate a host and driver data
+- * @sht: scsi host template
+- * @dd_data_size: driver host data size
+- * @qdepth: default device queue depth
++/*
++ * iSCSI Session's hostdata organization:
+  *
+- * This should be called by partial offload and software iscsi drivers.
+- * To access the driver specific memory use the iscsi_host_priv() macro.
++ *    *------------------* <== hostdata_session(host->hostdata)
++ *    | ptr to class sess|
++ *    |------------------| <== iscsi_hostdata(host->hostdata)
++ *    | iscsi_session    |
++ *    *------------------*
+  */
+-struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+-				   int dd_data_size, uint16_t qdepth)
+-{
+-	struct Scsi_Host *shost;
+ 
+-	shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
+-	if (!shost)
+-		return NULL;
+-	shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
++#define hostdata_privsize(_sz)	(sizeof(unsigned long) + _sz + \
++				 _sz % sizeof(unsigned long))
+ 
+-	if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
+-		if (qdepth != 0)
+-			printk(KERN_ERR "iscsi: invalid queue depth of %d. "
+-			       "Queue depth must be between 1 and %d.\n",
+-			       qdepth, ISCSI_MAX_CMD_PER_LUN);
+-		qdepth = ISCSI_DEF_CMD_PER_LUN;
+-	}
+-	shost->cmd_per_lun = qdepth;
+-	return shost;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_alloc);
+-
+-/**
+- * iscsi_host_remove - remove host and sessions
+- * @shost: scsi host
+- *
+- * This will also remove any sessions attached to the host, but if userspace
+- * is managing the session at the same time this will break. TODO: add
+- * refcounting to the netlink iscsi interface so a rmmod or host hot unplug
+- * does not remove the memory from under us.
+- */
+-void iscsi_host_remove(struct Scsi_Host *shost)
+-{
+-	iscsi_host_for_each_session(shost, iscsi_session_teardown);
+-	scsi_remove_host(shost);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_remove);
+-
+-void iscsi_host_free(struct Scsi_Host *shost)
+-{
+-	struct iscsi_host *ihost = shost_priv(shost);
+-
+-	kfree(ihost->netdev);
+-	kfree(ihost->hwaddress);
+-	kfree(ihost->initiatorname);
+-	scsi_host_put(shost);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_free);
++#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
+ 
+ /**
+  * iscsi_session_setup - create iscsi cls session and host and session
++ * @scsit: scsi transport template
+  * @iscsit: iscsi transport template
+- * @shost: scsi host
+- * @cmds_max: session can queue
+- * @cmd_task_size: LLD task private data size
++ * @cmds_max: scsi host can queue
++ * @qdepth: scsi host cmds per lun
++ * @cmd_task_size: LLD ctask private data size
++ * @mgmt_task_size: LLD mtask private data size
+  * @initial_cmdsn: initial CmdSN
++ * @hostno: host no allocated
+  *
+  * This can be used by software iscsi_transports that allocate
+  * a session per scsi host.
+- *
+- * Callers should set cmds_max to the largest total numer (mgmt + scsi) of
+- * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks
+- * for nop handling and login/logout requests.
+- */
++ **/
+ struct iscsi_cls_session *
+-iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
+-		    uint16_t cmds_max, int cmd_task_size,
+-		    uint32_t initial_cmdsn, unsigned int id)
++iscsi_session_setup(struct iscsi_transport *iscsit,
++		    struct scsi_transport_template *scsit,
++		    uint16_t cmds_max, uint16_t qdepth,
++		    int cmd_task_size, int mgmt_task_size,
++		    uint32_t initial_cmdsn, uint32_t *hostno)
+ {
++	struct Scsi_Host *shost;
+ 	struct iscsi_session *session;
+ 	struct iscsi_cls_session *cls_session;
+-	int cmd_i, scsi_cmds, total_cmds = cmds_max;
++	int cmd_i;
+ 
+-	if (!total_cmds)
+-		total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
+-	/*
+-	 * The iscsi layer needs some tasks for nop handling and tmfs,
+-	 * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX
+-	 * + 1 command for scsi IO.
+-	 */
+-	if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
+-		printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+-		       "must be a power of two that is at least %d.\n",
+-		       total_cmds, ISCSI_TOTAL_CMDS_MIN);
+-		return NULL;
+-	}
+-
+-	if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
+-		printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+-		       "must be a power of 2 less than or equal to %d.\n",
+-		       cmds_max, ISCSI_TOTAL_CMDS_MAX);
+-		total_cmds = ISCSI_TOTAL_CMDS_MAX;
++	if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
++		if (qdepth != 0)
++			printk(KERN_ERR "iscsi: invalid queue depth of %d. "
++			      "Queue depth must be between 1 and %d.\n",
++			      qdepth, ISCSI_MAX_CMD_PER_LUN);
++		qdepth = ISCSI_DEF_CMD_PER_LUN;
+ 	}
+ 
+-	if (!is_power_of_2(total_cmds)) {
+-		printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+-		       "must be a power of 2.\n", total_cmds);
+-		total_cmds = rounddown_pow_of_two(total_cmds);
+-		if (total_cmds < ISCSI_TOTAL_CMDS_MIN)
+-			return NULL;
+-		printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n",
+-		       total_cmds);
++	if (!is_power_of_2(cmds_max) || cmds_max >= ISCSI_MGMT_ITT_OFFSET ||
++	    cmds_max < 2) {
++		if (cmds_max != 0)
++			printk(KERN_ERR "iscsi: invalid can_queue of %d. "
++			       "can_queue must be a power of 2 and between "
++			       "2 and %d - setting to %d.\n", cmds_max,
++			       ISCSI_MGMT_ITT_OFFSET, ISCSI_DEF_XMIT_CMDS_MAX);
++		cmds_max = ISCSI_DEF_XMIT_CMDS_MAX;
+ 	}
+-	scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;
+ 
+-	cls_session = iscsi_alloc_session(shost, iscsit,
+-					  sizeof(struct iscsi_session));
+-	if (!cls_session)
++	shost = scsi_host_alloc(iscsit->host_template,
++				hostdata_privsize(sizeof(*session)));
++	if (!shost)
+ 		return NULL;
+-	session = cls_session->dd_data;
+-	session->cls_session = cls_session;
++
++	/* the iscsi layer takes one task for reserve */
++	shost->can_queue = cmds_max - 1;
++	shost->cmd_per_lun = qdepth;
++	shost->max_id = 1;
++	shost->max_channel = 0;
++	shost->max_lun = iscsit->max_lun;
++	shost->max_cmd_len = iscsit->max_cmd_len;
++	shost->transportt = scsit;
++	shost->transportt->create_work_queue = 1;
++	shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
++	*hostno = shost->host_no;
++
++	session = iscsi_hostdata(shost->hostdata);
++	memset(session, 0, sizeof(struct iscsi_session));
+ 	session->host = shost;
+ 	session->state = ISCSI_STATE_FREE;
+ 	session->fast_abort = 1;
+ 	session->lu_reset_timeout = 15;
+ 	session->abort_timeout = 10;
+-	session->scsi_cmds_max = scsi_cmds;
+-	session->cmds_max = total_cmds;
++	session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
++	session->cmds_max = cmds_max;
+ 	session->queued_cmdsn = session->cmdsn = initial_cmdsn;
+ 	session->exp_cmdsn = initial_cmdsn + 1;
+ 	session->max_cmdsn = initial_cmdsn + 1;
+ 	session->max_r2t = 1;
+ 	session->tt = iscsit;
+ 	mutex_init(&session->eh_mutex);
+-	spin_lock_init(&session->lock);
+ 
+ 	/* initialize SCSI PDU commands pool */
+ 	if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
+ 			    (void***)&session->cmds,
+-			    cmd_task_size + sizeof(struct iscsi_task)))
++			    cmd_task_size + sizeof(struct iscsi_cmd_task)))
+ 		goto cmdpool_alloc_fail;
+ 
+ 	/* pre-format cmds pool with ITT */
+ 	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+-		struct iscsi_task *task = session->cmds[cmd_i];
++		struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
+ 
+ 		if (cmd_task_size)
+-			task->dd_data = &task[1];
+-		task->itt = cmd_i;
+-		INIT_LIST_HEAD(&task->running);
++			ctask->dd_data = &ctask[1];
++		ctask->itt = cmd_i;
++		INIT_LIST_HEAD(&ctask->running);
+ 	}
+ 
+-	if (!try_module_get(iscsit->owner))
+-		goto module_get_fail;
++	spin_lock_init(&session->lock);
+ 
+-	if (iscsi_add_session(cls_session, id))
++	/* initialize immediate command pool */
++	if (iscsi_pool_init(&session->mgmtpool, session->mgmtpool_max,
++			   (void***)&session->mgmt_cmds,
++			   mgmt_task_size + sizeof(struct iscsi_mgmt_task)))
++		goto mgmtpool_alloc_fail;
++
++
++	/* pre-format immediate cmds pool with ITT */
++	for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
++		struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
++
++		if (mgmt_task_size)
++			mtask->dd_data = &mtask[1];
++		mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
++		INIT_LIST_HEAD(&mtask->running);
++	}
++
++	if (scsi_add_host(shost, NULL))
++		goto add_host_fail;
++
++	if (!try_module_get(iscsit->owner))
+ 		goto cls_session_fail;
++
++	cls_session = iscsi_create_session(shost, iscsit, 0);
++	if (!cls_session)
++		goto module_put;
++	*(unsigned long*)shost->hostdata = (unsigned long)cls_session;
++
+ 	return cls_session;
+ 
+-cls_session_fail:
++module_put:
+ 	module_put(iscsit->owner);
+-module_get_fail:
++cls_session_fail:
++	scsi_remove_host(shost);
++add_host_fail:
++	iscsi_pool_free(&session->mgmtpool);
++mgmtpool_alloc_fail:
+ 	iscsi_pool_free(&session->cmdpool);
+ cmdpool_alloc_fail:
+-	iscsi_free_session(cls_session);
++	scsi_host_put(shost);
+ 	return NULL;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_session_setup);
+ 
+ /**
+  * iscsi_session_teardown - destroy session, host, and cls_session
+- * @cls_session: iscsi session
++ * shost: scsi host
+  *
+- * The driver must have called iscsi_remove_session before
+- * calling this.
+- */
++ * This can be used by software iscsi_transports that allocate
++ * a session per scsi host.
++ **/
+ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+ {
+-	struct iscsi_session *session = cls_session->dd_data;
++	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++	struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ 	struct module *owner = cls_session->transport->owner;
+ 
++	iscsi_remove_session(cls_session);
++	scsi_remove_host(shost);
++
++	iscsi_pool_free(&session->mgmtpool);
+ 	iscsi_pool_free(&session->cmdpool);
+ 
+ 	kfree(session->password);
+@@ -2051,10 +1838,12 @@ void iscsi_session_teardown(struct iscsi
+ 	kfree(session->username);
+ 	kfree(session->username_in);
+ 	kfree(session->targetname);
++	kfree(session->netdev);
++	kfree(session->hwaddress);
+ 	kfree(session->initiatorname);
+-	kfree(session->ifacename);
+ 
+-	iscsi_destroy_session(cls_session);
++	iscsi_free_session(cls_session);
++	scsi_host_put(shost);
+ 	module_put(owner);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
+@@ -2062,26 +1851,22 @@ EXPORT_SYMBOL_GPL(iscsi_session_teardown
+ /**
+  * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn
+  * @cls_session: iscsi_cls_session
+- * @dd_size: private driver data size
+  * @conn_idx: cid
+- */
++ **/
+ struct iscsi_cls_conn *
+-iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+-		 uint32_t conn_idx)
++iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ {
+-	struct iscsi_session *session = cls_session->dd_data;
++	struct iscsi_session *session = class_to_transport_session(cls_session);
+ 	struct iscsi_conn *conn;
+ 	struct iscsi_cls_conn *cls_conn;
+ 	char *data;
+ 
+-	cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size,
+-				     conn_idx);
++	cls_conn = iscsi_create_conn(cls_session, conn_idx);
+ 	if (!cls_conn)
+ 		return NULL;
+ 	conn = cls_conn->dd_data;
+-	memset(conn, 0, sizeof(*conn) + dd_size);
++	memset(conn, 0, sizeof(*conn));
+ 
+-	conn->dd_data = cls_conn->dd_data + sizeof(*conn);
+ 	conn->session = session;
+ 	conn->cls_conn = cls_conn;
+ 	conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
+@@ -2100,30 +1885,30 @@ iscsi_conn_setup(struct iscsi_cls_sessio
+ 	INIT_LIST_HEAD(&conn->requeue);
+ 	INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
+ 
+-	/* allocate login_task used for the login/text sequences */
++	/* allocate login_mtask used for the login/text sequences */
+ 	spin_lock_bh(&session->lock);
+-	if (!__kfifo_get(session->cmdpool.queue,
+-                         (void*)&conn->login_task,
++	if (!__kfifo_get(session->mgmtpool.queue,
++                         (void*)&conn->login_mtask,
+ 			 sizeof(void*))) {
+ 		spin_unlock_bh(&session->lock);
+-		goto login_task_alloc_fail;
++		goto login_mtask_alloc_fail;
+ 	}
+ 	spin_unlock_bh(&session->lock);
+ 
+ 	data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
+ 	if (!data)
+-		goto login_task_data_alloc_fail;
+-	conn->login_task->data = conn->data = data;
++		goto login_mtask_data_alloc_fail;
++	conn->login_mtask->data = conn->data = data;
+ 
+ 	init_timer(&conn->tmf_timer);
+ 	init_waitqueue_head(&conn->ehwait);
+ 
+ 	return cls_conn;
+ 
+-login_task_data_alloc_fail:
+-	__kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
++login_mtask_data_alloc_fail:
++	__kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+ 		    sizeof(void*));
+-login_task_alloc_fail:
++login_mtask_alloc_fail:
+ 	iscsi_destroy_conn(cls_conn);
+ 	return NULL;
+ }
+@@ -2183,7 +1968,7 @@ void iscsi_conn_teardown(struct iscsi_cl
+ 	spin_lock_bh(&session->lock);
+ 	kfree(conn->data);
+ 	kfree(conn->persistent_address);
+-	__kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
++	__kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+ 		    sizeof(void*));
+ 	if (session->leadconn == conn)
+ 		session->leadconn = NULL;
+@@ -2255,7 +2040,7 @@ int iscsi_conn_start(struct iscsi_cls_co
+ 	}
+ 	spin_unlock_bh(&session->lock);
+ 
+-	iscsi_unblock_session(session->cls_session);
++	iscsi_unblock_session(session_to_cls(session));
+ 	wake_up(&conn->ehwait);
+ 	return 0;
+ }
+@@ -2264,23 +2049,21 @@ EXPORT_SYMBOL_GPL(iscsi_conn_start);
+ static void
+ flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
+ {
+-	struct iscsi_task *task, *tmp;
++	struct iscsi_mgmt_task *mtask, *tmp;
+ 
+ 	/* handle pending */
+-	list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) {
+-		debug_scsi("flushing pending mgmt task itt 0x%x\n", task->itt);
+-		/* release ref from prep task */
+-		__iscsi_put_task(task);
++	list_for_each_entry_safe(mtask, tmp, &conn->mgmtqueue, running) {
++		debug_scsi("flushing pending mgmt task itt 0x%x\n", mtask->itt);
++		iscsi_free_mgmt_task(conn, mtask);
+ 	}
+ 
+ 	/* handle running */
+-	list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) {
+-		debug_scsi("flushing running mgmt task itt 0x%x\n", task->itt);
+-		/* release ref from prep task */
+-		__iscsi_put_task(task);
++	list_for_each_entry_safe(mtask, tmp, &conn->mgmt_run_list, running) {
++		debug_scsi("flushing running mgmt task itt 0x%x\n", mtask->itt);
++		iscsi_free_mgmt_task(conn, mtask);
+ 	}
+ 
+-	conn->task = NULL;
++	conn->mtask = NULL;
+ }
+ 
+ static void iscsi_start_session_recovery(struct iscsi_session *session,
+@@ -2299,6 +2082,17 @@ static void iscsi_start_session_recovery
+ 	}
+ 
+ 	/*
++	 * The LLD either freed/unset the lock on us, or userspace called
++	 * stop but did not create a proper connection (connection was never
++	 * bound or it was unbound then stop was called).
++	 */
++	if (!conn->recv_lock) {
++		spin_unlock_bh(&session->lock);
++		mutex_unlock(&session->eh_mutex);
++		return;
++	}
++
++	/*
+ 	 * When this is called for the in_login state, we only want to clean
+ 	 * up the login task and connection. We do not need to block and set
+ 	 * the recovery state again
+@@ -2314,6 +2108,11 @@ static void iscsi_start_session_recovery
+ 	spin_unlock_bh(&session->lock);
+ 
+ 	iscsi_suspend_tx(conn);
++
++	write_lock_bh(conn->recv_lock);
++	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
++	write_unlock_bh(conn->recv_lock);
++
+ 	/*
+ 	 * for connection level recovery we should not calculate
+ 	 * header digest. conn->hdr_size used for optimization
+@@ -2326,7 +2125,7 @@ static void iscsi_start_session_recovery
+ 		if (session->state == ISCSI_STATE_IN_RECOVERY &&
+ 		    old_stop_stage != STOP_CONN_RECOVER) {
+ 			debug_scsi("blocking session\n");
+-			iscsi_block_session(session->cls_session);
++			iscsi_block_session(session_to_cls(session));
+ 		}
+ 	}
+ 
+@@ -2361,7 +2160,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_stop);
+ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
+ 		    struct iscsi_cls_conn *cls_conn, int is_leading)
+ {
+-	struct iscsi_session *session = cls_session->dd_data;
++	struct iscsi_session *session = class_to_transport_session(cls_session);
+ 	struct iscsi_conn *conn = cls_conn->dd_data;
+ 
+ 	spin_lock_bh(&session->lock);
+@@ -2500,14 +2299,6 @@ int iscsi_set_param(struct iscsi_cls_con
+ 		if (!conn->persistent_address)
+ 			return -ENOMEM;
+ 		break;
+-	case ISCSI_PARAM_IFACE_NAME:
+-		if (!session->ifacename)
+-			session->ifacename = kstrdup(buf, GFP_KERNEL);
+-		break;
+-	case ISCSI_PARAM_INITIATOR_NAME:
+-		if (!session->initiatorname)
+-			session->initiatorname = kstrdup(buf, GFP_KERNEL);
+-		break;
+ 	default:
+ 		return -ENOSYS;
+ 	}
+@@ -2519,7 +2310,8 @@ EXPORT_SYMBOL_GPL(iscsi_set_param);
+ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ 			    enum iscsi_param param, char *buf)
+ {
+-	struct iscsi_session *session = cls_session->dd_data;
++	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++	struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ 	int len;
+ 
+ 	switch(param) {
+@@ -2574,15 +2366,6 @@ int iscsi_session_get_param(struct iscsi
+ 	case ISCSI_PARAM_PASSWORD_IN:
+ 		len = sprintf(buf, "%s\n", session->password_in);
+ 		break;
+-	case ISCSI_PARAM_IFACE_NAME:
+-		len = sprintf(buf, "%s\n", session->ifacename);
+-		break;
+-	case ISCSI_PARAM_INITIATOR_NAME:
+-		if (!session->initiatorname)
+-			len = sprintf(buf, "%s\n", "unknown");
+-		else
+-			len = sprintf(buf, "%s\n", session->initiatorname);
+-		break;
+ 	default:
+ 		return -ENOSYS;
+ 	}
+@@ -2642,35 +2425,29 @@ EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
+ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+ 			 char *buf)
+ {
+-	struct iscsi_host *ihost = shost_priv(shost);
++	struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ 	int len;
+ 
+ 	switch (param) {
+ 	case ISCSI_HOST_PARAM_NETDEV_NAME:
+-		if (!ihost->netdev)
++		if (!session->netdev)
+ 			len = sprintf(buf, "%s\n", "default");
+ 		else
+-			len = sprintf(buf, "%s\n", ihost->netdev);
++			len = sprintf(buf, "%s\n", session->netdev);
+ 		break;
+ 	case ISCSI_HOST_PARAM_HWADDRESS:
+-		if (!ihost->hwaddress)
++		if (!session->hwaddress)
+ 			len = sprintf(buf, "%s\n", "default");
+ 		else
+-			len = sprintf(buf, "%s\n", ihost->hwaddress);
++			len = sprintf(buf, "%s\n", session->hwaddress);
+ 		break;
+ 	case ISCSI_HOST_PARAM_INITIATOR_NAME:
+-		if (!ihost->initiatorname)
+-			len = sprintf(buf, "%s\n", "unknown");
+-		else
+-			len = sprintf(buf, "%s\n", ihost->initiatorname);
+-		break;
+-	case ISCSI_HOST_PARAM_IPADDRESS:
+-		if (!strlen(ihost->local_address))
++		if (!session->initiatorname)
+ 			len = sprintf(buf, "%s\n", "unknown");
+ 		else
+-			len = sprintf(buf, "%s\n",
+-				      ihost->local_address);
++			len = sprintf(buf, "%s\n", session->initiatorname);
+ 		break;
++
+ 	default:
+ 		return -ENOSYS;
+ 	}
+@@ -2682,20 +2459,20 @@ EXPORT_SYMBOL_GPL(iscsi_host_get_param);
+ int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+ 			 char *buf, int buflen)
+ {
+-	struct iscsi_host *ihost = shost_priv(shost);
++	struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ 
+ 	switch (param) {
+ 	case ISCSI_HOST_PARAM_NETDEV_NAME:
+-		if (!ihost->netdev)
+-			ihost->netdev = kstrdup(buf, GFP_KERNEL);
++		if (!session->netdev)
++			session->netdev = kstrdup(buf, GFP_KERNEL);
+ 		break;
+ 	case ISCSI_HOST_PARAM_HWADDRESS:
+-		if (!ihost->hwaddress)
+-			ihost->hwaddress = kstrdup(buf, GFP_KERNEL);
++		if (!session->hwaddress)
++			session->hwaddress = kstrdup(buf, GFP_KERNEL);
+ 		break;
+ 	case ISCSI_HOST_PARAM_INITIATOR_NAME:
+-		if (!ihost->initiatorname)
+-			ihost->initiatorname = kstrdup(buf, GFP_KERNEL);
++		if (!session->initiatorname)
++			session->initiatorname = kstrdup(buf, GFP_KERNEL);
+ 		break;
+ 	default:
+ 		return -ENOSYS;
+Index: ofed_kernel/drivers/scsi/scsi_transport_iscsi.c
+===================================================================
+--- ofed_kernel.orig/drivers/scsi/scsi_transport_iscsi.c
++++ ofed_kernel/drivers/scsi/scsi_transport_iscsi.c
+@@ -30,24 +30,23 @@
+ #include <scsi/scsi_transport_iscsi.h>
+ #include <scsi/iscsi_if.h>
+ 
+-#define ISCSI_SESSION_ATTRS 21
++#define ISCSI_SESSION_ATTRS 19
+ #define ISCSI_CONN_ATTRS 13
+ #define ISCSI_HOST_ATTRS 4
+-
+-#define ISCSI_TRANSPORT_VERSION "2.0-870"
++#define ISCSI_TRANSPORT_VERSION "2.0-869"
+ 
+ struct iscsi_internal {
+ 	int daemon_pid;
+ 	struct scsi_transport_template t;
+ 	struct iscsi_transport *iscsi_transport;
+ 	struct list_head list;
+-	struct device dev;
++	struct class_device cdev;
+ 
+-	struct device_attribute *host_attrs[ISCSI_HOST_ATTRS + 1];
++	struct class_device_attribute *host_attrs[ISCSI_HOST_ATTRS + 1];
+ 	struct transport_container conn_cont;
+-	struct device_attribute *conn_attrs[ISCSI_CONN_ATTRS + 1];
++	struct class_device_attribute *conn_attrs[ISCSI_CONN_ATTRS + 1];
+ 	struct transport_container session_cont;
+-	struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
++	struct class_device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
+ };
+ 
+ static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
+@@ -64,12 +63,12 @@ static DEFINE_SPINLOCK(iscsi_transport_l
+ #define to_iscsi_internal(tmpl) \
+ 	container_of(tmpl, struct iscsi_internal, t)
+ 
+-#define dev_to_iscsi_internal(_dev) \
+-	container_of(_dev, struct iscsi_internal, dev)
++#define cdev_to_iscsi_internal(_cdev) \
++	container_of(_cdev, struct iscsi_internal, cdev)
+ 
+-static void iscsi_transport_release(struct device *dev)
++static void iscsi_transport_release(struct class_device *cdev)
+ {
+-	struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
++	struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);
+ 	kfree(priv);
+ }
+ 
+@@ -79,33 +78,37 @@ static void iscsi_transport_release(stru
+  */
+ static struct class iscsi_transport_class = {
+ 	.name = "iscsi_transport",
+-	.dev_release = iscsi_transport_release,
++	.release = iscsi_transport_release,
+ };
+ 
+ static ssize_t
+-show_transport_handle(struct device *dev, struct device_attribute *attr,
+-		      char *buf)
++show_transport_handle(struct class_device *cdev, char *buf)
+ {
+-	struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
++	struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);
+ 	return sprintf(buf, "%llu\n", (unsigned long long)iscsi_handle(priv->iscsi_transport));
+ }
+-static DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);
++static CLASS_DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);
+ 
+ #define show_transport_attr(name, format)				\
+ static ssize_t								\
+-show_transport_##name(struct device *dev, 				\
+-		      struct device_attribute *attr,char *buf)		\
++show_transport_##name(struct class_device *cdev, char *buf)		\
+ {									\
+-	struct iscsi_internal *priv = dev_to_iscsi_internal(dev);	\
++	struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);	\
+ 	return sprintf(buf, format"\n", priv->iscsi_transport->name);	\
+ }									\
+-static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
++static CLASS_DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
+ 
+ show_transport_attr(caps, "0x%x");
++show_transport_attr(max_lun, "%d");
++show_transport_attr(max_conn, "%d");
++show_transport_attr(max_cmd_len, "%d");
+ 
+ static struct attribute *iscsi_transport_attrs[] = {
+-	&dev_attr_handle.attr,
+-	&dev_attr_caps.attr,
++	&class_device_attr_handle.attr,
++	&class_device_attr_caps.attr,
++	&class_device_attr_max_lun.attr,
++	&class_device_attr_max_conn.attr,
++	&class_device_attr_max_cmd_len.attr,
+ 	NULL,
+ };
+ 
+@@ -113,142 +116,21 @@ static struct attribute_group iscsi_tran
+ 	.attrs = iscsi_transport_attrs,
+ };
+ 
+-/*
+- * iSCSI endpoint attrs
+- */
+-#define iscsi_dev_to_endpoint(_dev) \
+-	container_of(_dev, struct iscsi_endpoint, dev)
+-
+-#define ISCSI_ATTR(_prefix,_name,_mode,_show,_store)	\
+-struct device_attribute dev_attr_##_prefix##_##_name =	\
+-        __ATTR(_name,_mode,_show,_store)
+-
+-static void iscsi_endpoint_release(struct device *dev)
+-{
+-	struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+-	kfree(ep);
+-}
+-
+-static struct class iscsi_endpoint_class = {
+-	.name = "iscsi_endpoint",
+-	.dev_release = iscsi_endpoint_release,
+-};
+-
+-static ssize_t
+-show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
+-{
+-	struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+-	return sprintf(buf, "%u\n", ep->id);
+-}
+-static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
+-
+-static struct attribute *iscsi_endpoint_attrs[] = {
+-	&dev_attr_ep_handle.attr,
+-	NULL,
+-};
+-
+-static struct attribute_group iscsi_endpoint_group = {
+-	.attrs = iscsi_endpoint_attrs,
+-};
+-
+-#define ISCSI_MAX_EPID -1
+-
+-static int iscsi_match_epid(struct device *dev, void *data)
+-{
+-	struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+-	unsigned int *epid = (unsigned int *) data;
+-
+-	return *epid == ep->id;
+-}
+-
+-struct iscsi_endpoint *
+-iscsi_create_endpoint(int dd_size)
+-{
+-	struct device *dev;
+-	struct iscsi_endpoint *ep;
+-	unsigned int id;
+-	int err;
+-
+-	for (id = 1; id < ISCSI_MAX_EPID; id++) {
+-		dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
+-					iscsi_match_epid);
+-		if (!dev)
+-			break;
+-	}
+-	if (id == ISCSI_MAX_EPID) {
+-		printk(KERN_ERR "Too many connections. Max supported %u\n",
+-		       ISCSI_MAX_EPID - 1);
+-		return NULL;
+-	}
+-
+-	ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
+-	if (!ep)
+-		return NULL;
+-
+-	ep->id = id;
+-	ep->dev.class = &iscsi_endpoint_class;
+-	snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%u", id);
+-	err = device_register(&ep->dev);
+-        if (err)
+-                goto free_ep;
+-
+-	err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
+-	if (err)
+-		goto unregister_dev;
+-
+-	if (dd_size)
+-		ep->dd_data = &ep[1];
+-	return ep;
+-
+-unregister_dev:
+-	device_unregister(&ep->dev);
+-	return NULL;
+-
+-free_ep:
+-	kfree(ep);
+-	return NULL;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_create_endpoint);
+-
+-void iscsi_destroy_endpoint(struct iscsi_endpoint *ep)
+-{
+-	sysfs_remove_group(&ep->dev.kobj, &iscsi_endpoint_group);
+-	device_unregister(&ep->dev);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);
+-
+-struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
+-{
+-	struct iscsi_endpoint *ep;
+-	struct device *dev;
+ 
+-	dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
+-				iscsi_match_epid);
+-	if (!dev)
+-		return NULL;
+-
+-	ep = iscsi_dev_to_endpoint(dev);
+-	/*
+-	 * we can drop this now because the interface will prevent
+-	 * removals and lookups from racing.
+-	 */
+-	put_device(dev);
+-	return ep;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
+ 
+ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
+-			    struct device *cdev)
++			    struct class_device *cdev)
+ {
+ 	struct Scsi_Host *shost = dev_to_shost(dev);
+-	struct iscsi_cls_host *ihost = shost->shost_data;
++	struct iscsi_host *ihost = shost->shost_data;
+ 
+ 	memset(ihost, 0, sizeof(*ihost));
+-	atomic_set(&ihost->nr_scans, 0);
++	INIT_LIST_HEAD(&ihost->sessions);
+ 	mutex_init(&ihost->mutex);
++	atomic_set(&ihost->nr_scans, 0);
+ 
+-	snprintf(ihost->scan_workq_name, sizeof(ihost->scan_workq_name),
+-		 "iscsi_scan_%d", shost->host_no);
++	snprintf(ihost->scan_workq_name, KOBJ_NAME_LEN, "iscsi_scan_%d",
++		shost->host_no);
+ 	ihost->scan_workq = create_singlethread_workqueue(
+ 						ihost->scan_workq_name);
+ 	if (!ihost->scan_workq)
+@@ -257,10 +139,10 @@ static int iscsi_setup_host(struct trans
+ }
+ 
+ static int iscsi_remove_host(struct transport_container *tc, struct device *dev,
+-			     struct device *cdev)
++			     struct class_device *cdev)
+ {
+ 	struct Scsi_Host *shost = dev_to_shost(dev);
+-	struct iscsi_cls_host *ihost = shost->shost_data;
++	struct iscsi_host *ihost = shost->shost_data;
+ 
+ 	destroy_workqueue(ihost->scan_workq);
+ 	return 0;
+@@ -403,24 +285,6 @@ static int iscsi_is_session_dev(const st
+ 	return dev->release == iscsi_session_release;
+ }
+ 
+-static int iscsi_iter_session_fn(struct device *dev, void *data)
+-{
+-	void (* fn) (struct iscsi_cls_session *) = data;
+-
+-	if (!iscsi_is_session_dev(dev))
+-		return 0;
+-	fn(iscsi_dev_to_session(dev));
+-	return 0;
+-}
+-
+-void iscsi_host_for_each_session(struct Scsi_Host *shost,
+-				 void (*fn)(struct iscsi_cls_session *))
+-{
+-	device_for_each_child(&shost->shost_gendev, fn,
+-			      iscsi_iter_session_fn);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
+-
+ /**
+  * iscsi_scan_finished - helper to report when running scans are done
+  * @shost: scsi host
+@@ -431,7 +295,7 @@ EXPORT_SYMBOL_GPL(iscsi_host_for_each_se
+  */
+ int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
+ {
+-	struct iscsi_cls_host *ihost = shost->shost_data;
++	struct iscsi_host *ihost = shost->shost_data;
+ 	/*
+ 	 * qla4xxx will have kicked off some session unblocks before calling
+ 	 * scsi_scan_host, so just wait for them to complete.
+@@ -440,61 +304,22 @@ int iscsi_scan_finished(struct Scsi_Host
+ }
+ EXPORT_SYMBOL_GPL(iscsi_scan_finished);
+ 
+-struct iscsi_scan_data {
+-	unsigned int channel;
+-	unsigned int id;
+-	unsigned int lun;
+-};
+-
+-static int iscsi_user_scan_session(struct device *dev, void *data)
++static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
++			   uint id, uint lun)
+ {
+-	struct iscsi_scan_data *scan_data = data;
++	struct iscsi_host *ihost = shost->shost_data;
+ 	struct iscsi_cls_session *session;
+-	struct Scsi_Host *shost;
+-	struct iscsi_cls_host *ihost;
+-	unsigned long flags;
+-	unsigned int id;
+-
+-	if (!iscsi_is_session_dev(dev))
+-		return 0;
+-
+-	session = iscsi_dev_to_session(dev);
+-	shost = iscsi_session_to_shost(session);
+-	ihost = shost->shost_data;
+ 
+ 	mutex_lock(&ihost->mutex);
+-	spin_lock_irqsave(&session->lock, flags);
+-	if (session->state != ISCSI_SESSION_LOGGED_IN) {
+-		spin_unlock_irqrestore(&session->lock, flags);
+-		mutex_unlock(&ihost->mutex);
+-		return 0;
+-	}
+-	id = session->target_id;
+-	spin_unlock_irqrestore(&session->lock, flags);
+-
+-	if (id != ISCSI_MAX_TARGET) {
+-		if ((scan_data->channel == SCAN_WILD_CARD ||
+-		     scan_data->channel == 0) &&
+-		    (scan_data->id == SCAN_WILD_CARD ||
+-		     scan_data->id == id))
+-			scsi_scan_target(&session->dev, 0, id,
+-					 scan_data->lun, 1);
++	list_for_each_entry(session, &ihost->sessions, host_list) {
++		if ((channel == SCAN_WILD_CARD || channel == 0) &&
++		    (id == SCAN_WILD_CARD || id == session->target_id))
++			scsi_scan_target(&session->dev, 0,
++					 session->target_id, lun, 1);
+ 	}
+ 	mutex_unlock(&ihost->mutex);
+-	return 0;
+-}
+-
+-static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
+-			   uint id, uint lun)
+-{
+-	struct iscsi_scan_data scan_data;
+-
+-	scan_data.channel = channel;
+-	scan_data.id = id;
+-	scan_data.lun = lun;
+ 
+-	return device_for_each_child(&shost->shost_gendev, &scan_data,
+-				     iscsi_user_scan_session);
++	return 0;
+ }
+ 
+ static void iscsi_scan_session(struct work_struct *work)
+@@ -502,14 +327,19 @@ static void iscsi_scan_session(struct wo
+ 	struct iscsi_cls_session *session =
+ 			container_of(work, struct iscsi_cls_session, scan_work);
+ 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
+-	struct iscsi_cls_host *ihost = shost->shost_data;
+-	struct iscsi_scan_data scan_data;
++	struct iscsi_host *ihost = shost->shost_data;
++	unsigned long flags;
+ 
+-	scan_data.channel = 0;
+-	scan_data.id = SCAN_WILD_CARD;
+-	scan_data.lun = SCAN_WILD_CARD;
++	spin_lock_irqsave(&session->lock, flags);
++	if (session->state != ISCSI_SESSION_LOGGED_IN) {
++		spin_unlock_irqrestore(&session->lock, flags);
++		goto done;
++	}
++	spin_unlock_irqrestore(&session->lock, flags);
+ 
+-	iscsi_user_scan_session(&session->dev, &scan_data);
++	scsi_scan_target(&session->dev, 0, session->target_id,
++			 SCAN_WILD_CARD, 1);
++done:
+ 	atomic_dec(&ihost->nr_scans);
+ }
+ 
+@@ -549,7 +379,7 @@ static void __iscsi_unblock_session(stru
+ 			container_of(work, struct iscsi_cls_session,
+ 				     unblock_work);
+ 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
+-	struct iscsi_cls_host *ihost = shost->shost_data;
++	struct iscsi_host *ihost = shost->shost_data;
+ 	unsigned long flags;
+ 
+ 	/*
+@@ -617,19 +447,15 @@ static void __iscsi_unbind_session(struc
+ 			container_of(work, struct iscsi_cls_session,
+ 				     unbind_work);
+ 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
+-	struct iscsi_cls_host *ihost = shost->shost_data;
+-	unsigned long flags;
++	struct iscsi_host *ihost = shost->shost_data;
+ 
+ 	/* Prevent new scans and make sure scanning is not in progress */
+ 	mutex_lock(&ihost->mutex);
+-	spin_lock_irqsave(&session->lock, flags);
+-	if (session->target_id == ISCSI_MAX_TARGET) {
+-		spin_unlock_irqrestore(&session->lock, flags);
++	if (list_empty(&session->host_list)) {
+ 		mutex_unlock(&ihost->mutex);
+ 		return;
+ 	}
+-	session->target_id = ISCSI_MAX_TARGET;
+-	spin_unlock_irqrestore(&session->lock, flags);
++	list_del_init(&session->host_list);
+ 	mutex_unlock(&ihost->mutex);
+ 
+ 	scsi_remove_target(&session->dev);
+@@ -639,18 +465,18 @@ static void __iscsi_unbind_session(struc
+ static int iscsi_unbind_session(struct iscsi_cls_session *session)
+ {
+ 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
+-	struct iscsi_cls_host *ihost = shost->shost_data;
++	struct iscsi_host *ihost = shost->shost_data;
+ 
+ 	return queue_work(ihost->scan_workq, &session->unbind_work);
+ }
+ 
+ struct iscsi_cls_session *
+-iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+-		    int dd_size)
++iscsi_alloc_session(struct Scsi_Host *shost,
++		    struct iscsi_transport *transport)
+ {
+ 	struct iscsi_cls_session *session;
+ 
+-	session = kzalloc(sizeof(*session) + dd_size,
++	session = kzalloc(sizeof(*session) + transport->sessiondata_size,
+ 			  GFP_KERNEL);
+ 	if (!session)
+ 		return NULL;
+@@ -659,6 +485,7 @@ iscsi_alloc_session(struct Scsi_Host *sh
+ 	session->recovery_tmo = 120;
+ 	session->state = ISCSI_SESSION_FREE;
+ 	INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
++	INIT_LIST_HEAD(&session->host_list);
+ 	INIT_LIST_HEAD(&session->sess_list);
+ 	INIT_WORK(&session->unblock_work, __iscsi_unblock_session);
+ 	INIT_WORK(&session->block_work, __iscsi_block_session);
+@@ -671,57 +498,22 @@ iscsi_alloc_session(struct Scsi_Host *sh
+ 	session->dev.parent = &shost->shost_gendev;
+ 	session->dev.release = iscsi_session_release;
+ 	device_initialize(&session->dev);
+-	if (dd_size)
++	if (transport->sessiondata_size)
+ 		session->dd_data = &session[1];
+ 	return session;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_alloc_session);
+ 
+-static int iscsi_get_next_target_id(struct device *dev, void *data)
+-{
+-	struct iscsi_cls_session *session;
+-	unsigned long flags;
+-	int err = 0;
+-
+-	if (!iscsi_is_session_dev(dev))
+-		return 0;
+-
+-	session = iscsi_dev_to_session(dev);
+-	spin_lock_irqsave(&session->lock, flags);
+-	if (*((unsigned int *) data) == session->target_id)
+-		err = -EEXIST;
+-	spin_unlock_irqrestore(&session->lock, flags);
+-	return err;
+-}
+-
+ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
+ {
+ 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
+-	struct iscsi_cls_host *ihost;
++	struct iscsi_host *ihost;
+ 	unsigned long flags;
+-	unsigned int id = target_id;
+ 	int err;
+ 
+ 	ihost = shost->shost_data;
+ 	session->sid = atomic_add_return(1, &iscsi_session_nr);
+-
+-	if (id == ISCSI_MAX_TARGET) {
+-		for (id = 0; id < ISCSI_MAX_TARGET; id++) {
+-			err = device_for_each_child(&shost->shost_gendev, &id,
+-						    iscsi_get_next_target_id);
+-			if (!err)
+-				break;
+-		}
+-
+-		if (id == ISCSI_MAX_TARGET) {
+-			iscsi_cls_session_printk(KERN_ERR, session,
+-						 "Too many iscsi targets. Max "
+-						 "number of targets is %d.\n",
+-						 ISCSI_MAX_TARGET - 1);
+-			goto release_host;
+-		}
+-	}
+-	session->target_id = id;
++	session->target_id = target_id;
+ 
+ 	snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u",
+ 		 session->sid);
+@@ -737,6 +529,10 @@ int iscsi_add_session(struct iscsi_cls_s
+ 	list_add(&session->sess_list, &sesslist);
+ 	spin_unlock_irqrestore(&sesslock, flags);
+ 
++	mutex_lock(&ihost->mutex);
++	list_add(&session->host_list, &ihost->sessions);
++	mutex_unlock(&ihost->mutex);
++
+ 	iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION);
+ 	return 0;
+ 
+@@ -750,18 +546,18 @@ EXPORT_SYMBOL_GPL(iscsi_add_session);
+  * iscsi_create_session - create iscsi class session
+  * @shost: scsi host
+  * @transport: iscsi transport
+- * @dd_size: private driver data size
+  * @target_id: which target
+  *
+  * This can be called from a LLD or iscsi_transport.
+  */
+ struct iscsi_cls_session *
+-iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+-		     int dd_size, unsigned int target_id)
++iscsi_create_session(struct Scsi_Host *shost,
++		     struct iscsi_transport *transport,
++		     unsigned int target_id)
+ {
+ 	struct iscsi_cls_session *session;
+ 
+-	session = iscsi_alloc_session(shost, transport, dd_size);
++	session = iscsi_alloc_session(shost, transport);
+ 	if (!session)
+ 		return NULL;
+ 
+@@ -797,7 +593,7 @@ static int iscsi_iter_destroy_conn_fn(st
+ void iscsi_remove_session(struct iscsi_cls_session *session)
+ {
+ 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
+-	struct iscsi_cls_host *ihost = shost->shost_data;
++	struct iscsi_host *ihost = shost->shost_data;
+ 	unsigned long flags;
+ 	int err;
+ 
+@@ -863,7 +659,6 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session)
+ /**
+  * iscsi_create_conn - create iscsi class connection
+  * @session: iscsi cls session
+- * @dd_size: private driver data size
+  * @cid: connection id
+  *
+  * This can be called from a LLD or iscsi_transport. The connection
+@@ -876,17 +671,18 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session)
+  * non-zero.
+  */
+ struct iscsi_cls_conn *
+-iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
++iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
+ {
+ 	struct iscsi_transport *transport = session->transport;
+ 	struct iscsi_cls_conn *conn;
+ 	unsigned long flags;
+ 	int err;
+ 
+-	conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL);
++	conn = kzalloc(sizeof(*conn) + transport->conndata_size, GFP_KERNEL);
+ 	if (!conn)
+ 		return NULL;
+-	if (dd_size)
++
++	if (transport->conndata_size)
+ 		conn->dd_data = &conn[1];
+ 
+ 	INIT_LIST_HEAD(&conn->conn_list);
+@@ -1219,20 +1015,21 @@ int iscsi_session_event(struct iscsi_cls
+ EXPORT_SYMBOL_GPL(iscsi_session_event);
+ 
+ static int
+-iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
+-			struct iscsi_uevent *ev, uint32_t initial_cmdsn,
+-			uint16_t cmds_max, uint16_t queue_depth)
++iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev)
+ {
+ 	struct iscsi_transport *transport = priv->iscsi_transport;
+ 	struct iscsi_cls_session *session;
+-	uint32_t host_no;
++	uint32_t hostno;
+ 
+-	session = transport->create_session(ep, cmds_max, queue_depth,
+-					    initial_cmdsn, &host_no);
++	session = transport->create_session(transport, &priv->t,
++					    ev->u.c_session.cmds_max,
++					    ev->u.c_session.queue_depth,
++					    ev->u.c_session.initial_cmdsn,
++					    &hostno);
+ 	if (!session)
+ 		return -ENOMEM;
+ 
+-	ev->r.c_session_ret.host_no = host_no;
++	ev->r.c_session_ret.host_no = hostno;
+ 	ev->r.c_session_ret.sid = session->sid;
+ 	return 0;
+ }
+@@ -1307,7 +1104,6 @@ static int
+ iscsi_if_transport_ep(struct iscsi_transport *transport,
+ 		      struct iscsi_uevent *ev, int msg_type)
+ {
+-	struct iscsi_endpoint *ep;
+ 	struct sockaddr *dst_addr;
+ 	int rc = 0;
+ 
+@@ -1317,33 +1113,22 @@ iscsi_if_transport_ep(struct iscsi_trans
+ 			return -EINVAL;
+ 
+ 		dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
+-		ep = transport->ep_connect(dst_addr,
+-					   ev->u.ep_connect.non_blocking);
+-		if (IS_ERR(ep))
+-			return PTR_ERR(ep);
+-
+-		ev->r.ep_connect_ret.handle = ep->id;
++		rc = transport->ep_connect(dst_addr,
++					   ev->u.ep_connect.non_blocking,
++					   &ev->r.ep_connect_ret.handle);
+ 		break;
+ 	case ISCSI_UEVENT_TRANSPORT_EP_POLL:
+ 		if (!transport->ep_poll)
+ 			return -EINVAL;
+ 
+-		ep = iscsi_lookup_endpoint(ev->u.ep_poll.ep_handle);
+-		if (!ep)
+-			return -EINVAL;
+-
+-		ev->r.retcode = transport->ep_poll(ep,
++		ev->r.retcode = transport->ep_poll(ev->u.ep_poll.ep_handle,
+ 						   ev->u.ep_poll.timeout_ms);
+ 		break;
+ 	case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
+ 		if (!transport->ep_disconnect)
+ 			return -EINVAL;
+ 
+-		ep = iscsi_lookup_endpoint(ev->u.ep_disconnect.ep_handle);
+-		if (!ep)
+-			return -EINVAL;
+-
+-		transport->ep_disconnect(ep);
++		transport->ep_disconnect(ev->u.ep_disconnect.ep_handle);
+ 		break;
+ 	}
+ 	return rc;
+@@ -1408,7 +1193,6 @@ iscsi_if_recv_msg(struct sk_buff *skb, s
+ 	struct iscsi_internal *priv;
+ 	struct iscsi_cls_session *session;
+ 	struct iscsi_cls_conn *conn;
+-	struct iscsi_endpoint *ep = NULL;
+ 
+ 	priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
+ 	if (!priv)
+@@ -1422,22 +1206,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, s
+ 
+ 	switch (nlh->nlmsg_type) {
+ 	case ISCSI_UEVENT_CREATE_SESSION:
+-		err = iscsi_if_create_session(priv, ep, ev,
+-					      ev->u.c_session.initial_cmdsn,
+-					      ev->u.c_session.cmds_max,
+-					      ev->u.c_session.queue_depth);
+-		break;
+-	case ISCSI_UEVENT_CREATE_BOUND_SESSION:
+-		ep = iscsi_lookup_endpoint(ev->u.c_bound_session.ep_handle);
+-		if (!ep) {
+-			err = -EINVAL;
+-			break;
+-		}
+-
+-		err = iscsi_if_create_session(priv, ep, ev,
+-					ev->u.c_bound_session.initial_cmdsn,
+-					ev->u.c_bound_session.cmds_max,
+-					ev->u.c_bound_session.queue_depth);
++		err = iscsi_if_create_session(priv, ev);
+ 		break;
+ 	case ISCSI_UEVENT_DESTROY_SESSION:
+ 		session = iscsi_session_lookup(ev->u.d_session.sid);
+@@ -1568,8 +1337,11 @@ iscsi_if_rx(struct sk_buff *skb)
+ 	mutex_unlock(&rx_queue_mutex);
+ }
+ 
++#define iscsi_cdev_to_conn(_cdev) \
++	iscsi_dev_to_conn(_cdev->dev)
++
+ #define ISCSI_CLASS_ATTR(_prefix,_name,_mode,_show,_store)		\
+-struct device_attribute dev_attr_##_prefix##_##_name =	\
++struct class_device_attribute class_device_attr_##_prefix##_##_name =	\
+ 	__ATTR(_name,_mode,_show,_store)
+ 
+ /*
+@@ -1577,10 +1349,9 @@ struct device_attribute dev_attr_##_pref
+  */
+ #define iscsi_conn_attr_show(param)					\
+ static ssize_t								\
+-show_conn_param_##param(struct device *dev, 				\
+-			struct device_attribute *attr, char *buf)	\
++show_conn_param_##param(struct class_device *cdev, char *buf)		\
+ {									\
+-	struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent);	\
++	struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev);		\
+ 	struct iscsi_transport *t = conn->transport;			\
+ 	return t->get_conn_param(conn, param, buf);			\
+ }
+@@ -1604,16 +1375,17 @@ iscsi_conn_attr(address, ISCSI_PARAM_CON
+ iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO);
+ iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO);
+ 
++#define iscsi_cdev_to_session(_cdev) \
++	iscsi_dev_to_session(_cdev->dev)
++
+ /*
+  * iSCSI session attrs
+  */
+ #define iscsi_session_attr_show(param, perm)				\
+ static ssize_t								\
+-show_session_param_##param(struct device *dev,				\
+-			   struct device_attribute *attr, char *buf)	\
++show_session_param_##param(struct class_device *cdev, char *buf)	\
+ {									\
+-	struct iscsi_cls_session *session = 				\
+-		iscsi_dev_to_session(dev->parent);			\
++	struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \
+ 	struct iscsi_transport *t = session->transport;			\
+ 									\
+ 	if (perm && !capable(CAP_SYS_ADMIN))				\
+@@ -1643,14 +1415,11 @@ iscsi_session_attr(password_in, ISCSI_PA
+ iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
+ iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
+ iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
+-iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0);
+-iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0)
+ 
+ static ssize_t
+-show_priv_session_state(struct device *dev, struct device_attribute *attr,
+-			char *buf)
++show_priv_session_state(struct class_device *cdev, char *buf)
+ {
+-	struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
++	struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev);
+ 	return sprintf(buf, "%s\n", iscsi_session_state_name(session->state));
+ }
+ static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
+@@ -1658,11 +1427,9 @@ static ISCSI_CLASS_ATTR(priv_sess, state
+ 
+ #define iscsi_priv_session_attr_show(field, format)			\
+ static ssize_t								\
+-show_priv_session_##field(struct device *dev, 				\
+-			  struct device_attribute *attr, char *buf)	\
++show_priv_session_##field(struct class_device *cdev, char *buf)		\
+ {									\
+-	struct iscsi_cls_session *session = 				\
+-			iscsi_dev_to_session(dev->parent);		\
++	struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev);\
+ 	return sprintf(buf, format"\n", session->field);		\
+ }
+ 
+@@ -1677,10 +1444,9 @@ iscsi_priv_session_attr(recovery_tmo, "%
+  */
+ #define iscsi_host_attr_show(param)					\
+ static ssize_t								\
+-show_host_param_##param(struct device *dev, 				\
+-			struct device_attribute *attr, char *buf)	\
++show_host_param_##param(struct class_device *cdev, char *buf)		\
+ {									\
+-	struct Scsi_Host *shost = transport_class_to_shost(dev);	\
++	struct Scsi_Host *shost = transport_class_to_shost(cdev);	\
+ 	struct iscsi_internal *priv = to_iscsi_internal(shost->transportt); \
+ 	return priv->iscsi_transport->get_host_param(shost, param, buf); \
+ }
+@@ -1697,7 +1463,7 @@ iscsi_host_attr(initiatorname, ISCSI_HOS
+ 
+ #define SETUP_PRIV_SESSION_RD_ATTR(field)				\
+ do {									\
+-	priv->session_attrs[count] = &dev_attr_priv_sess_##field; \
++	priv->session_attrs[count] = &class_device_attr_priv_sess_##field; \
+ 	count++;							\
+ } while (0)
+ 
+@@ -1705,7 +1471,7 @@ do {									\
+ #define SETUP_SESSION_RD_ATTR(field, param_flag)			\
+ do {									\
+ 	if (tt->param_mask & param_flag) {				\
+-		priv->session_attrs[count] = &dev_attr_sess_##field; \
++		priv->session_attrs[count] = &class_device_attr_sess_##field; \
+ 		count++;						\
+ 	}								\
+ } while (0)
+@@ -1713,7 +1479,7 @@ do {									\
+ #define SETUP_CONN_RD_ATTR(field, param_flag)				\
+ do {									\
+ 	if (tt->param_mask & param_flag) {				\
+-		priv->conn_attrs[count] = &dev_attr_conn_##field; \
++		priv->conn_attrs[count] = &class_device_attr_conn_##field; \
+ 		count++;						\
+ 	}								\
+ } while (0)
+@@ -1721,7 +1487,7 @@ do {									\
+ #define SETUP_HOST_RD_ATTR(field, param_flag)				\
+ do {									\
+ 	if (tt->host_param_mask & param_flag) {				\
+-		priv->host_attrs[count] = &dev_attr_host_##field; \
++		priv->host_attrs[count] = &class_device_attr_host_##field; \
+ 		count++;						\
+ 	}								\
+ } while (0)
+@@ -1811,24 +1577,22 @@ iscsi_register_transport(struct iscsi_tr
+ 	priv->daemon_pid = -1;
+ 	priv->iscsi_transport = tt;
+ 	priv->t.user_scan = iscsi_user_scan;
+-	if (!(tt->caps & CAP_DATA_PATH_OFFLOAD))
+-		priv->t.create_work_queue = 1;
+ 
+-	priv->dev.class = &iscsi_transport_class;
+-	snprintf(priv->dev.bus_id, BUS_ID_SIZE, "%s", tt->name);
+-	err = device_register(&priv->dev);
++	priv->cdev.class = &iscsi_transport_class;
++	snprintf(priv->cdev.class_id, BUS_ID_SIZE, "%s", tt->name);
++	err = class_device_register(&priv->cdev);
+ 	if (err)
+ 		goto free_priv;
+ 
+-	err = sysfs_create_group(&priv->dev.kobj, &iscsi_transport_group);
++	err = sysfs_create_group(&priv->cdev.kobj, &iscsi_transport_group);
+ 	if (err)
+-		goto unregister_dev;
++		goto unregister_cdev;
+ 
+ 	/* host parameters */
+ 	priv->t.host_attrs.ac.attrs = &priv->host_attrs[0];
+ 	priv->t.host_attrs.ac.class = &iscsi_host_class.class;
+ 	priv->t.host_attrs.ac.match = iscsi_host_match;
+-	priv->t.host_size = sizeof(struct iscsi_cls_host);
++	priv->t.host_size = sizeof(struct iscsi_host);
+ 	transport_container_register(&priv->t.host_attrs);
+ 
+ 	SETUP_HOST_RD_ATTR(netdev, ISCSI_HOST_NETDEV_NAME);
+@@ -1886,8 +1650,6 @@ iscsi_register_transport(struct iscsi_tr
+ 	SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT);
+ 	SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO);
+ 	SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO);
+-	SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME);
+-	SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME);
+ 	SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
+ 	SETUP_PRIV_SESSION_RD_ATTR(state);
+ 
+@@ -1901,9 +1663,8 @@ iscsi_register_transport(struct iscsi_tr
+ 	printk(KERN_NOTICE "iscsi: registered transport (%s)\n", tt->name);
+ 	return &priv->t;
+ 
+-unregister_dev:
+-	device_unregister(&priv->dev);
+-	return NULL;
++unregister_cdev:
++	class_device_unregister(&priv->cdev);
+ free_priv:
+ 	kfree(priv);
+ 	return NULL;
+@@ -1930,8 +1691,8 @@ int iscsi_unregister_transport(struct is
+ 	transport_container_unregister(&priv->session_cont);
+ 	transport_container_unregister(&priv->t.host_attrs);
+ 
+-	sysfs_remove_group(&priv->dev.kobj, &iscsi_transport_group);
+-	device_unregister(&priv->dev);
++	sysfs_remove_group(&priv->cdev.kobj, &iscsi_transport_group);
++	class_device_unregister(&priv->cdev);
+ 	mutex_unlock(&rx_queue_mutex);
+ 
+ 	return 0;
+@@ -1951,13 +1712,9 @@ static __init int iscsi_transport_init(v
+ 	if (err)
+ 		return err;
+ 
+-	err = class_register(&iscsi_endpoint_class);
+-	if (err)
+-		goto unregister_transport_class;
+-
+ 	err = transport_class_register(&iscsi_host_class);
+ 	if (err)
+-		goto unregister_endpoint_class;
++		goto unregister_transport_class;
+ 
+ 	err = transport_class_register(&iscsi_connection_class);
+ 	if (err)
+@@ -1967,8 +1724,8 @@ static __init int iscsi_transport_init(v
+ 	if (err)
+ 		goto unregister_conn_class;
+ 
+-	nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx,
+-				    NULL, THIS_MODULE);
++	nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx, NULL,
++			THIS_MODULE);
+ 	if (!nls) {
+ 		err = -ENOBUFS;
+ 		goto unregister_session_class;
+@@ -1988,8 +1745,6 @@ unregister_conn_class:
+ 	transport_class_unregister(&iscsi_connection_class);
+ unregister_host_class:
+ 	transport_class_unregister(&iscsi_host_class);
+-unregister_endpoint_class:
+-	class_unregister(&iscsi_endpoint_class);
+ unregister_transport_class:
+ 	class_unregister(&iscsi_transport_class);
+ 	return err;
+@@ -2002,7 +1757,6 @@ static void __exit iscsi_transport_exit(
+ 	transport_class_unregister(&iscsi_connection_class);
+ 	transport_class_unregister(&iscsi_session_class);
+ 	transport_class_unregister(&iscsi_host_class);
+-	class_unregister(&iscsi_endpoint_class);
+ 	class_unregister(&iscsi_transport_class);
+ }
+ 
+Index: ofed_kernel/include/scsi/libiscsi.h
+===================================================================
+--- ofed_kernel.orig/include/scsi/libiscsi.h
++++ ofed_kernel/include/scsi/libiscsi.h
+@@ -24,7 +24,6 @@
+ #define LIBISCSI_H
+ 
+ #include <linux/types.h>
+-#include <linux/wait.h>
+ #include <linux/mutex.h>
+ #include <linux/timer.h>
+ #include <linux/workqueue.h>
+@@ -32,7 +31,6 @@
+ #include <scsi/iscsi_if.h>
+ 
+ struct scsi_transport_template;
+-struct scsi_host_template;
+ struct scsi_device;
+ struct Scsi_Host;
+ struct scsi_cmnd;
+@@ -42,7 +40,6 @@ struct iscsi_cls_session;
+ struct iscsi_cls_conn;
+ struct iscsi_session;
+ struct iscsi_nopin;
+-struct device;
+ 
+ /* #define DEBUG_SCSI */
+ #ifdef DEBUG_SCSI
+@@ -52,7 +49,9 @@ struct device;
+ #endif
+ 
+ #define ISCSI_DEF_XMIT_CMDS_MAX	128	/* must be power of 2 */
+-#define ISCSI_MGMT_CMDS_MAX	15
++#define ISCSI_MGMT_CMDS_MAX	16	/* must be power of 2 */
++
++#define ISCSI_MGMT_ITT_OFFSET	0xa00
+ 
+ #define ISCSI_DEF_CMD_PER_LUN		32
+ #define ISCSI_MAX_CMD_PER_LUN		128
+@@ -70,10 +69,7 @@ enum {
+ /* Connection suspend "bit" */
+ #define ISCSI_SUSPEND_BIT		1
+ 
+-#define ISCSI_ITT_MASK			(0x1fff)
+-#define ISCSI_TOTAL_CMDS_MAX		4096
+-/* this must be a power of two greater than ISCSI_MGMT_CMDS_MAX */
+-#define ISCSI_TOTAL_CMDS_MIN		16
++#define ISCSI_ITT_MASK			(0xfff)
+ #define ISCSI_AGE_SHIFT			28
+ #define ISCSI_AGE_MASK			(0xf << ISCSI_AGE_SHIFT)
+ 
+@@ -86,6 +82,18 @@ enum {
+ 	ISCSI_DIGEST_SIZE = sizeof(__u32),
+ };
+ 
++struct iscsi_mgmt_task {
++	/*
++	 * Because LLDs allocate their hdr differently, this is a pointer to
++	 * that storage. It must be setup at session creation time.
++	 */
++	struct iscsi_hdr	*hdr;
++	char			*data;		/* mgmt payload */
++	unsigned		data_count;	/* counts data to be sent */
++	uint32_t		itt;		/* this ITT */
++	void			*dd_data;	/* driver/transport data */
++	struct list_head	running;
++};
+ 
+ enum {
+ 	ISCSI_TASK_COMPLETED,
+@@ -93,7 +101,7 @@ enum {
+ 	ISCSI_TASK_RUNNING,
+ };
+ 
+-struct iscsi_task {
++struct iscsi_cmd_task {
+ 	/*
+ 	 * Because LLDs allocate their hdr differently, this is a pointer
+ 	 * and length to that storage. It must be setup at session
+@@ -110,7 +118,6 @@ struct iscsi_task {
+ 	/* offset in unsolicited stream (bytes); */
+ 	unsigned		unsol_offset;
+ 	unsigned		data_count;	/* remaining Data-Out */
+-	char			*data;		/* mgmt payload */
+ 	struct scsi_cmnd	*sc;		/* associated SCSI cmd*/
+ 	struct iscsi_conn	*conn;		/* used connection    */
+ 
+@@ -121,9 +128,9 @@ struct iscsi_task {
+ 	void			*dd_data;	/* driver/transport data */
+ };
+ 
+-static inline void* iscsi_next_hdr(struct iscsi_task *task)
++static inline void* iscsi_next_hdr(struct iscsi_cmd_task *ctask)
+ {
+-	return (void*)task->hdr + task->hdr_len;
++	return (void*)ctask->hdr + ctask->hdr_len;
+ }
+ 
+ /* Connection's states */
+@@ -139,6 +146,11 @@ struct iscsi_conn {
+ 	void			*dd_data;	/* iscsi_transport data */
+ 	struct iscsi_session	*session;	/* parent session */
+ 	/*
++	 * LLDs should set this lock. It protects the transport recv
++	 * code
++	 */
++	rwlock_t		*recv_lock;
++	/*
+ 	 * conn_stop() flag: stop to recover, stop to terminate
+ 	 */
+         int			stop_stage;
+@@ -147,7 +159,7 @@ struct iscsi_conn {
+ 	unsigned long		last_ping;
+ 	int			ping_timeout;
+ 	int			recv_timeout;
+-	struct iscsi_task 	*ping_task;
++	struct iscsi_mgmt_task	*ping_mtask;
+ 
+ 	/* iSCSI connection-wide sequencing */
+ 	uint32_t		exp_statsn;
+@@ -163,8 +175,9 @@ struct iscsi_conn {
+ 	 * should always fit in this buffer
+ 	 */
+ 	char			*data;
+-	struct iscsi_task 	*login_task;	/* mtask used for login/text */
+-	struct iscsi_task	*task;		/* xmit task in progress */
++	struct iscsi_mgmt_task	*login_mtask;	/* mtask used for login/text */
++	struct iscsi_mgmt_task	*mtask;		/* xmit mtask in progress */
++	struct iscsi_cmd_task	*ctask;		/* xmit ctask in progress */
+ 
+ 	/* xmit */
+ 	struct list_head	mgmtqueue;	/* mgmt (control) xmit queue */
+@@ -195,6 +208,9 @@ struct iscsi_conn {
+ 	/* remote portal currently connected to */
+ 	int			portal_port;
+ 	char			portal_address[ISCSI_ADDRESS_BUF_LEN];
++	/* local address */
++	int			local_port;
++	char			local_address[ISCSI_ADDRESS_BUF_LEN];
+ 
+ 	/* MIB-statistics */
+ 	uint64_t		txdata_octets;
+@@ -209,7 +225,6 @@ struct iscsi_conn {
+ 
+ 	/* custom statistics */
+ 	uint32_t		eh_abort_cnt;
+-	uint32_t		fmr_unalign_cnt;
+ };
+ 
+ struct iscsi_pool {
+@@ -230,7 +245,6 @@ enum {
+ };
+ 
+ struct iscsi_session {
+-	struct iscsi_cls_session *cls_session;
+ 	/*
+ 	 * Syncs up the scsi eh thread with the iscsi eh thread when sending
+ 	 * task management functions. This must be taken before the session
+@@ -266,8 +280,10 @@ struct iscsi_session {
+ 	char			*password;
+ 	char			*password_in;
+ 	char			*targetname;
+-	char			*ifacename;
+ 	char			*initiatorname;
++	/* hw address or netdev iscsi connection is bound to */
++	char			*hwaddress;
++	char			*netdev;
+ 	/* control data */
+ 	struct iscsi_transport	*tt;
+ 	struct Scsi_Host	*host;
+@@ -281,20 +297,12 @@ struct iscsi_session {
+ 	int			state;		/* session state           */
+ 	int			age;		/* counts session re-opens */
+ 
+-	int			scsi_cmds_max; 	/* max scsi commands */
+ 	int			cmds_max;	/* size of cmds array */
+-	struct iscsi_task	**cmds;		/* Original Cmds arr */
++	struct iscsi_cmd_task	**cmds;		/* Original Cmds arr */
+ 	struct iscsi_pool	cmdpool;	/* PDU's pool */
+-};
+-
+-struct iscsi_host {
+-	char			*initiatorname;
+-	/* hw address or netdev iscsi connection is bound to */
+-	char			*hwaddress;
+-	char			*netdev;
+-	/* local address */
+-	int			local_port;
+-	char			local_address[ISCSI_ADDRESS_BUF_LEN];
++	int			mgmtpool_max;	/* size of mgmt array */
++	struct iscsi_mgmt_task	**mgmt_cmds;	/* Original mgmt arr */
++	struct iscsi_pool	mgmtpool;	/* Mgmt PDU's pool */
+ };
+ 
+ /*
+@@ -307,44 +315,42 @@ extern int iscsi_eh_device_reset(struct 
+ extern int iscsi_queuecommand(struct scsi_cmnd *sc,
+ 			      void (*done)(struct scsi_cmnd *));
+ 
++
+ /*
+  * iSCSI host helpers.
+  */
+-#define iscsi_host_priv(_shost) \
+-	(shost_priv(_shost) + sizeof(struct iscsi_host))
+-
+ extern int iscsi_host_set_param(struct Scsi_Host *shost,
+ 				enum iscsi_host_param param, char *buf,
+ 				int buflen);
+ extern int iscsi_host_get_param(struct Scsi_Host *shost,
+ 				enum iscsi_host_param param, char *buf);
+-extern int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev);
+-extern struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+-					  int dd_data_size, uint16_t qdepth);
+-extern void iscsi_host_remove(struct Scsi_Host *shost);
+-extern void iscsi_host_free(struct Scsi_Host *shost);
+ 
+ /*
+  * session management
+  */
+ extern struct iscsi_cls_session *
+-iscsi_session_setup(struct iscsi_transport *, struct Scsi_Host *shost,
+-		    uint16_t, int, uint32_t, unsigned int);
++iscsi_session_setup(struct iscsi_transport *, struct scsi_transport_template *,
++		    uint16_t, uint16_t, int, int, uint32_t, uint32_t *);
+ extern void iscsi_session_teardown(struct iscsi_cls_session *);
++extern struct iscsi_session *class_to_transport_session(struct iscsi_cls_session *);
+ extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *);
+ extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ 			   enum iscsi_param param, char *buf, int buflen);
+ extern int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ 				   enum iscsi_param param, char *buf);
+ 
++#define session_to_cls(_sess) \
++	hostdata_session(_sess->host->hostdata)
++
+ #define iscsi_session_printk(prefix, _sess, fmt, a...)	\
+-	iscsi_cls_session_printk(prefix, _sess->cls_session, fmt, ##a)
++	iscsi_cls_session_printk(prefix,		\
++		(struct iscsi_cls_session *)session_to_cls(_sess), fmt, ##a)
+ 
+ /*
+  * connection management
+  */
+ extern struct iscsi_cls_conn *iscsi_conn_setup(struct iscsi_cls_session *,
+-					       int, uint32_t);
++					       uint32_t);
+ extern void iscsi_conn_teardown(struct iscsi_cls_conn *);
+ extern int iscsi_conn_start(struct iscsi_cls_conn *);
+ extern void iscsi_conn_stop(struct iscsi_cls_conn *, int);
+@@ -353,29 +359,25 @@ extern int iscsi_conn_bind(struct iscsi_
+ extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err);
+ extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ 				enum iscsi_param param, char *buf);
+-extern void iscsi_suspend_tx(struct iscsi_conn *conn);
+ 
+ #define iscsi_conn_printk(prefix, _c, fmt, a...) \
+-	iscsi_cls_conn_printk(prefix, ((struct iscsi_conn *)_c)->cls_conn, \
+-			      fmt, ##a)
++	iscsi_cls_conn_printk(prefix, _c->cls_conn, fmt, ##a)
+ 
+ /*
+  * pdu and task processing
+  */
+ extern void iscsi_update_cmdsn(struct iscsi_session *, struct iscsi_nopin *);
+-extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *,
++extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *,
+ 					struct iscsi_data *hdr);
+ extern int iscsi_conn_send_pdu(struct iscsi_cls_conn *, struct iscsi_hdr *,
+ 				char *, uint32_t);
+ extern int iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
+ 			      char *, int);
+-extern int __iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
+-				char *, int);
+-extern int iscsi_verify_itt(struct iscsi_conn *, itt_t);
+-extern struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t);
+-extern void iscsi_requeue_task(struct iscsi_task *task);
+-extern void iscsi_put_task(struct iscsi_task *task);
+-extern void __iscsi_get_task(struct iscsi_task *task);
++extern int iscsi_verify_itt(struct iscsi_conn *, struct iscsi_hdr *,
++			    uint32_t *);
++extern void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask);
++extern void iscsi_free_mgmt_task(struct iscsi_conn *conn,
++				 struct iscsi_mgmt_task *mtask);
+ 
+ /*
+  * generic helpers
+Index: ofed_kernel/include/scsi/scsi_transport_iscsi.h
+===================================================================
+--- ofed_kernel.orig/include/scsi/scsi_transport_iscsi.h
++++ ofed_kernel/include/scsi/scsi_transport_iscsi.h
+@@ -30,11 +30,11 @@
+ 
+ struct scsi_transport_template;
+ struct iscsi_transport;
+-struct iscsi_endpoint;
+ struct Scsi_Host;
+ struct iscsi_cls_conn;
+ struct iscsi_conn;
+-struct iscsi_task;
++struct iscsi_cmd_task;
++struct iscsi_mgmt_task;
+ struct sockaddr;
+ 
+ /**
+@@ -58,22 +58,19 @@ struct sockaddr;
+  * @stop_conn:		suspend/recover/terminate connection
+  * @send_pdu:		send iSCSI PDU, Login, Logout, NOP-Out, Reject, Text.
+  * @session_recovery_timedout: notify LLD a block during recovery timed out
+- * @init_task:		Initialize a iscsi_task and any internal structs.
+- *			When offloading the data path, this is called from
+- *			queuecommand with the session lock, or from the
+- *			iscsi_conn_send_pdu context with the session lock.
+- *			When not offloading the data path, this is called
+- *			from the scsi work queue without the session lock.
+- * @xmit_task		Requests LLD to transfer cmd task. Returns 0 or the
++ * @init_cmd_task:	Initialize a iscsi_cmd_task and any internal structs.
++ *			Called from queuecommand with session lock held.
++ * @init_mgmt_task:	Initialize a iscsi_mgmt_task and any internal structs.
++ *			Called from iscsi_conn_send_generic with xmitmutex.
++ * @xmit_cmd_task:	Requests LLD to transfer cmd task. Returns 0 or the
+  *			the number of bytes transferred on success, and -Exyz
+- *			value on error. When offloading the data path, this
+- *			is called from queuecommand with the session lock, or
+- *			from the iscsi_conn_send_pdu context with the session
+- *			lock. When not offloading the data path, this is called
+- *			from the scsi work queue without the session lock.
+- * @cleanup_task:	requests LLD to fail task. Called with session lock
+- *			and after the connection has been suspended and
+- *			terminated during recovery. If called
++ *			value on error.
++ * @xmit_mgmt_task:	Requests LLD to transfer mgmt task. Returns 0 or the
++ *			the number of bytes transferred on success, and -Exyz
++ *			value on error.
++ * @cleanup_cmd_task:	requests LLD to fail cmd task. Called with xmitmutex
++ *			and session->lock after the connection has been
++ *			suspended and terminated during recovery. If called
+  *			from abort task then connection is not suspended
+  *			or terminated but sk_callback_lock is held
+  *
+@@ -86,9 +83,17 @@ struct iscsi_transport {
+ 	/* LLD sets this to indicate what values it can export to sysfs */
+ 	uint64_t param_mask;
+ 	uint64_t host_param_mask;
+-	struct iscsi_cls_session *(*create_session) (struct iscsi_endpoint *ep,
+-					uint16_t cmds_max, uint16_t qdepth,
+-					uint32_t sn, uint32_t *hn);
++	struct scsi_host_template *host_template;
++	/* LLD connection data size */
++	int conndata_size;
++	/* LLD session data size */
++	int sessiondata_size;
++	int max_lun;
++	unsigned int max_conn;
++	unsigned int max_cmd_len;
++	struct iscsi_cls_session *(*create_session) (struct iscsi_transport *it,
++		struct scsi_transport_template *t, uint16_t, uint16_t,
++		uint32_t sn, uint32_t *hn);
+ 	void (*destroy_session) (struct iscsi_cls_session *session);
+ 	struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess,
+ 				uint32_t cid);
+@@ -113,15 +118,20 @@ struct iscsi_transport {
+ 			 char *data, uint32_t data_size);
+ 	void (*get_stats) (struct iscsi_cls_conn *conn,
+ 			   struct iscsi_stats *stats);
+-	int (*init_task) (struct iscsi_task *task);
+-	int (*xmit_task) (struct iscsi_task *task);
+-	void (*cleanup_task) (struct iscsi_conn *conn,
+-				  struct iscsi_task *task);
++	int (*init_cmd_task) (struct iscsi_cmd_task *ctask);
++	void (*init_mgmt_task) (struct iscsi_conn *conn,
++				struct iscsi_mgmt_task *mtask);
++	int (*xmit_cmd_task) (struct iscsi_conn *conn,
++			      struct iscsi_cmd_task *ctask);
++	void (*cleanup_cmd_task) (struct iscsi_conn *conn,
++				  struct iscsi_cmd_task *ctask);
++	int (*xmit_mgmt_task) (struct iscsi_conn *conn,
++			       struct iscsi_mgmt_task *mtask);
+ 	void (*session_recovery_timedout) (struct iscsi_cls_session *session);
+-	struct iscsi_endpoint *(*ep_connect) (struct sockaddr *dst_addr,
+-					      int non_blocking);
+-	int (*ep_poll) (struct iscsi_endpoint *ep, int timeout_ms);
+-	void (*ep_disconnect) (struct iscsi_endpoint *ep);
++	int (*ep_connect) (struct sockaddr *dst_addr, int non_blocking,
++			   uint64_t *ep_handle);
++	int (*ep_poll) (uint64_t ep_handle, int timeout_ms);
++	void (*ep_disconnect) (uint64_t ep_handle);
+ 	int (*tgt_dscvr) (struct Scsi_Host *shost, enum iscsi_tgt_dscvr type,
+ 			  uint32_t enable, struct sockaddr *dst_addr);
+ };
+@@ -162,10 +172,9 @@ enum {
+ 	ISCSI_SESSION_FREE,
+ };
+ 
+-#define ISCSI_MAX_TARGET -1
+-
+ struct iscsi_cls_session {
+ 	struct list_head sess_list;		/* item in session_list */
++	struct list_head host_list;
+ 	struct iscsi_transport *transport;
+ 	spinlock_t lock;
+ 	struct work_struct block_work;
+@@ -177,7 +186,7 @@ struct iscsi_cls_session {
+ 	int recovery_tmo;
+ 	struct delayed_work recovery_work;
+ 
+-	unsigned int target_id;
++	int target_id;
+ 
+ 	int state;
+ 	int sid;				/* session id */
+@@ -194,20 +203,12 @@ struct iscsi_cls_session {
+ #define starget_to_session(_stgt) \
+ 	iscsi_dev_to_session(_stgt->dev.parent)
+ 
+-struct iscsi_cls_host {
++struct iscsi_host {
++	struct list_head sessions;
+ 	atomic_t nr_scans;
+ 	struct mutex mutex;
+ 	struct workqueue_struct *scan_workq;
+-	char scan_workq_name[20];
+-};
+-
+-extern void iscsi_host_for_each_session(struct Scsi_Host *shost,
+-				void (*fn)(struct iscsi_cls_session *));
+-
+-struct iscsi_endpoint {
+-	void *dd_data;			/* LLD private data */
+-	struct device dev;
+-	unsigned int id;
++	char scan_workq_name[KOBJ_NAME_LEN];
+ };
+ 
+ /*
+@@ -221,26 +222,22 @@ struct iscsi_endpoint {
+ 
+ extern int iscsi_session_chkready(struct iscsi_cls_session *session);
+ extern struct iscsi_cls_session *iscsi_alloc_session(struct Scsi_Host *shost,
+-				struct iscsi_transport *transport, int dd_size);
++					struct iscsi_transport *transport);
+ extern int iscsi_add_session(struct iscsi_cls_session *session,
+ 			     unsigned int target_id);
+ extern int iscsi_session_event(struct iscsi_cls_session *session,
+ 			       enum iscsi_uevent_e event);
+ extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost,
+ 						struct iscsi_transport *t,
+-						int dd_size,
+ 						unsigned int target_id);
+ extern void iscsi_remove_session(struct iscsi_cls_session *session);
+ extern void iscsi_free_session(struct iscsi_cls_session *session);
+ extern int iscsi_destroy_session(struct iscsi_cls_session *session);
+ extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess,
+-						int dd_size, uint32_t cid);
++					    uint32_t cid);
+ extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
+ extern void iscsi_unblock_session(struct iscsi_cls_session *session);
+ extern void iscsi_block_session(struct iscsi_cls_session *session);
+ extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time);
+-extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size);
+-extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep);
+-extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle);
+ 
+ #endif

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iscsi_02_count_fmr_align_violations.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iscsi_02_count_fmr_align_violations.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iscsi_02_count_fmr_align_violations.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,25 @@
+From 02753dd2caabfe6b1885cb80a8fb8532b416108d Mon Sep 17 00:00:00 2001
+From: Eli Dorfman <elid at voltaire.com>
+Date: Tue, 29 Apr 2008 10:12:39 +0300
+Subject: [PATCH] IB/iSER: Count fmr alignment violations per session
+
+Count fmr alignment violations per session
+as part of the iscsi statistics.
+
+Signed-off-by: Eli Dorfman <elid at voltaire.com>
+---
+ include/scsi/libiscsi.h |    1 +
+ 1 file changed, 1 insertion(+)
+
+Index: ofed_kernel/include/scsi/libiscsi.h
+===================================================================
+--- ofed_kernel.orig/include/scsi/libiscsi.h
++++ ofed_kernel/include/scsi/libiscsi.h
+@@ -225,6 +225,7 @@ struct iscsi_conn {
+ 
+ 	/* custom statistics */
+ 	uint32_t		eh_abort_cnt;
++	uint32_t		fmr_unalign_cnt;
+ };
+ 
+ struct iscsi_pool {

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,160 @@
+From 66ab30f8dadef133bd04bbdcb434a7f742821bed Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Sun, 29 Jun 2008 15:41:12 +0300
+Subject: [PATCH] compat patch for RHEL5 and SLES10
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/scsi/scsi_transport_iscsi.c |   97 +++++++++++++++++++++---------------
+ 1 file changed, 57 insertions(+), 40 deletions(-)
+
+Index: ofa_kernel-1.4/drivers/scsi/scsi_transport_iscsi.c
+===================================================================
+--- ofa_kernel-1.4.orig/drivers/scsi/scsi_transport_iscsi.c
++++ ofa_kernel-1.4/drivers/scsi/scsi_transport_iscsi.c
+@@ -20,6 +20,8 @@
+  * along with this program; if not, write to the Free Software
+  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+  */
++#include <linux/version.h>
++#include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+ #include <net/tcp.h>
+@@ -378,8 +380,10 @@ static void __iscsi_unblock_session(stru
+ 	struct iscsi_cls_session *session =
+ 			container_of(work, struct iscsi_cls_session,
+ 				     unblock_work);
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
+ 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
+ 	struct iscsi_host *ihost = shost->shost_data;
++#endif
+ 	unsigned long flags;
+ 
+ 	/*
+@@ -397,10 +401,12 @@ static void __iscsi_unblock_session(stru
+ 	 * the async scanning code (drivers like iscsi_tcp do login and
+ 	 * scanning from userspace).
+ 	 */
+-	if (shost->hostt->scan_finished) {
+-		if (queue_work(ihost->scan_workq, &session->scan_work))
+-			atomic_inc(&ihost->nr_scans);
+-	}
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
++		if (shost->hostt->scan_finished) {
++			if (queue_work(ihost->scan_workq, &session->scan_work))
++				atomic_inc(&ihost->nr_scans);
++		}
++#endif
+ }
+ 
+ /**
+@@ -1294,45 +1300,56 @@ iscsi_if_recv_msg(struct sk_buff *skb, s
+  * Malformed skbs with wrong lengths or invalid creds are not processed.
+  */
+ static void
+-iscsi_if_rx(struct sk_buff *skb)
++iscsi_if_rx(struct sock *sk, int len)
+ {
++	struct sk_buff *skb;
++
+ 	mutex_lock(&rx_queue_mutex);
+-	while (skb->len >= NLMSG_SPACE(0)) {
+-		int err;
+-		uint32_t rlen;
+-		struct nlmsghdr	*nlh;
+-		struct iscsi_uevent *ev;
+-
+-		nlh = nlmsg_hdr(skb);
+-		if (nlh->nlmsg_len < sizeof(*nlh) ||
+-		    skb->len < nlh->nlmsg_len) {
+-			break;
++	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
++		if (NETLINK_CREDS(skb)->uid) {
++			skb_pull(skb, skb->len);
++			goto free_skb;
+ 		}
+ 
+-		ev = NLMSG_DATA(nlh);
+-		rlen = NLMSG_ALIGN(nlh->nlmsg_len);
+-		if (rlen > skb->len)
+-			rlen = skb->len;
+-
+-		err = iscsi_if_recv_msg(skb, nlh);
+-		if (err) {
+-			ev->type = ISCSI_KEVENT_IF_ERROR;
+-			ev->iferror = err;
+-		}
+-		do {
+-			/*
+-			 * special case for GET_STATS:
+-			 * on success - sending reply and stats from
+-			 * inside of if_recv_msg(),
+-			 * on error - fall through.
+-			 */
+-			if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
++		while (skb->len >= NLMSG_SPACE(0)) {
++			int err;
++			uint32_t rlen;
++			struct nlmsghdr	*nlh;
++			struct iscsi_uevent *ev;
++
++			nlh = nlmsg_hdr(skb);
++			if (nlh->nlmsg_len < sizeof(*nlh) ||
++			    skb->len < nlh->nlmsg_len) {
+ 				break;
+-			err = iscsi_if_send_reply(
+-				NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
+-				nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
+-		} while (err < 0 && err != -ECONNREFUSED);
+-		skb_pull(skb, rlen);
++			}
++
++			ev = NLMSG_DATA(nlh);
++			rlen = NLMSG_ALIGN(nlh->nlmsg_len);
++			if (rlen > skb->len)
++				rlen = skb->len;
++
++			err = iscsi_if_recv_msg(skb, nlh);
++			if (err) {
++				ev->type = ISCSI_KEVENT_IF_ERROR;
++				ev->iferror = err;
++			}
++			do {
++				/*
++				 * special case for GET_STATS:
++				 * on success - sending reply and stats from
++				 * inside of if_recv_msg(),
++				 * on error - fall through.
++				 */
++				if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
++					break;
++				err = iscsi_if_send_reply(
++					NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
++					nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
++			} while (err < 0 && err != -ECONNREFUSED);
++			skb_pull(skb, rlen);
++		}
++free_skb:
++		kfree_skb(skb);
+ 	}
+ 	mutex_unlock(&rx_queue_mutex);
+ }
+@@ -1738,7 +1755,7 @@ static __init int iscsi_transport_init(v
+ 	return 0;
+ 
+ release_nls:
+-	netlink_kernel_release(nls);
++	sock_release(nls->sk_socket);
+ unregister_session_class:
+ 	transport_class_unregister(&iscsi_session_class);
+ unregister_conn_class:
+@@ -1753,7 +1770,7 @@ unregister_transport_class:
+ static void __exit iscsi_transport_exit(void)
+ {
+ 	destroy_workqueue(iscsi_eh_timer_workq);
+-	netlink_kernel_release(nls);
++	sock_release(nls->sk_socket);
+ 	transport_class_unregister(&iscsi_connection_class);
+ 	transport_class_unregister(&iscsi_session_class);
+ 	transport_class_unregister(&iscsi_host_class);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iser_01_sync_kernel_code_with_2.6.26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iser_01_sync_kernel_code_with_2.6.26.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iser_01_sync_kernel_code_with_2.6.26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,1548 @@
+From ad1e1df62ff096cc90257b0b42e843d0773ae981 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Tue, 26 Aug 2008 11:37:50 +0300
+Subject: [PATCH] iser backports
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/infiniband/ulp/iser/iscsi_iser.c     |  363 ++++++++++++---------------
+ drivers/infiniband/ulp/iser/iscsi_iser.h     |   46 +--
+ drivers/infiniband/ulp/iser/iser_initiator.c |  211 +++++++--------
+ drivers/infiniband/ulp/iser/iser_memory.c    |   79 ++---
+ drivers/infiniband/ulp/iser/iser_verbs.c     |   31 +-
+ 5 files changed, 347 insertions(+), 383 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/ulp/iser/iscsi_iser.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ ofed_kernel/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -42,6 +42,9 @@
+  *	Zhenyu Wang
+  * Modified by:
+  *      Erez Zilber
++ *
++ *
++ * $Id: iscsi_iser.c 6965 2006-05-07 11:36:20Z ogerlitz $
+  */
+ 
+ #include <linux/types.h>
+@@ -71,10 +74,6 @@
+ 
+ #include "iscsi_iser.h"
+ 
+-static struct scsi_host_template iscsi_iser_sht;
+-static struct iscsi_transport iscsi_iser_transport;
+-static struct scsi_transport_template *iscsi_iser_scsi_transport;
+-
+ static unsigned int iscsi_max_lun = 512;
+ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+ 
+@@ -95,6 +94,7 @@ iscsi_iser_recv(struct iscsi_conn *conn,
+ 		struct iscsi_hdr *hdr, char *rx_data, int rx_data_len)
+ {
+ 	int rc = 0;
++	uint32_t ret_itt;
+ 	int datalen;
+ 	int ahslen;
+ 
+@@ -110,7 +110,12 @@ iscsi_iser_recv(struct iscsi_conn *conn,
+ 	/* read AHS */
+ 	ahslen = hdr->hlength * 4;
+ 
+-	rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
++	/* verify itt (itt encoding: age+cid+itt) */
++	rc = iscsi_verify_itt(conn, hdr, &ret_itt);
++
++	if (!rc)
++		rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
++
+ 	if (rc && rc != ISCSI_ERR_NO_SCSI_CMD)
+ 		goto error;
+ 
+@@ -121,33 +126,25 @@ error:
+ 
+ 
+ /**
+- * iscsi_iser_task_init - Initialize task
+- * @task: iscsi task
++ * iscsi_iser_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+  *
+- * Initialize the task for the scsi command or mgmt command.
+- */
++ **/
+ static int
+-iscsi_iser_task_init(struct iscsi_task *task)
++iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
+ {
+-	struct iscsi_iser_conn *iser_conn  = task->conn->dd_data;
+-	struct iscsi_iser_task *iser_task = task->dd_data;
+-
+-	/* mgmt task */
+-	if (!task->sc) {
+-		iser_task->desc.data = task->data;
+-		return 0;
+-	}
++	struct iscsi_iser_conn     *iser_conn  = ctask->conn->dd_data;
++	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ 
+-	iser_task->command_sent = 0;
+-	iser_task->iser_conn    = iser_conn;
+-	iser_task_rdma_init(iser_task);
++	iser_ctask->command_sent = 0;
++	iser_ctask->iser_conn    = iser_conn;
++	iser_ctask_rdma_init(iser_ctask);
+ 	return 0;
+ }
+ 
+ /**
+- * iscsi_iser_mtask_xmit - xmit management(immediate) task
++ * iscsi_mtask_xmit - xmit management(immediate) task
+  * @conn: iscsi connection
+- * @task: task management task
++ * @mtask: task management task
+  *
+  * Notes:
+  *	The function can return -EAGAIN in which case caller must
+@@ -156,19 +153,20 @@ iscsi_iser_task_init(struct iscsi_task *
+  *
+  **/
+ static int
+-iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
++		      struct iscsi_mgmt_task *mtask)
+ {
+ 	int error = 0;
+ 
+-	debug_scsi("task deq [cid %d itt 0x%x]\n", conn->id, task->itt);
++	debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
+ 
+-	error = iser_send_control(conn, task);
++	error = iser_send_control(conn, mtask);
+ 
+-	/* since iser xmits control with zero copy, tasks can not be recycled
++	/* since iser xmits control with zero copy, mtasks can not be recycled
+ 	 * right after sending them.
+ 	 * The recycling scheme is based on whether a response is expected
+-	 * - if yes, the task is recycled at iscsi_complete_pdu
+-	 * - if no,  the task is recycled at iser_snd_completion
++	 * - if yes, the mtask is recycled at iscsi_complete_pdu
++	 * - if no,  the mtask is recycled at iser_snd_completion
+ 	 */
+ 	if (error && error != -ENOBUFS)
+ 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+@@ -177,86 +175,97 @@ iscsi_iser_mtask_xmit(struct iscsi_conn 
+ }
+ 
+ static int
+-iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
+-				 struct iscsi_task *task)
++iscsi_iser_ctask_xmit_unsol_data(struct iscsi_conn *conn,
++				 struct iscsi_cmd_task *ctask)
+ {
+ 	struct iscsi_data  hdr;
+ 	int error = 0;
+ 
+ 	/* Send data-out PDUs while there's still unsolicited data to send */
+-	while (task->unsol_count > 0) {
+-		iscsi_prep_unsolicit_data_pdu(task, &hdr);
++	while (ctask->unsol_count > 0) {
++		iscsi_prep_unsolicit_data_pdu(ctask, &hdr);
+ 		debug_scsi("Sending data-out: itt 0x%x, data count %d\n",
+-			   hdr.itt, task->data_count);
++			   hdr.itt, ctask->data_count);
+ 
+ 		/* the buffer description has been passed with the command */
+ 		/* Send the command */
+-		error = iser_send_data_out(conn, task, &hdr);
++		error = iser_send_data_out(conn, ctask, &hdr);
+ 		if (error) {
+-			task->unsol_datasn--;
+-			goto iscsi_iser_task_xmit_unsol_data_exit;
++			ctask->unsol_datasn--;
++			goto iscsi_iser_ctask_xmit_unsol_data_exit;
+ 		}
+-		task->unsol_count -= task->data_count;
++		ctask->unsol_count -= ctask->data_count;
+ 		debug_scsi("Need to send %d more as data-out PDUs\n",
+-			   task->unsol_count);
++			   ctask->unsol_count);
+ 	}
+ 
+-iscsi_iser_task_xmit_unsol_data_exit:
++iscsi_iser_ctask_xmit_unsol_data_exit:
+ 	return error;
+ }
+ 
+ static int
+-iscsi_iser_task_xmit(struct iscsi_task *task)
++iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
++		      struct iscsi_cmd_task *ctask)
+ {
+-	struct iscsi_conn *conn = task->conn;
+-	struct iscsi_iser_task *iser_task = task->dd_data;
++	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ 	int error = 0;
+ 
+-	if (!task->sc)
+-		return iscsi_iser_mtask_xmit(conn, task);
+-
+-	if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
+-		BUG_ON(scsi_bufflen(task->sc) == 0);
++	if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
++		BUG_ON(scsi_bufflen(ctask->sc) == 0);
+ 
+ 		debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
+-			   task->itt, scsi_bufflen(task->sc),
+-			   task->imm_count, task->unsol_count);
++			   ctask->itt, scsi_bufflen(ctask->sc),
++			   ctask->imm_count, ctask->unsol_count);
+ 	}
+ 
+-	debug_scsi("task deq [cid %d itt 0x%x]\n",
+-		   conn->id, task->itt);
++	debug_scsi("ctask deq [cid %d itt 0x%x]\n",
++		   conn->id, ctask->itt);
+ 
+ 	/* Send the cmd PDU */
+-	if (!iser_task->command_sent) {
+-		error = iser_send_command(conn, task);
++	if (!iser_ctask->command_sent) {
++		error = iser_send_command(conn, ctask);
+ 		if (error)
+-			goto iscsi_iser_task_xmit_exit;
+-		iser_task->command_sent = 1;
++			goto iscsi_iser_ctask_xmit_exit;
++		iser_ctask->command_sent = 1;
+ 	}
+ 
+ 	/* Send unsolicited data-out PDU(s) if necessary */
+-	if (task->unsol_count)
+-		error = iscsi_iser_task_xmit_unsol_data(conn, task);
++	if (ctask->unsol_count)
++		error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask);
+ 
+- iscsi_iser_task_xmit_exit:
++ iscsi_iser_ctask_xmit_exit:
+ 	if (error && error != -ENOBUFS)
+ 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ 	return error;
+ }
+ 
+ static void
+-iscsi_iser_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_iser_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+-	struct iscsi_iser_task *iser_task = task->dd_data;
++	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ 
+-	/* mgmt tasks do not need special cleanup */
+-	if (!task->sc)
+-		return;
++	if (iser_ctask->status == ISER_TASK_STATUS_STARTED) {
++		iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
++		iser_ctask_rdma_finalize(iser_ctask);
++	}
++}
+ 
+-	if (iser_task->status == ISER_TASK_STATUS_STARTED) {
+-		iser_task->status = ISER_TASK_STATUS_COMPLETED;
+-		iser_task_rdma_finalize(iser_task);
++static struct iser_conn *
++iscsi_iser_ib_conn_lookup(__u64 ep_handle)
++{
++	struct iser_conn *ib_conn;
++	struct iser_conn *uib_conn = (struct iser_conn *)(unsigned long)ep_handle;
++
++	mutex_lock(&ig.connlist_mutex);
++	list_for_each_entry(ib_conn, &ig.connlist, conn_list) {
++		if (ib_conn == uib_conn) {
++			mutex_unlock(&ig.connlist_mutex);
++			return ib_conn;
++		}
+ 	}
++	mutex_unlock(&ig.connlist_mutex);
++	iser_err("no conn exists for eph %llx\n",(unsigned long long)ep_handle);
++	return NULL;
+ }
+ 
+ static struct iscsi_cls_conn *
+@@ -266,7 +275,7 @@ iscsi_iser_conn_create(struct iscsi_cls_
+ 	struct iscsi_cls_conn *cls_conn;
+ 	struct iscsi_iser_conn *iser_conn;
+ 
+-	cls_conn = iscsi_conn_setup(cls_session, sizeof(*iser_conn), conn_idx);
++	cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+ 	if (!cls_conn)
+ 		return NULL;
+ 	conn = cls_conn->dd_data;
+@@ -277,11 +286,21 @@ iscsi_iser_conn_create(struct iscsi_cls_
+ 	 */
+ 	conn->max_recv_dlength = 128;
+ 
+-	iser_conn = conn->dd_data;
++	iser_conn = kzalloc(sizeof(*iser_conn), GFP_KERNEL);
++	if (!iser_conn)
++		goto conn_alloc_fail;
++
++	/* currently this is the only field which need to be initiated */
++	rwlock_init(&iser_conn->lock);
++
+ 	conn->dd_data = iser_conn;
+ 	iser_conn->iscsi_conn = conn;
+ 
+ 	return cls_conn;
++
++conn_alloc_fail:
++	iscsi_conn_teardown(cls_conn);
++	return NULL;
+ }
+ 
+ static void
+@@ -289,18 +308,11 @@ iscsi_iser_conn_destroy(struct iscsi_cls
+ {
+ 	struct iscsi_conn *conn = cls_conn->dd_data;
+ 	struct iscsi_iser_conn *iser_conn = conn->dd_data;
+-	struct iser_conn *ib_conn = iser_conn->ib_conn;
+ 
+ 	iscsi_conn_teardown(cls_conn);
+-	/*
+-	 * Userspace will normally call the stop callback and
+-	 * already have freed the ib_conn, but if it goofed up then
+-	 * we free it here.
+-	 */
+-	if (ib_conn) {
+-		ib_conn->iser_conn = NULL;
+-		iser_conn_put(ib_conn);
+-	}
++	if (iser_conn->ib_conn)
++		iser_conn->ib_conn->iser_conn = NULL;
++	kfree(iser_conn);
+ }
+ 
+ static int
+@@ -311,7 +323,6 @@ iscsi_iser_conn_bind(struct iscsi_cls_se
+ 	struct iscsi_conn *conn = cls_conn->dd_data;
+ 	struct iscsi_iser_conn *iser_conn;
+ 	struct iser_conn *ib_conn;
+-	struct iscsi_endpoint *ep;
+ 	int error;
+ 
+ 	error = iscsi_conn_bind(cls_session, cls_conn, is_leading);
+@@ -320,14 +331,12 @@ iscsi_iser_conn_bind(struct iscsi_cls_se
+ 
+ 	/* the transport ep handle comes from user space so it must be
+ 	 * verified against the global ib connections list */
+-	ep = iscsi_lookup_endpoint(transport_eph);
+-	if (!ep) {
++	ib_conn = iscsi_iser_ib_conn_lookup(transport_eph);
++	if (!ib_conn) {
+ 		iser_err("can't bind eph %llx\n",
+ 			 (unsigned long long)transport_eph);
+ 		return -EINVAL;
+ 	}
+-	ib_conn = ep->dd_data;
+-
+ 	/* binds the iSER connection retrieved from the previously
+ 	 * connected ep_handle to the iSCSI layer connection. exchanges
+ 	 * connection pointers */
+@@ -335,30 +344,10 @@ iscsi_iser_conn_bind(struct iscsi_cls_se
+ 	iser_conn = conn->dd_data;
+ 	ib_conn->iser_conn = iser_conn;
+ 	iser_conn->ib_conn  = ib_conn;
+-	iser_conn_get(ib_conn);
+-	return 0;
+-}
+ 
+-static void
+-iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+-{
+-	struct iscsi_conn *conn = cls_conn->dd_data;
+-	struct iscsi_iser_conn *iser_conn = conn->dd_data;
+-	struct iser_conn *ib_conn = iser_conn->ib_conn;
++	conn->recv_lock = &iser_conn->lock;
+ 
+-	/*
+-	 * Userspace may have goofed up and not bound the connection or
+-	 * might have only partially setup the connection.
+-	 */
+-	if (ib_conn) {
+-		iscsi_conn_stop(cls_conn, flag);
+-		/*
+-		 * There is no unbind event so the stop callback
+-		 * must release the ref from the bind.
+-		 */
+-		iser_conn_put(ib_conn);
+-	}
+-	iser_conn->ib_conn = NULL;
++	return 0;
+ }
+ 
+ static int
+@@ -374,75 +363,55 @@ iscsi_iser_conn_start(struct iscsi_cls_c
+ 	return iscsi_conn_start(cls_conn);
+ }
+ 
+-static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
+-{
+-	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+-
+-	iscsi_host_remove(shost);
+-	iscsi_host_free(shost);
+-}
++static struct iscsi_transport iscsi_iser_transport;
+ 
+ static struct iscsi_cls_session *
+-iscsi_iser_session_create(struct iscsi_endpoint *ep,
+-			  uint16_t cmds_max, uint16_t qdepth,
+-			  uint32_t initial_cmdsn, uint32_t *hostno)
++iscsi_iser_session_create(struct iscsi_transport *iscsit,
++			 struct scsi_transport_template *scsit,
++			 uint16_t cmds_max, uint16_t qdepth,
++			 uint32_t initial_cmdsn, uint32_t *hostno)
+ {
+ 	struct iscsi_cls_session *cls_session;
+ 	struct iscsi_session *session;
+-	struct Scsi_Host *shost;
+ 	int i;
+-	struct iscsi_task *task;
+-	struct iscsi_iser_task *iser_task;
+-	struct iser_conn *ib_conn;
+-
+-	shost = iscsi_host_alloc(&iscsi_iser_sht, 0, ISCSI_MAX_CMD_PER_LUN);
+-	if (!shost)
+-		return NULL;
+-	shost->transportt = iscsi_iser_scsi_transport;
+-	shost->max_lun = iscsi_max_lun;
+-	shost->max_id = 0;
+-	shost->max_channel = 0;
+-	shost->max_cmd_len = 16;
+-
+-	/*
+-	 * older userspace tools (before 2.0-870) did not pass us
+-	 * the leading conn's ep so this will be NULL;
+-	 */
+-	if (ep)
+-		ib_conn = ep->dd_data;
+-
+-	if (iscsi_host_add(shost,
+-			   ep ? ib_conn->device->ib_device->dma_device : NULL))
+-		goto free_host;
+-	*hostno = shost->host_no;
++	uint32_t hn;
++	struct iscsi_cmd_task  *ctask;
++	struct iscsi_mgmt_task *mtask;
++	struct iscsi_iser_cmd_task *iser_ctask;
++	struct iser_desc *desc;
+ 
+ 	/*
+ 	 * we do not support setting can_queue cmd_per_lun from userspace yet
+ 	 * because we preallocate so many resources
+ 	 */
+-	cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
++	cls_session = iscsi_session_setup(iscsit, scsit,
+ 					  ISCSI_DEF_XMIT_CMDS_MAX,
+-					  sizeof(struct iscsi_iser_task),
+-					  initial_cmdsn, 0);
++					  ISCSI_MAX_CMD_PER_LUN,
++					  sizeof(struct iscsi_iser_cmd_task),
++					  sizeof(struct iser_desc),
++					  initial_cmdsn, &hn);
+ 	if (!cls_session)
+-		goto remove_host;
+-	session = cls_session->dd_data;
++	return NULL;
++
++	*hostno = hn;
++	session = class_to_transport_session(cls_session);
+ 
+-	shost->can_queue = session->scsi_cmds_max;
+ 	/* libiscsi setup itts, data and pool so just set desc fields */
+ 	for (i = 0; i < session->cmds_max; i++) {
+-		task = session->cmds[i];
+-		iser_task = task->dd_data;
+-		task->hdr = (struct iscsi_cmd *)&iser_task->desc.iscsi_header;
+-		task->hdr_max = sizeof(iser_task->desc.iscsi_header);
++		ctask      = session->cmds[i];
++		iser_ctask = ctask->dd_data;
++		ctask->hdr = (struct iscsi_cmd *)&iser_ctask->desc.iscsi_header;
++		ctask->hdr_max = sizeof(iser_ctask->desc.iscsi_header);
+ 	}
+-	return cls_session;
+ 
+-remove_host:
+-	iscsi_host_remove(shost);
+-free_host:
+-	iscsi_host_free(shost);
+-	return NULL;
++	for (i = 0; i < session->mgmtpool_max; i++) {
++		mtask      = session->mgmt_cmds[i];
++		desc       = mtask->dd_data;
++		mtask->hdr = &desc->iscsi_header;
++		desc->data = mtask->data;
++	}
++
++	return cls_session;
+ }
+ 
+ static int
+@@ -515,37 +484,34 @@ iscsi_iser_conn_get_stats(struct iscsi_c
+ 	stats->custom[3].value = conn->fmr_unalign_cnt;
+ }
+ 
+-static struct iscsi_endpoint *
+-iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking)
++static int
++iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking,
++		      __u64 *ep_handle)
+ {
+ 	int err;
+ 	struct iser_conn *ib_conn;
+-	struct iscsi_endpoint *ep;
+ 
+-	ep = iscsi_create_endpoint(sizeof(*ib_conn));
+-	if (!ep)
+-		return ERR_PTR(-ENOMEM);
+-
+-	ib_conn = ep->dd_data;
+-	ib_conn->ep = ep;
+-	iser_conn_init(ib_conn);
+-
+-	err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr,
+-			   non_blocking);
+-	if (err) {
+-		iscsi_destroy_endpoint(ep);
+-		return ERR_PTR(err);
+-	}
+-	return ep;
++	err = iser_conn_init(&ib_conn);
++	if (err)
++		goto out;
++
++	err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr, non_blocking);
++	if (!err)
++		*ep_handle = (__u64)(unsigned long)ib_conn;
++
++out:
++	return err;
+ }
+ 
+ static int
+-iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
++iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
+ {
+-	struct iser_conn *ib_conn;
++	struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
+ 	int rc;
+ 
+-	ib_conn = ep->dd_data;
++	if (!ib_conn)
++		return -EINVAL;
++
+ 	rc = wait_event_interruptible_timeout(ib_conn->wait,
+ 			     ib_conn->state == ISER_CONN_UP,
+ 			     msecs_to_jiffies(timeout_ms));
+@@ -567,21 +533,13 @@ iscsi_iser_ep_poll(struct iscsi_endpoint
+ }
+ 
+ static void
+-iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
++iscsi_iser_ep_disconnect(__u64 ep_handle)
+ {
+ 	struct iser_conn *ib_conn;
+ 
+-	ib_conn = ep->dd_data;
+-	if (ib_conn->iser_conn)
+-		/*
+-		 * Must suspend xmit path if the ep is bound to the
+-		 * iscsi_conn, so we know we are not accessing the ib_conn
+-		 * when we free it.
+-		 *
+-		 * This may not be bound if the ep poll failed.
+-		 */
+-		iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn);
+-
++	ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
++	if (!ib_conn)
++		return;
+ 
+ 	iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
+ 	iser_conn_terminate(ib_conn);
+@@ -592,6 +550,7 @@ static struct scsi_host_template iscsi_i
+ 	.name                   = "iSCSI Initiator over iSER, v." DRV_VER,
+ 	.queuecommand           = iscsi_queuecommand,
+ 	.change_queue_depth	= iscsi_change_queue_depth,
++	.can_queue		= ISCSI_DEF_XMIT_CMDS_MAX - 1,
+ 	.sg_tablesize           = ISCSI_ISER_SG_TABLESIZE,
+ 	.max_sectors		= 1024,
+ 	.cmd_per_lun            = ISCSI_MAX_CMD_PER_LUN,
+@@ -625,14 +584,17 @@ static struct iscsi_transport iscsi_iser
+ 				  ISCSI_USERNAME | ISCSI_PASSWORD |
+ 				  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+ 				  ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+-				  ISCSI_PING_TMO | ISCSI_RECV_TMO |
+-				  ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
++				  ISCSI_PING_TMO | ISCSI_RECV_TMO,
+ 	.host_param_mask	= ISCSI_HOST_HWADDRESS |
+ 				  ISCSI_HOST_NETDEV_NAME |
+ 				  ISCSI_HOST_INITIATOR_NAME,
++	.host_template          = &iscsi_iser_sht,
++	.conndata_size		= sizeof(struct iscsi_conn),
++	.max_lun                = ISCSI_ISER_MAX_LUN,
++	.max_cmd_len            = ISCSI_ISER_MAX_CMD_LEN,
+ 	/* session management */
+ 	.create_session         = iscsi_iser_session_create,
+-	.destroy_session        = iscsi_iser_session_destroy,
++	.destroy_session        = iscsi_session_teardown,
+ 	/* connection management */
+ 	.create_conn            = iscsi_iser_conn_create,
+ 	.bind_conn              = iscsi_iser_conn_bind,
+@@ -641,16 +603,17 @@ static struct iscsi_transport iscsi_iser
+ 	.get_conn_param		= iscsi_conn_get_param,
+ 	.get_session_param	= iscsi_session_get_param,
+ 	.start_conn             = iscsi_iser_conn_start,
+-	.stop_conn              = iscsi_iser_conn_stop,
++	.stop_conn              = iscsi_conn_stop,
+ 	/* iscsi host params */
+ 	.get_host_param		= iscsi_host_get_param,
+ 	.set_host_param		= iscsi_host_set_param,
+ 	/* IO */
+ 	.send_pdu		= iscsi_conn_send_pdu,
+ 	.get_stats		= iscsi_iser_conn_get_stats,
+-	.init_task		= iscsi_iser_task_init,
+-	.xmit_task		= iscsi_iser_task_xmit,
+-	.cleanup_task		= iscsi_iser_cleanup_task,
++	.init_cmd_task		= iscsi_iser_cmd_init,
++	.xmit_cmd_task		= iscsi_iser_ctask_xmit,
++	.xmit_mgmt_task		= iscsi_iser_mtask_xmit,
++	.cleanup_cmd_task	= iscsi_iser_cleanup_ctask,
+ 	/* recovery */
+ 	.session_recovery_timedout = iscsi_session_recovery_timedout,
+ 
+@@ -670,6 +633,8 @@ static int __init iser_init(void)
+ 		return -EINVAL;
+ 	}
+ 
++	iscsi_iser_transport.max_lun = iscsi_max_lun;
++
+ 	memset(&ig, 0, sizeof(struct iser_global));
+ 
+ 	ig.desc_cache = kmem_cache_create("iser_descriptors",
+@@ -685,9 +650,7 @@ static int __init iser_init(void)
+ 	mutex_init(&ig.connlist_mutex);
+ 	INIT_LIST_HEAD(&ig.connlist);
+ 
+-	iscsi_iser_scsi_transport = iscsi_register_transport(
+-							&iscsi_iser_transport);
+-	if (!iscsi_iser_scsi_transport) {
++	if (!iscsi_register_transport(&iscsi_iser_transport)) {
+ 		iser_err("iscsi_register_transport failed\n");
+ 		err = -EINVAL;
+ 		goto register_transport_failure;
+Index: ofed_kernel/drivers/infiniband/ulp/iser/iscsi_iser.h
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/iser/iscsi_iser.h
++++ ofed_kernel/drivers/infiniband/ulp/iser/iscsi_iser.h
+@@ -36,6 +36,8 @@
+  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+  * SOFTWARE.
++ *
++ * $Id: iscsi_iser.h 7051 2006-05-10 12:29:11Z ogerlitz $
+  */
+ #ifndef __ISCSI_ISER_H__
+ #define __ISCSI_ISER_H__
+@@ -94,6 +96,7 @@
+ 					/* support upto 512KB in one RDMA */
+ #define ISCSI_ISER_SG_TABLESIZE         (0x80000 >> SHIFT_4K)
+ #define ISCSI_ISER_MAX_LUN		256
++#define ISCSI_ISER_MAX_CMD_LEN		16
+ 
+ /* QP settings */
+ /* Maximal bounds on received asynchronous PDUs */
+@@ -171,8 +174,7 @@ struct iser_data_buf {
+ /* fwd declarations */
+ struct iser_device;
+ struct iscsi_iser_conn;
+-struct iscsi_iser_task;
+-struct iscsi_endpoint;
++struct iscsi_iser_cmd_task;
+ 
+ struct iser_mem_reg {
+ 	u32  lkey;
+@@ -196,7 +198,7 @@ struct iser_regd_buf {
+ #define MAX_REGD_BUF_VECTOR_LEN	2
+ 
+ struct iser_dto {
+-	struct iscsi_iser_task *task;
++	struct iscsi_iser_cmd_task *ctask;
+ 	struct iser_conn *ib_conn;
+ 	int                        notify_enable;
+ 
+@@ -240,9 +242,7 @@ struct iser_device {
+ 
+ struct iser_conn {
+ 	struct iscsi_iser_conn       *iser_conn; /* iser conn for upcalls  */
+-	struct iscsi_endpoint	     *ep;
+ 	enum iser_ib_conn_state	     state;	    /* rdma connection state   */
+-	atomic_t		     refcount;
+ 	spinlock_t		     lock;	    /* used for state changes  */
+ 	struct iser_device           *device;       /* device context          */
+ 	struct rdma_cm_id            *cma_id;       /* CMA ID		       */
+@@ -261,9 +261,11 @@ struct iser_conn {
+ struct iscsi_iser_conn {
+ 	struct iscsi_conn            *iscsi_conn;/* ptr to iscsi conn */
+ 	struct iser_conn             *ib_conn;   /* iSER IB conn      */
++
++	rwlock_t		     lock;
+ };
+ 
+-struct iscsi_iser_task {
++struct iscsi_iser_cmd_task {
+ 	struct iser_desc             desc;
+ 	struct iscsi_iser_conn	     *iser_conn;
+ 	enum iser_task_status 	     status;
+@@ -296,26 +298,22 @@ extern int iser_debug_level;
+ /* allocate connection resources needed for rdma functionality */
+ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn);
+ 
+-int iser_send_control(struct iscsi_conn *conn,
+-		      struct iscsi_task *task);
++int iser_send_control(struct iscsi_conn      *conn,
++		      struct iscsi_mgmt_task *mtask);
+ 
+-int iser_send_command(struct iscsi_conn *conn,
+-		      struct iscsi_task *task);
++int iser_send_command(struct iscsi_conn      *conn,
++		      struct iscsi_cmd_task  *ctask);
+ 
+-int iser_send_data_out(struct iscsi_conn *conn,
+-		       struct iscsi_task *task,
+-		       struct iscsi_data *hdr);
++int iser_send_data_out(struct iscsi_conn     *conn,
++		       struct iscsi_cmd_task *ctask,
++		       struct iscsi_data          *hdr);
+ 
+ void iscsi_iser_recv(struct iscsi_conn *conn,
+ 		     struct iscsi_hdr       *hdr,
+ 		     char                   *rx_data,
+ 		     int                    rx_data_len);
+ 
+-void iser_conn_init(struct iser_conn *ib_conn);
+-
+-void iser_conn_get(struct iser_conn *ib_conn);
+-
+-void iser_conn_put(struct iser_conn *ib_conn);
++int  iser_conn_init(struct iser_conn **ib_conn);
+ 
+ void iser_conn_terminate(struct iser_conn *ib_conn);
+ 
+@@ -324,9 +322,9 @@ void iser_rcv_completion(struct iser_des
+ 
+ void iser_snd_completion(struct iser_desc *desc);
+ 
+-void iser_task_rdma_init(struct iscsi_iser_task *task);
++void iser_ctask_rdma_init(struct iscsi_iser_cmd_task     *ctask);
+ 
+-void iser_task_rdma_finalize(struct iscsi_iser_task *task);
++void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *ctask);
+ 
+ void iser_dto_buffs_release(struct iser_dto *dto);
+ 
+@@ -336,10 +334,10 @@ void iser_reg_single(struct iser_device 
+ 		     struct iser_regd_buf    *regd_buf,
+ 		     enum dma_data_direction direction);
+ 
+-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
++void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask,
+ 				     enum iser_data_dir         cmd_dir);
+ 
+-int  iser_reg_rdma_mem(struct iscsi_iser_task *task,
++int  iser_reg_rdma_mem(struct iscsi_iser_cmd_task *ctask,
+ 		       enum   iser_data_dir        cmd_dir);
+ 
+ int  iser_connect(struct iser_conn   *ib_conn,
+@@ -359,10 +357,10 @@ int  iser_post_send(struct iser_desc *tx
+ int iser_conn_state_comp(struct iser_conn *ib_conn,
+ 			 enum iser_ib_conn_state comp);
+ 
+-int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
++int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
+ 			    struct iser_data_buf       *data,
+ 			    enum   iser_data_dir       iser_dir,
+ 			    enum   dma_data_direction  dma_dir);
+ 
+-void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
++void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask);
+ #endif
+Index: ofed_kernel/drivers/infiniband/ulp/iser/iser_initiator.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/iser/iser_initiator.c
++++ ofed_kernel/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -28,6 +28,8 @@
+  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+  * SOFTWARE.
++ *
++ * $Id: iser_initiator.c 6964 2006-05-07 11:11:43Z ogerlitz $
+  */
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
+@@ -64,46 +66,46 @@ static void iser_dto_add_regd_buff(struc
+ 
+ /* Register user buffer memory and initialize passive rdma
+  *  dto descriptor. Total data size is stored in
+- *  iser_task->data[ISER_DIR_IN].data_len
++ *  iser_ctask->data[ISER_DIR_IN].data_len
+  */
+-static int iser_prepare_read_cmd(struct iscsi_task *task,
++static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask,
+ 				 unsigned int edtl)
+ 
+ {
+-	struct iscsi_iser_task *iser_task = task->dd_data;
++	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ 	struct iser_regd_buf *regd_buf;
+ 	int err;
+-	struct iser_hdr *hdr = &iser_task->desc.iser_header;
+-	struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
++	struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
++	struct iser_data_buf *buf_in = &iser_ctask->data[ISER_DIR_IN];
+ 
+-	err = iser_dma_map_task_data(iser_task,
++	err = iser_dma_map_task_data(iser_ctask,
+ 				     buf_in,
+ 				     ISER_DIR_IN,
+ 				     DMA_FROM_DEVICE);
+ 	if (err)
+ 		return err;
+ 
+-	if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
++	if (edtl > iser_ctask->data[ISER_DIR_IN].data_len) {
+ 		iser_err("Total data length: %ld, less than EDTL: "
+ 			 "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
+-			 iser_task->data[ISER_DIR_IN].data_len, edtl,
+-			 task->itt, iser_task->iser_conn);
++			 iser_ctask->data[ISER_DIR_IN].data_len, edtl,
++			 ctask->itt, iser_ctask->iser_conn);
+ 		return -EINVAL;
+ 	}
+ 
+-	err = iser_reg_rdma_mem(iser_task,ISER_DIR_IN);
++	err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_IN);
+ 	if (err) {
+ 		iser_err("Failed to set up Data-IN RDMA\n");
+ 		return err;
+ 	}
+-	regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];
++	regd_buf = &iser_ctask->rdma_regd[ISER_DIR_IN];
+ 
+ 	hdr->flags    |= ISER_RSV;
+ 	hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
+ 	hdr->read_va   = cpu_to_be64(regd_buf->reg.va);
+ 
+ 	iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
+-		 task->itt, regd_buf->reg.rkey,
++		 ctask->itt, regd_buf->reg.rkey,
+ 		 (unsigned long long)regd_buf->reg.va);
+ 
+ 	return 0;
+@@ -111,43 +113,43 @@ static int iser_prepare_read_cmd(struct 
+ 
+ /* Register user buffer memory and initialize passive rdma
+  *  dto descriptor. Total data size is stored in
+- *  task->data[ISER_DIR_OUT].data_len
++ *  ctask->data[ISER_DIR_OUT].data_len
+  */
+ static int
+-iser_prepare_write_cmd(struct iscsi_task *task,
++iser_prepare_write_cmd(struct iscsi_cmd_task *ctask,
+ 		       unsigned int imm_sz,
+ 		       unsigned int unsol_sz,
+ 		       unsigned int edtl)
+ {
+-	struct iscsi_iser_task *iser_task = task->dd_data;
++	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ 	struct iser_regd_buf *regd_buf;
+ 	int err;
+-	struct iser_dto *send_dto = &iser_task->desc.dto;
+-	struct iser_hdr *hdr = &iser_task->desc.iser_header;
+-	struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
++	struct iser_dto *send_dto = &iser_ctask->desc.dto;
++	struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
++	struct iser_data_buf *buf_out = &iser_ctask->data[ISER_DIR_OUT];
+ 
+-	err = iser_dma_map_task_data(iser_task,
++	err = iser_dma_map_task_data(iser_ctask,
+ 				     buf_out,
+ 				     ISER_DIR_OUT,
+ 				     DMA_TO_DEVICE);
+ 	if (err)
+ 		return err;
+ 
+-	if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
++	if (edtl > iser_ctask->data[ISER_DIR_OUT].data_len) {
+ 		iser_err("Total data length: %ld, less than EDTL: %d, "
+ 			 "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
+-			 iser_task->data[ISER_DIR_OUT].data_len,
+-			 edtl, task->itt, task->conn);
++			 iser_ctask->data[ISER_DIR_OUT].data_len,
++			 edtl, ctask->itt, ctask->conn);
+ 		return -EINVAL;
+ 	}
+ 
+-	err = iser_reg_rdma_mem(iser_task,ISER_DIR_OUT);
++	err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_OUT);
+ 	if (err != 0) {
+ 		iser_err("Failed to register write cmd RDMA mem\n");
+ 		return err;
+ 	}
+ 
+-	regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
++	regd_buf = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+ 
+ 	if (unsol_sz < edtl) {
+ 		hdr->flags     |= ISER_WSV;
+@@ -156,13 +158,13 @@ iser_prepare_write_cmd(struct iscsi_task
+ 
+ 		iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
+ 			 "VA:%#llX + unsol:%d\n",
+-			 task->itt, regd_buf->reg.rkey,
++			 ctask->itt, regd_buf->reg.rkey,
+ 			 (unsigned long long)regd_buf->reg.va, unsol_sz);
+ 	}
+ 
+ 	if (imm_sz > 0) {
+ 		iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
+-			 task->itt, imm_sz);
++			 ctask->itt, imm_sz);
+ 		iser_dto_add_regd_buff(send_dto,
+ 				       regd_buf,
+ 				       0,
+@@ -314,38 +316,38 @@ iser_check_xmit(struct iscsi_conn *conn,
+ /**
+  * iser_send_command - send command PDU
+  */
+-int iser_send_command(struct iscsi_conn *conn,
+-		      struct iscsi_task *task)
++int iser_send_command(struct iscsi_conn     *conn,
++		      struct iscsi_cmd_task *ctask)
+ {
+ 	struct iscsi_iser_conn *iser_conn = conn->dd_data;
+-	struct iscsi_iser_task *iser_task = task->dd_data;
++	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ 	struct iser_dto *send_dto = NULL;
+ 	unsigned long edtl;
+ 	int err = 0;
+ 	struct iser_data_buf *data_buf;
+ 
+-	struct iscsi_cmd *hdr =  task->hdr;
+-	struct scsi_cmnd *sc  =  task->sc;
++	struct iscsi_cmd *hdr =  ctask->hdr;
++	struct scsi_cmnd *sc  =  ctask->sc;
+ 
+ 	if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
+ 		iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
+ 		return -EPERM;
+ 	}
+-	if (iser_check_xmit(conn, task))
++	if (iser_check_xmit(conn, ctask))
+ 		return -ENOBUFS;
+ 
+ 	edtl = ntohl(hdr->data_length);
+ 
+ 	/* build the tx desc regd header and add it to the tx desc dto */
+-	iser_task->desc.type = ISCSI_TX_SCSI_COMMAND;
+-	send_dto = &iser_task->desc.dto;
+-	send_dto->task = iser_task;
+-	iser_create_send_desc(iser_conn, &iser_task->desc);
++	iser_ctask->desc.type = ISCSI_TX_SCSI_COMMAND;
++	send_dto = &iser_ctask->desc.dto;
++	send_dto->ctask = iser_ctask;
++	iser_create_send_desc(iser_conn, &iser_ctask->desc);
+ 
+ 	if (hdr->flags & ISCSI_FLAG_CMD_READ)
+-		data_buf = &iser_task->data[ISER_DIR_IN];
++		data_buf = &iser_ctask->data[ISER_DIR_IN];
+ 	else
+-		data_buf = &iser_task->data[ISER_DIR_OUT];
++		data_buf = &iser_ctask->data[ISER_DIR_OUT];
+ 
+ 	if (scsi_sg_count(sc)) { /* using a scatter list */
+ 		data_buf->buf  = scsi_sglist(sc);
+@@ -355,15 +357,15 @@ int iser_send_command(struct iscsi_conn 
+ 	data_buf->data_len = scsi_bufflen(sc);
+ 
+ 	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+-		err = iser_prepare_read_cmd(task, edtl);
++		err = iser_prepare_read_cmd(ctask, edtl);
+ 		if (err)
+ 			goto send_command_error;
+ 	}
+ 	if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
+-		err = iser_prepare_write_cmd(task,
+-					     task->imm_count,
+-				             task->imm_count +
+-					     task->unsol_count,
++		err = iser_prepare_write_cmd(ctask,
++					     ctask->imm_count,
++				             ctask->imm_count +
++					     ctask->unsol_count,
+ 					     edtl);
+ 		if (err)
+ 			goto send_command_error;
+@@ -378,27 +380,27 @@ int iser_send_command(struct iscsi_conn 
+ 		goto send_command_error;
+ 	}
+ 
+-	iser_task->status = ISER_TASK_STATUS_STARTED;
++	iser_ctask->status = ISER_TASK_STATUS_STARTED;
+ 
+-	err = iser_post_send(&iser_task->desc);
++	err = iser_post_send(&iser_ctask->desc);
+ 	if (!err)
+ 		return 0;
+ 
+ send_command_error:
+ 	iser_dto_buffs_release(send_dto);
+-	iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
++	iser_err("conn %p failed ctask->itt %d err %d\n",conn, ctask->itt, err);
+ 	return err;
+ }
+ 
+ /**
+  * iser_send_data_out - send data out PDU
+  */
+-int iser_send_data_out(struct iscsi_conn *conn,
+-		       struct iscsi_task *task,
++int iser_send_data_out(struct iscsi_conn     *conn,
++		       struct iscsi_cmd_task *ctask,
+ 		       struct iscsi_data *hdr)
+ {
+ 	struct iscsi_iser_conn *iser_conn = conn->dd_data;
+-	struct iscsi_iser_task *iser_task = task->dd_data;
++	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ 	struct iser_desc *tx_desc = NULL;
+ 	struct iser_dto *send_dto = NULL;
+ 	unsigned long buf_offset;
+@@ -411,7 +413,7 @@ int iser_send_data_out(struct iscsi_conn
+ 		return -EPERM;
+ 	}
+ 
+-	if (iser_check_xmit(conn, task))
++	if (iser_check_xmit(conn, ctask))
+ 		return -ENOBUFS;
+ 
+ 	itt = (__force uint32_t)hdr->itt;
+@@ -432,7 +434,7 @@ int iser_send_data_out(struct iscsi_conn
+ 
+ 	/* build the tx desc regd header and add it to the tx desc dto */
+ 	send_dto = &tx_desc->dto;
+-	send_dto->task = iser_task;
++	send_dto->ctask = iser_ctask;
+ 	iser_create_send_desc(iser_conn, tx_desc);
+ 
+ 	iser_reg_single(iser_conn->ib_conn->device,
+@@ -440,15 +442,15 @@ int iser_send_data_out(struct iscsi_conn
+ 
+ 	/* all data was registered for RDMA, we can use the lkey */
+ 	iser_dto_add_regd_buff(send_dto,
+-			       &iser_task->rdma_regd[ISER_DIR_OUT],
++			       &iser_ctask->rdma_regd[ISER_DIR_OUT],
+ 			       buf_offset,
+ 			       data_seg_len);
+ 
+-	if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
++	if (buf_offset + data_seg_len > iser_ctask->data[ISER_DIR_OUT].data_len) {
+ 		iser_err("Offset:%ld & DSL:%ld in Data-Out "
+ 			 "inconsistent with total len:%ld, itt:%d\n",
+ 			 buf_offset, data_seg_len,
+-			 iser_task->data[ISER_DIR_OUT].data_len, itt);
++			 iser_ctask->data[ISER_DIR_OUT].data_len, itt);
+ 		err = -EINVAL;
+ 		goto send_data_out_error;
+ 	}
+@@ -468,11 +470,10 @@ send_data_out_error:
+ }
+ 
+ int iser_send_control(struct iscsi_conn *conn,
+-		      struct iscsi_task *task)
++		      struct iscsi_mgmt_task *mtask)
+ {
+ 	struct iscsi_iser_conn *iser_conn = conn->dd_data;
+-	struct iscsi_iser_task *iser_task = task->dd_data;
+-	struct iser_desc *mdesc = &iser_task->desc;
++	struct iser_desc *mdesc = mtask->dd_data;
+ 	struct iser_dto *send_dto = NULL;
+ 	unsigned long data_seg_len;
+ 	int err = 0;
+@@ -484,27 +485,27 @@ int iser_send_control(struct iscsi_conn 
+ 		return -EPERM;
+ 	}
+ 
+-	if (iser_check_xmit(conn, task))
++	if (iser_check_xmit(conn,mtask))
+ 		return -ENOBUFS;
+ 
+ 	/* build the tx desc regd header and add it to the tx desc dto */
+ 	mdesc->type = ISCSI_TX_CONTROL;
+ 	send_dto = &mdesc->dto;
+-	send_dto->task = NULL;
++	send_dto->ctask = NULL;
+ 	iser_create_send_desc(iser_conn, mdesc);
+ 
+ 	device = iser_conn->ib_conn->device;
+ 
+ 	iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
+ 
+-	data_seg_len = ntoh24(task->hdr->dlength);
++	data_seg_len = ntoh24(mtask->hdr->dlength);
+ 
+ 	if (data_seg_len > 0) {
+ 		regd_buf = &mdesc->data_regd_buf;
+ 		memset(regd_buf, 0, sizeof(struct iser_regd_buf));
+ 		regd_buf->device = device;
+-		regd_buf->virt_addr = task->data;
+-		regd_buf->data_size = task->data_count;
++		regd_buf->virt_addr = mtask->data;
++		regd_buf->data_size = mtask->data_count;
+ 		iser_reg_single(device, regd_buf,
+ 				DMA_TO_DEVICE);
+ 		iser_dto_add_regd_buff(send_dto, regd_buf,
+@@ -534,13 +535,15 @@ send_control_error:
+ void iser_rcv_completion(struct iser_desc *rx_desc,
+ 			 unsigned long dto_xfer_len)
+ {
+-	struct iser_dto *dto = &rx_desc->dto;
++	struct iser_dto        *dto = &rx_desc->dto;
+ 	struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn;
+-	struct iscsi_task *task;
+-	struct iscsi_iser_task *iser_task;
++	struct iscsi_session *session = conn->iscsi_conn->session;
++	struct iscsi_cmd_task *ctask;
++	struct iscsi_iser_cmd_task *iser_ctask;
+ 	struct iscsi_hdr *hdr;
+ 	char   *rx_data = NULL;
+ 	int     rx_data_len = 0;
++	unsigned int itt;
+ 	unsigned char opcode;
+ 
+ 	hdr = &rx_desc->iscsi_header;
+@@ -556,24 +559,19 @@ void iser_rcv_completion(struct iser_des
+ 	opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+ 
+ 	if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
+-		spin_lock(&conn->iscsi_conn->session->lock);
+-		task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
+-		if (task)
+-			__iscsi_get_task(task);
+-		spin_unlock(&conn->iscsi_conn->session->lock);
+-
+-		if (!task)
++	        itt = get_itt(hdr->itt); /* mask out cid and age bits */
++		if (!(itt < session->cmds_max))
+ 			iser_err("itt can't be matched to task!!! "
+-				 "conn %p opcode %d itt %d\n",
+-				 conn->iscsi_conn, opcode, hdr->itt);
+-		else {
+-			iser_task = task->dd_data;
+-			iser_dbg("itt %d task %p\n",hdr->itt, task);
+-			iser_task->status = ISER_TASK_STATUS_COMPLETED;
+-			iser_task_rdma_finalize(iser_task);
+-			iscsi_put_task(task);
+-		}
++				 "conn %p opcode %d cmds_max %d itt %d\n",
++				 conn->iscsi_conn,opcode,session->cmds_max,itt);
++		/* use the mapping given with the cmds array indexed by itt */
++		ctask = (struct iscsi_cmd_task *)session->cmds[itt];
++		iser_ctask = ctask->dd_data;
++		iser_dbg("itt %d ctask %p\n",itt,ctask);
++		iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
++		iser_ctask_rdma_finalize(iser_ctask);
+ 	}
++
+ 	iser_dto_buffs_release(dto);
+ 
+ 	iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len);
+@@ -594,7 +592,7 @@ void iser_snd_completion(struct iser_des
+ 	struct iser_conn       *ib_conn = dto->ib_conn;
+ 	struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
+ 	struct iscsi_conn      *conn = iser_conn->iscsi_conn;
+-	struct iscsi_task *task;
++	struct iscsi_mgmt_task *mtask;
+ 	int resume_tx = 0;
+ 
+ 	iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
+@@ -617,31 +615,36 @@ void iser_snd_completion(struct iser_des
+ 
+ 	if (tx_desc->type == ISCSI_TX_CONTROL) {
+ 		/* this arithmetic is legal by libiscsi dd_data allocation */
+-		task = (void *) ((long)(void *)tx_desc -
+-				  sizeof(struct iscsi_task));
+-		if (task->hdr->itt == RESERVED_ITT)
+-			iscsi_put_task(task);
++		mtask = (void *) ((long)(void *)tx_desc -
++				  sizeof(struct iscsi_mgmt_task));
++		if (mtask->hdr->itt == RESERVED_ITT) {
++			struct iscsi_session *session = conn->session;
++
++			spin_lock(&conn->session->lock);
++			iscsi_free_mgmt_task(conn, mtask);
++			spin_unlock(&session->lock);
++		}
+ 	}
+ }
+ 
+-void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
++void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *iser_ctask)
+ 
+ {
+-	iser_task->status = ISER_TASK_STATUS_INIT;
++	iser_ctask->status = ISER_TASK_STATUS_INIT;
+ 
+-	iser_task->dir[ISER_DIR_IN] = 0;
+-	iser_task->dir[ISER_DIR_OUT] = 0;
++	iser_ctask->dir[ISER_DIR_IN] = 0;
++	iser_ctask->dir[ISER_DIR_OUT] = 0;
+ 
+-	iser_task->data[ISER_DIR_IN].data_len  = 0;
+-	iser_task->data[ISER_DIR_OUT].data_len = 0;
++	iser_ctask->data[ISER_DIR_IN].data_len  = 0;
++	iser_ctask->data[ISER_DIR_OUT].data_len = 0;
+ 
+-	memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
++	memset(&iser_ctask->rdma_regd[ISER_DIR_IN], 0,
+ 	       sizeof(struct iser_regd_buf));
+-	memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
++	memset(&iser_ctask->rdma_regd[ISER_DIR_OUT], 0,
+ 	       sizeof(struct iser_regd_buf));
+ }
+ 
+-void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
++void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
+ {
+ 	int deferred;
+ 	int is_rdma_aligned = 1;
+@@ -650,17 +653,17 @@ void iser_task_rdma_finalize(struct iscs
+ 	/* if we were reading, copy back to unaligned sglist,
+ 	 * anyway dma_unmap and free the copy
+ 	 */
+-	if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
++	if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL) {
+ 		is_rdma_aligned = 0;
+-		iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN);
++		iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_IN);
+ 	}
+-	if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
++	if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
+ 		is_rdma_aligned = 0;
+-		iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
++		iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_OUT);
+ 	}
+ 
+-	if (iser_task->dir[ISER_DIR_IN]) {
+-		regd = &iser_task->rdma_regd[ISER_DIR_IN];
++	if (iser_ctask->dir[ISER_DIR_IN]) {
++		regd = &iser_ctask->rdma_regd[ISER_DIR_IN];
+ 		deferred = iser_regd_buff_release(regd);
+ 		if (deferred) {
+ 			iser_err("%d references remain for BUF-IN rdma reg\n",
+@@ -668,8 +671,8 @@ void iser_task_rdma_finalize(struct iscs
+ 		}
+ 	}
+ 
+-	if (iser_task->dir[ISER_DIR_OUT]) {
+-		regd = &iser_task->rdma_regd[ISER_DIR_OUT];
++	if (iser_ctask->dir[ISER_DIR_OUT]) {
++		regd = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+ 		deferred = iser_regd_buff_release(regd);
+ 		if (deferred) {
+ 			iser_err("%d references remain for BUF-OUT rdma reg\n",
+@@ -679,7 +682,7 @@ void iser_task_rdma_finalize(struct iscs
+ 
+        /* if the data was unaligned, it was already unmapped and then copied */
+        if (is_rdma_aligned)
+-		iser_dma_unmap_task_data(iser_task);
++		iser_dma_unmap_task_data(iser_ctask);
+ }
+ 
+ void iser_dto_buffs_release(struct iser_dto *dto)
+Index: ofed_kernel/drivers/infiniband/ulp/iser/iser_memory.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/iser/iser_memory.c
++++ ofed_kernel/drivers/infiniband/ulp/iser/iser_memory.c
+@@ -28,6 +28,8 @@
+  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+  * SOFTWARE.
++ *
++ * $Id: iser_memory.c 6964 2006-05-07 11:11:43Z ogerlitz $
+  */
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+@@ -99,13 +101,13 @@ void iser_reg_single(struct iser_device 
+ /**
+  * iser_start_rdma_unaligned_sg
+  */
+-static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
++static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+ 					enum iser_data_dir cmd_dir)
+ {
+ 	int dma_nents;
+ 	struct ib_device *dev;
+ 	char *mem = NULL;
+-	struct iser_data_buf *data = &iser_task->data[cmd_dir];
++	struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
+ 	unsigned long  cmd_data_len = data->data_len;
+ 
+ 	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
+@@ -138,37 +140,37 @@ static int iser_start_rdma_unaligned_sg(
+ 		}
+ 	}
+ 
+-	sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
+-	iser_task->data_copy[cmd_dir].buf  =
+-		&iser_task->data_copy[cmd_dir].sg_single;
+-	iser_task->data_copy[cmd_dir].size = 1;
++	sg_init_one(&iser_ctask->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
++	iser_ctask->data_copy[cmd_dir].buf  =
++		&iser_ctask->data_copy[cmd_dir].sg_single;
++	iser_ctask->data_copy[cmd_dir].size = 1;
+ 
+-	iser_task->data_copy[cmd_dir].copy_buf  = mem;
++	iser_ctask->data_copy[cmd_dir].copy_buf  = mem;
+ 
+-	dev = iser_task->iser_conn->ib_conn->device->ib_device;
++	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+ 	dma_nents = ib_dma_map_sg(dev,
+-				  &iser_task->data_copy[cmd_dir].sg_single,
++				  &iser_ctask->data_copy[cmd_dir].sg_single,
+ 				  1,
+ 				  (cmd_dir == ISER_DIR_OUT) ?
+ 				  DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ 	BUG_ON(dma_nents == 0);
+ 
+-	iser_task->data_copy[cmd_dir].dma_nents = dma_nents;
++	iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
+ 	return 0;
+ }
+ 
+ /**
+  * iser_finalize_rdma_unaligned_sg
+  */
+-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
++void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+ 				     enum iser_data_dir         cmd_dir)
+ {
+ 	struct ib_device *dev;
+ 	struct iser_data_buf *mem_copy;
+ 	unsigned long  cmd_data_len;
+ 
+-	dev = iser_task->iser_conn->ib_conn->device->ib_device;
+-	mem_copy = &iser_task->data_copy[cmd_dir];
++	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
++	mem_copy = &iser_ctask->data_copy[cmd_dir];
+ 
+ 	ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
+ 			(cmd_dir == ISER_DIR_OUT) ?
+@@ -184,8 +186,8 @@ void iser_finalize_rdma_unaligned_sg(str
+ 		/* copy back read RDMA to unaligned sg */
+ 		mem	= mem_copy->copy_buf;
+ 
+-		sgl	= (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf;
+-		sg_size = iser_task->data[ISER_DIR_IN].size;
++		sgl	= (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
++		sg_size = iser_ctask->data[ISER_DIR_IN].size;
+ 
+ 		p = mem;
+ 		for_each_sg(sgl, sg, sg_size, i) {
+@@ -198,7 +200,7 @@ void iser_finalize_rdma_unaligned_sg(str
+ 		}
+ 	}
+ 
+-	cmd_data_len = iser_task->data[cmd_dir].data_len;
++	cmd_data_len = iser_ctask->data[cmd_dir].data_len;
+ 
+ 	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
+ 		free_pages((unsigned long)mem_copy->copy_buf,
+@@ -376,15 +378,15 @@ static void iser_page_vec_build(struct i
+ 	}
+ }
+ 
+-int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+-			    struct iser_data_buf *data,
+-			    enum iser_data_dir iser_dir,
+-			    enum dma_data_direction dma_dir)
++int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
++			    struct iser_data_buf       *data,
++			    enum   iser_data_dir       iser_dir,
++			    enum   dma_data_direction  dma_dir)
+ {
+ 	struct ib_device *dev;
+ 
+-	iser_task->dir[iser_dir] = 1;
+-	dev = iser_task->iser_conn->ib_conn->device->ib_device;
++	iser_ctask->dir[iser_dir] = 1;
++	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+ 
+ 	data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
+ 	if (data->dma_nents == 0) {
+@@ -394,20 +396,20 @@ int iser_dma_map_task_data(struct iscsi_
+ 	return 0;
+ }
+ 
+-void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
++void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
+ {
+ 	struct ib_device *dev;
+ 	struct iser_data_buf *data;
+ 
+-	dev = iser_task->iser_conn->ib_conn->device->ib_device;
++	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+ 
+-	if (iser_task->dir[ISER_DIR_IN]) {
+-		data = &iser_task->data[ISER_DIR_IN];
++	if (iser_ctask->dir[ISER_DIR_IN]) {
++		data = &iser_ctask->data[ISER_DIR_IN];
+ 		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
+ 	}
+ 
+-	if (iser_task->dir[ISER_DIR_OUT]) {
+-		data = &iser_task->data[ISER_DIR_OUT];
++	if (iser_ctask->dir[ISER_DIR_OUT]) {
++		data = &iser_ctask->data[ISER_DIR_OUT];
+ 		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
+ 	}
+ }
+@@ -418,21 +420,21 @@ void iser_dma_unmap_task_data(struct isc
+  *
+  * returns 0 on success, errno code on failure
+  */
+-int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
++int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
+ 		      enum   iser_data_dir        cmd_dir)
+ {
+-	struct iscsi_conn    *iscsi_conn = iser_task->iser_conn->iscsi_conn;
+-	struct iser_conn     *ib_conn = iser_task->iser_conn->ib_conn;
++	struct iscsi_conn    *iscsi_conn = iser_ctask->iser_conn->iscsi_conn;
++	struct iser_conn     *ib_conn = iser_ctask->iser_conn->ib_conn;
+ 	struct iser_device   *device = ib_conn->device;
+ 	struct ib_device     *ibdev = device->ib_device;
+-	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
++	struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
+ 	struct iser_regd_buf *regd_buf;
+ 	int aligned_len;
+ 	int err;
+ 	int i;
+ 	struct scatterlist *sg;
+ 
+-	regd_buf = &iser_task->rdma_regd[cmd_dir];
++	regd_buf = &iser_ctask->rdma_regd[cmd_dir];
+ 
+ 	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
+ 	if (aligned_len != mem->dma_nents) {
+@@ -442,13 +444,13 @@ int iser_reg_rdma_mem(struct iscsi_iser_
+ 		iser_data_buf_dump(mem, ibdev);
+ 
+ 		/* unmap the command data before accessing it */
+-		iser_dma_unmap_task_data(iser_task);
++		iser_dma_unmap_task_data(iser_ctask);
+ 
+ 		/* allocate copy buf, if we are writing, copy the */
+ 		/* unaligned scatterlist, dma map the copy        */
+-		if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
++		if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0)
+ 				return -ENOMEM;
+-		mem = &iser_task->data_copy[cmd_dir];
++		mem = &iser_ctask->data_copy[cmd_dir];
+ 	}
+ 
+ 	/* if there a single dma entry, FMR is not needed */
+@@ -472,9 +474,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_
+ 		err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
+ 		if (err) {
+ 			iser_data_buf_dump(mem, ibdev);
+-			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
+-				 mem->dma_nents,
+-				 ntoh24(iser_task->desc.iscsi_header.dlength));
++			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
++				 ntoh24(iser_ctask->desc.iscsi_header.dlength));
+ 			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
+ 				 ib_conn->page_vec->data_size, ib_conn->page_vec->length,
+ 				 ib_conn->page_vec->offset);
+Index: ofed_kernel/drivers/infiniband/ulp/iser/iser_verbs.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/iser/iser_verbs.c
++++ ofed_kernel/drivers/infiniband/ulp/iser/iser_verbs.c
+@@ -29,6 +29,8 @@
+  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+  * SOFTWARE.
++ *
++ * $Id: iser_verbs.c 7051 2006-05-10 12:29:11Z ogerlitz $
+  */
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+@@ -322,18 +324,7 @@ static void iser_conn_release(struct ise
+ 		iser_device_try_release(device);
+ 	if (ib_conn->iser_conn)
+ 		ib_conn->iser_conn->ib_conn = NULL;
+-	iscsi_destroy_endpoint(ib_conn->ep);
+-}
+-
+-void iser_conn_get(struct iser_conn *ib_conn)
+-{
+-	atomic_inc(&ib_conn->refcount);
+-}
+-
+-void iser_conn_put(struct iser_conn *ib_conn)
+-{
+-	if (atomic_dec_and_test(&ib_conn->refcount))
+-		iser_conn_release(ib_conn);
++	kfree(ib_conn);
+ }
+ 
+ /**
+@@ -357,7 +348,7 @@ void iser_conn_terminate(struct iser_con
+ 	wait_event_interruptible(ib_conn->wait,
+ 				 ib_conn->state == ISER_CONN_DOWN);
+ 
+-	iser_conn_put(ib_conn);
++	iser_conn_release(ib_conn);
+ }
+ 
+ static void iser_connect_error(struct rdma_cm_id *cma_id)
+@@ -482,7 +473,6 @@ static int iser_cma_handler(struct rdma_
+ 		break;
+ 	case RDMA_CM_EVENT_DISCONNECTED:
+ 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
+-	case RDMA_CM_EVENT_ADDR_CHANGE:
+ 		iser_disconnected_handler(cma_id);
+ 		break;
+ 	default:
+@@ -492,15 +482,24 @@ static int iser_cma_handler(struct rdma_
+ 	return ret;
+ }
+ 
+-void iser_conn_init(struct iser_conn *ib_conn)
++int iser_conn_init(struct iser_conn **ibconn)
+ {
++	struct iser_conn *ib_conn;
++
++	ib_conn = kzalloc(sizeof *ib_conn, GFP_KERNEL);
++	if (!ib_conn) {
++		iser_err("can't alloc memory for struct iser_conn\n");
++		return -ENOMEM;
++	}
+ 	ib_conn->state = ISER_CONN_INIT;
+ 	init_waitqueue_head(&ib_conn->wait);
+ 	atomic_set(&ib_conn->post_recv_buf_count, 0);
+ 	atomic_set(&ib_conn->post_send_buf_count, 0);
+-	atomic_set(&ib_conn->refcount, 1);
+ 	INIT_LIST_HEAD(&ib_conn->conn_list);
+ 	spin_lock_init(&ib_conn->lock);
++
++	*ibconn = ib_conn;
++	return 0;
+ }
+ 
+  /**

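A note on the iser hunks above: they retarget the upstream iSER initiator to the older libiscsi in this kernel, renaming iscsi_task/iscsi_iser_task back to the iscsi_cmd_task/iscsi_mgmt_task split, dropping the endpoint and refcount handling (iser_conn_get/put), and turning iser_conn_init() into a function that allocates the connection itself and returns it through an out parameter. A minimal sketch of that allocate-and-return pattern, with hypothetical names (my_conn, my_conn_init) standing in for the driver types:

    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/errno.h>

    struct my_conn {
            int state;
            spinlock_t lock;
    };

    /* Allocate and zero a connection, hand it back via *out. */
    static int my_conn_init(struct my_conn **out)
    {
            struct my_conn *c;

            c = kzalloc(sizeof(*c), GFP_KERNEL);    /* zeroed allocation */
            if (!c)
                    return -ENOMEM;                 /* caller sees the failure */

            c->state = 0;
            spin_lock_init(&c->lock);

            *out = c;                               /* ownership moves to caller */
            return 0;
    }

On teardown the backport frees the structure directly with kfree() rather than dropping a refcount, which is why iser_conn_get()/iser_conn_put() disappear in the same hunk.
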
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iser_02_fix_iscsi_if_h.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iser_02_fix_iscsi_if_h.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iser_02_fix_iscsi_if_h.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,142 @@
+From c703d2c0ca18a6a5b8f4ecbd5c02654a15fb11ff Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Tue, 26 Aug 2008 14:26:17 +0300
+Subject: [PATCH] fix iscsi_if.h
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ include/scsi/iscsi_if.h |   93 ++++++++++++++++++++++--------------------------
+ 1 file changed, 43 insertions(+), 50 deletions(-)
+
+Index: ofed_kernel/include/scsi/iscsi_if.h
+===================================================================
+--- ofed_kernel.orig/include/scsi/iscsi_if.h
++++ ofed_kernel/include/scsi/iscsi_if.h
+@@ -50,7 +50,6 @@ enum iscsi_uevent_e {
+ 	ISCSI_UEVENT_TGT_DSCVR		= UEVENT_BASE + 15,
+ 	ISCSI_UEVENT_SET_HOST_PARAM	= UEVENT_BASE + 16,
+ 	ISCSI_UEVENT_UNBIND_SESSION	= UEVENT_BASE + 17,
+-	ISCSI_UEVENT_CREATE_BOUND_SESSION	= UEVENT_BASE + 18,
+ 
+ 	/* up events */
+ 	ISCSI_KEVENT_RECV_PDU		= KEVENT_BASE + 1,
+@@ -79,12 +78,6 @@ struct iscsi_uevent {
+ 			uint16_t	cmds_max;
+ 			uint16_t	queue_depth;
+ 		} c_session;
+-		struct msg_create_bound_session {
+-			uint64_t	ep_handle;
+-			uint32_t	initial_cmdsn;
+-			uint16_t	cmds_max;
+-			uint16_t	queue_depth;
+-		} c_bound_session;
+ 		struct msg_destroy_session {
+ 			uint32_t	sid;
+ 		} d_session;
+@@ -257,49 +250,42 @@ enum iscsi_param {
+ 
+ 	ISCSI_PARAM_PING_TMO,
+ 	ISCSI_PARAM_RECV_TMO,
+-
+-	ISCSI_PARAM_IFACE_NAME,
+-	ISCSI_PARAM_ISID,
+-	ISCSI_PARAM_INITIATOR_NAME,
+ 	/* must always be last */
+ 	ISCSI_PARAM_MAX,
+ };
+ 
+-#define ISCSI_MAX_RECV_DLENGTH		(1ULL << ISCSI_PARAM_MAX_RECV_DLENGTH)
+-#define ISCSI_MAX_XMIT_DLENGTH		(1ULL << ISCSI_PARAM_MAX_XMIT_DLENGTH)
+-#define ISCSI_HDRDGST_EN		(1ULL << ISCSI_PARAM_HDRDGST_EN)
+-#define ISCSI_DATADGST_EN		(1ULL << ISCSI_PARAM_DATADGST_EN)
+-#define ISCSI_INITIAL_R2T_EN		(1ULL << ISCSI_PARAM_INITIAL_R2T_EN)
+-#define ISCSI_MAX_R2T			(1ULL << ISCSI_PARAM_MAX_R2T)
+-#define ISCSI_IMM_DATA_EN		(1ULL << ISCSI_PARAM_IMM_DATA_EN)
+-#define ISCSI_FIRST_BURST		(1ULL << ISCSI_PARAM_FIRST_BURST)
+-#define ISCSI_MAX_BURST			(1ULL << ISCSI_PARAM_MAX_BURST)
+-#define ISCSI_PDU_INORDER_EN		(1ULL << ISCSI_PARAM_PDU_INORDER_EN)
+-#define ISCSI_DATASEQ_INORDER_EN	(1ULL << ISCSI_PARAM_DATASEQ_INORDER_EN)
+-#define ISCSI_ERL			(1ULL << ISCSI_PARAM_ERL)
+-#define ISCSI_IFMARKER_EN		(1ULL << ISCSI_PARAM_IFMARKER_EN)
+-#define ISCSI_OFMARKER_EN		(1ULL << ISCSI_PARAM_OFMARKER_EN)
+-#define ISCSI_EXP_STATSN		(1ULL << ISCSI_PARAM_EXP_STATSN)
+-#define ISCSI_TARGET_NAME		(1ULL << ISCSI_PARAM_TARGET_NAME)
+-#define ISCSI_TPGT			(1ULL << ISCSI_PARAM_TPGT)
+-#define ISCSI_PERSISTENT_ADDRESS	(1ULL << ISCSI_PARAM_PERSISTENT_ADDRESS)
+-#define ISCSI_PERSISTENT_PORT		(1ULL << ISCSI_PARAM_PERSISTENT_PORT)
+-#define ISCSI_SESS_RECOVERY_TMO		(1ULL << ISCSI_PARAM_SESS_RECOVERY_TMO)
+-#define ISCSI_CONN_PORT			(1ULL << ISCSI_PARAM_CONN_PORT)
+-#define ISCSI_CONN_ADDRESS		(1ULL << ISCSI_PARAM_CONN_ADDRESS)
+-#define ISCSI_USERNAME			(1ULL << ISCSI_PARAM_USERNAME)
+-#define ISCSI_USERNAME_IN		(1ULL << ISCSI_PARAM_USERNAME_IN)
+-#define ISCSI_PASSWORD			(1ULL << ISCSI_PARAM_PASSWORD)
+-#define ISCSI_PASSWORD_IN		(1ULL << ISCSI_PARAM_PASSWORD_IN)
+-#define ISCSI_FAST_ABORT		(1ULL << ISCSI_PARAM_FAST_ABORT)
+-#define ISCSI_ABORT_TMO			(1ULL << ISCSI_PARAM_ABORT_TMO)
+-#define ISCSI_LU_RESET_TMO		(1ULL << ISCSI_PARAM_LU_RESET_TMO)
+-#define ISCSI_HOST_RESET_TMO		(1ULL << ISCSI_PARAM_HOST_RESET_TMO)
+-#define ISCSI_PING_TMO			(1ULL << ISCSI_PARAM_PING_TMO)
+-#define ISCSI_RECV_TMO			(1ULL << ISCSI_PARAM_RECV_TMO)
+-#define ISCSI_IFACE_NAME		(1ULL << ISCSI_PARAM_IFACE_NAME)
+-#define ISCSI_ISID			(1ULL << ISCSI_PARAM_ISID)
+-#define ISCSI_INITIATOR_NAME		(1ULL << ISCSI_PARAM_INITIATOR_NAME)
++#define ISCSI_MAX_RECV_DLENGTH		(1 << ISCSI_PARAM_MAX_RECV_DLENGTH)
++#define ISCSI_MAX_XMIT_DLENGTH		(1 << ISCSI_PARAM_MAX_XMIT_DLENGTH)
++#define ISCSI_HDRDGST_EN		(1 << ISCSI_PARAM_HDRDGST_EN)
++#define ISCSI_DATADGST_EN		(1 << ISCSI_PARAM_DATADGST_EN)
++#define ISCSI_INITIAL_R2T_EN		(1 << ISCSI_PARAM_INITIAL_R2T_EN)
++#define ISCSI_MAX_R2T			(1 << ISCSI_PARAM_MAX_R2T)
++#define ISCSI_IMM_DATA_EN		(1 << ISCSI_PARAM_IMM_DATA_EN)
++#define ISCSI_FIRST_BURST		(1 << ISCSI_PARAM_FIRST_BURST)
++#define ISCSI_MAX_BURST			(1 << ISCSI_PARAM_MAX_BURST)
++#define ISCSI_PDU_INORDER_EN		(1 << ISCSI_PARAM_PDU_INORDER_EN)
++#define ISCSI_DATASEQ_INORDER_EN	(1 << ISCSI_PARAM_DATASEQ_INORDER_EN)
++#define ISCSI_ERL			(1 << ISCSI_PARAM_ERL)
++#define ISCSI_IFMARKER_EN		(1 << ISCSI_PARAM_IFMARKER_EN)
++#define ISCSI_OFMARKER_EN		(1 << ISCSI_PARAM_OFMARKER_EN)
++#define ISCSI_EXP_STATSN		(1 << ISCSI_PARAM_EXP_STATSN)
++#define ISCSI_TARGET_NAME		(1 << ISCSI_PARAM_TARGET_NAME)
++#define ISCSI_TPGT			(1 << ISCSI_PARAM_TPGT)
++#define ISCSI_PERSISTENT_ADDRESS	(1 << ISCSI_PARAM_PERSISTENT_ADDRESS)
++#define ISCSI_PERSISTENT_PORT		(1 << ISCSI_PARAM_PERSISTENT_PORT)
++#define ISCSI_SESS_RECOVERY_TMO		(1 << ISCSI_PARAM_SESS_RECOVERY_TMO)
++#define ISCSI_CONN_PORT			(1 << ISCSI_PARAM_CONN_PORT)
++#define ISCSI_CONN_ADDRESS		(1 << ISCSI_PARAM_CONN_ADDRESS)
++#define ISCSI_USERNAME			(1 << ISCSI_PARAM_USERNAME)
++#define ISCSI_USERNAME_IN		(1 << ISCSI_PARAM_USERNAME_IN)
++#define ISCSI_PASSWORD			(1 << ISCSI_PARAM_PASSWORD)
++#define ISCSI_PASSWORD_IN		(1 << ISCSI_PARAM_PASSWORD_IN)
++#define ISCSI_FAST_ABORT		(1 << ISCSI_PARAM_FAST_ABORT)
++#define ISCSI_ABORT_TMO			(1 << ISCSI_PARAM_ABORT_TMO)
++#define ISCSI_LU_RESET_TMO		(1 << ISCSI_PARAM_LU_RESET_TMO)
++#define ISCSI_HOST_RESET_TMO		(1 << ISCSI_PARAM_HOST_RESET_TMO)
++#define ISCSI_PING_TMO			(1 << ISCSI_PARAM_PING_TMO)
++#define ISCSI_RECV_TMO			(1 << ISCSI_PARAM_RECV_TMO)
+ 
+ /* iSCSI HBA params */
+ enum iscsi_host_param {
+@@ -310,13 +296,20 @@ enum iscsi_host_param {
+ 	ISCSI_HOST_PARAM_MAX,
+ };
+ 
+-#define ISCSI_HOST_HWADDRESS		(1ULL << ISCSI_HOST_PARAM_HWADDRESS)
+-#define ISCSI_HOST_INITIATOR_NAME	(1ULL << ISCSI_HOST_PARAM_INITIATOR_NAME)
+-#define ISCSI_HOST_NETDEV_NAME		(1ULL << ISCSI_HOST_PARAM_NETDEV_NAME)
+-#define ISCSI_HOST_IPADDRESS		(1ULL << ISCSI_HOST_PARAM_IPADDRESS)
++#define ISCSI_HOST_HWADDRESS		(1 << ISCSI_HOST_PARAM_HWADDRESS)
++#define ISCSI_HOST_INITIATOR_NAME	(1 << ISCSI_HOST_PARAM_INITIATOR_NAME)
++#define ISCSI_HOST_NETDEV_NAME		(1 << ISCSI_HOST_PARAM_NETDEV_NAME)
++#define ISCSI_HOST_IPADDRESS		(1 << ISCSI_HOST_PARAM_IPADDRESS)
+ 
+ #define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
+ #define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr)
++#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
++
++/**
++ * iscsi_hostdata - get LLD hostdata from scsi_host
++ * @_hostdata: pointer to scsi host's hostdata
++ **/
++#define iscsi_hostdata(_hostdata) ((void*)_hostdata + sizeof(unsigned long))
+ 
+ /*
+  * These flags presents iSCSI Data-Path capabilities.

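The iscsi_if.h patch above trims the parameter list back to what the RHEL 5.3 base understands and, with fewer parameters to index, reverts the capability-mask constants from 1ULL shifts to plain int shifts; upstream needs the 64-bit form once the enum grows past bit 31, where a 32-bit shift would be undefined. A hedged sketch of how such a mask is typically built and tested (the names here are illustrative, not from this header):

    #include <linux/types.h>

    enum example_param {
            EXAMPLE_PARAM_ERL,
            EXAMPLE_PARAM_TPGT,
            EXAMPLE_PARAM_MAX,              /* must always be last */
    };

    #define EXAMPLE_ERL     (1ULL << EXAMPLE_PARAM_ERL)
    #define EXAMPLE_TPGT    (1ULL << EXAMPLE_PARAM_TPGT)

    /* A transport advertises supported parameters as a bitmask. */
    static inline int example_param_supported(u64 mask, enum example_param p)
    {
            return (mask & (1ULL << p)) != 0;
    }

The patch also restores the iscsi_hostdata()/hostdata_session() helpers that older open-iscsi code expects to find in this header.
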
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iw_cxgb3_0010_states.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iw_cxgb3_0010_states.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iw_cxgb3_0010_states.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,24 @@
+---
+ drivers/infiniband/hw/cxgb3/iwch_cm.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+Index: ofed_kernel/drivers/infiniband/hw/cxgb3/iwch_cm.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/cxgb3/iwch_cm.c
++++ ofed_kernel/drivers/infiniband/hw/cxgb3/iwch_cm.c
+@@ -47,6 +47,7 @@
+ #include "iwch_provider.h"
+ #include "iwch_cm.h"
+ 
++#ifdef DEBUG
+ static char *states[] = {
+ 	"idle",
+ 	"listen",
+@@ -62,6 +63,7 @@ static char *states[] = {
+ 	"dead",
+ 	NULL,
+ };
++#endif
+ 
+ int peer2peer = 0;
+ module_param(peer2peer, int, 0644);

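The cxgb3 patch above only fences the state-name table with #ifdef DEBUG, presumably because the debug printouts that reference it compile out on this target and the otherwise-unused static array would trigger a compiler warning. A minimal sketch of the same guard pattern, with illustrative names:

    #ifdef DEBUG
    static const char *state_names[] = {
            "idle", "listen", "connecting", "established", "closing", NULL,
    };
    #define state_str(s)    (state_names[(s)])
    #else
    #define state_str(s)    ""              /* debug output compiled out */
    #endif
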
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iw_nes_050_to_2_6_24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iw_nes_050_to_2_6_24.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iw_nes_050_to_2_6_24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,27 @@
+diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1118,19 +1118,19 @@ static ssize_t nes_show_wqm_quanta(struc
+ static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
+ 					const char *buf, size_t count)
+ {
+-	unsigned long wqm_quanta_value;
++	u32 wqm_quanta;
+ 	u32 wqm_config1;
+ 	u32 i = 0;
+ 	struct nes_device *nesdev;
+ 
+-	strict_strtoul(buf, 0, &wqm_quanta_value);
++	wqm_quanta = simple_strtoul(buf, NULL, 0);
+ 	list_for_each_entry(nesdev, &nes_dev_list, list) {
+ 		if (i == ee_flsh_adapter) {
+-			nesdev->nesadapter->wqm_quanta = wqm_quanta_value;
++			nesdev->nesadapter->wqm_quanta = wqm_quanta;
+ 			wqm_config1 = nes_read_indexed(nesdev,
+ 						NES_IDX_WQM_CONFIG1);
+ 			nes_write_indexed(nesdev, NES_IDX_WQM_CONFIG1,
+-					((wqm_quanta_value << 1) |
++					((wqm_quanta << 1) |
+ 					(wqm_config1 & 0x00000001)));
+ 			break;
+ 		}

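The first iw_nes patch above replaces strict_strtoul(), which this kernel does not provide, with the older simple_strtoul() when parsing the wqm_quanta input; the cost is that malformed input can no longer be rejected, since simple_strtoul() has no error return. A small hedged sketch of the older call (the wrapper name is illustrative):

    #include <linux/kernel.h>
    #include <linux/types.h>

    static u32 parse_quanta(const char *buf)
    {
            /*
             * simple_strtoul() stops at the first non-digit and cannot
             * fail; on newer kernels strict_strtoul() would let the
             * caller return -EINVAL for bad input instead.
             */
            return simple_strtoul(buf, NULL, 0);
    }
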
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iw_nes_100_to_2_6_23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iw_nes_100_to_2_6_23.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iw_nes_100_to_2_6_23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,378 @@
+diff -Nurp linux-2.6/drivers/infiniband/hw/nes/Kconfig nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig
+--- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:38:45.000000000 -0500
+@@ -2,7 +2,6 @@ config INFINIBAND_NES
+ 	tristate "NetEffect RNIC Driver"
+ 	depends on PCI && INET && INFINIBAND
+ 	select LIBCRC32C
+-	select INET_LRO
+ 	---help---
+ 	  This is a low-level driver for NetEffect RDMA enabled
+ 	  Network Interface Cards (RNIC).
+diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
+@@ -360,10 +360,11 @@ struct ib_qp *nes_get_qp(struct ib_devic
+  */
+ static void nes_print_macaddr(struct net_device *netdev)
+ {
+-	DECLARE_MAC_BUF(mac);
+-
+-	nes_debug(NES_DBG_INIT, "%s: %s, IRQ %u\n",
+-		  netdev->name, print_mac(mac, netdev->dev_addr), netdev->irq);
++	nes_debug(NES_DBG_INIT, "%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, IRQ %u\n",
++			netdev->name,
++			netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
++			netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
++			netdev->irq);
+ }
+ 
+ /**
+diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_cm.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c
+--- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1086,7 +1086,6 @@ static int nes_addr_resolve_neigh(struct
+ 	struct flowi fl;
+ 	struct neighbour *neigh;
+ 	int rc = -1;
+-	DECLARE_MAC_BUF(mac);
+ 
+ 	memset(&fl, 0, sizeof fl);
+ 	fl.nl_u.ip4_u.daddr = htonl(dst_ip);
+@@ -1100,8 +1099,11 @@ static int nes_addr_resolve_neigh(struct
+ 	if (neigh) {
+ 		if (neigh->nud_state & NUD_VALID) {
+ 			nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
+-				  " is %s, Gateway is 0x%08X \n", dst_ip,
+-				  print_mac(mac, neigh->ha), ntohl(rt->rt_gateway));
++				" is %02X:%02X:%02X:%02X:%02X:%02X, Gateway is"
++				" 0x%08X \n",
++				dst_ip, neigh->ha[0], neigh->ha[1], neigh->ha[2],
++				neigh->ha[3], neigh->ha[4], neigh->ha[5],
++				ntohl(rt->rt_gateway));
+ 			nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
+ 					     dst_ip, NES_ARP_ADD);
+ 			rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
+@@ -1130,7 +1132,6 @@ static struct nes_cm_node *make_cm_node(
+ 	int arpindex = 0;
+ 	struct nes_device *nesdev;
+ 	struct nes_adapter *nesadapter;
+-	DECLARE_MAC_BUF(mac);
+ 
+ 	/* create an hte and cm_node for this instance */
+ 	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
+@@ -1197,8 +1198,11 @@ static struct nes_cm_node *make_cm_node(
+ 
+ 	/* copy the mac addr to node context */
+ 	memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
+-	nes_debug(NES_DBG_CM, "Remote mac addr from arp table: %s\n",
+-		  print_mac(mac, cm_node->rem_mac));
++	nes_debug(NES_DBG_CM, "Remote mac addr from arp table:%02x,"
++			" %02x, %02x, %02x, %02x, %02x\n",
++			cm_node->rem_mac[0], cm_node->rem_mac[1],
++			cm_node->rem_mac[2], cm_node->rem_mac[3],
++			cm_node->rem_mac[4], cm_node->rem_mac[5]);
+ 
+ 	add_hte_node(cm_core, cm_node);
+ 	atomic_inc(&cm_nodes_created);
+diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:38:45.000000000 -0500
+@@ -38,14 +38,9 @@
+ #include <linux/ip.h>
+ #include <linux/tcp.h>
+ #include <linux/if_vlan.h>
+-#include <linux/inet_lro.h>
+ 
+ #include "nes.h"
+ 
+-static unsigned int nes_lro_max_aggr = NES_LRO_MAX_AGGR;
+-module_param(nes_lro_max_aggr, uint, 0444);
+-MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
+-
+ static int wide_ppm_offset;
+ module_param(wide_ppm_offset, int, 0644);
+ MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm");
+@@ -1531,25 +1526,6 @@ static void nes_rq_wqes_timeout(unsigned
+ }
+ 
+ 
+-static int nes_lro_get_skb_hdr(struct sk_buff *skb, void **iphdr,
+-			       void **tcph, u64 *hdr_flags, void *priv)
+-{
+-	unsigned int ip_len;
+-	struct iphdr *iph;
+-	skb_reset_network_header(skb);
+-	iph = ip_hdr(skb);
+-	if (iph->protocol != IPPROTO_TCP)
+-		return -1;
+-	ip_len = ip_hdrlen(skb);
+-	skb_set_transport_header(skb, ip_len);
+-	*tcph = tcp_hdr(skb);
+-
+-	*hdr_flags = LRO_IPV4 | LRO_TCP;
+-	*iphdr = iph;
+-	return 0;
+-}
+-
+-
+ /**
+  * nes_init_nic_qp
+  */
+@@ -1773,14 +1749,6 @@ int nes_init_nic_qp(struct nes_device *n
+ 			jumbomode = 1;
+ 		nes_nic_init_timer_defaults(nesdev, jumbomode);
+ 	}
+-	nesvnic->lro_mgr.max_aggr       = nes_lro_max_aggr;
+-	nesvnic->lro_mgr.max_desc       = NES_MAX_LRO_DESCRIPTORS;
+-	nesvnic->lro_mgr.lro_arr        = nesvnic->lro_desc;
+-	nesvnic->lro_mgr.get_skb_header = nes_lro_get_skb_hdr;
+-	nesvnic->lro_mgr.features       = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
+-	nesvnic->lro_mgr.dev            = netdev;
+-	nesvnic->lro_mgr.ip_summed      = CHECKSUM_UNNECESSARY;
+-	nesvnic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
+ 	return 0;
+ }
+ 
+@@ -2556,7 +2524,7 @@ static void nes_nic_napi_ce_handler(stru
+ {
+ 	struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
+ 
+-	netif_rx_schedule(nesdev->netdev[nesvnic->netdev_index], &nesvnic->napi);
++	netif_rx_schedule(nesdev->netdev[nesvnic->netdev_index]);
+ }
+ 
+ 
+@@ -2591,13 +2559,10 @@ void nes_nic_ce_handler(struct nes_devic
+ 	u16 pkt_type;
+ 	u16 rqes_processed = 0;
+ 	u8 sq_cqes = 0;
+-	u8 nes_use_lro = 0;
+ 
+ 	head = cq->cq_head;
+ 	cq_size = cq->cq_size;
+ 	cq->cqes_pending = 1;
+-	if (nesvnic->netdev->features & NETIF_F_LRO)
+-		nes_use_lro = 1;
+ 	do {
+ 		if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
+ 				NES_NIC_CQE_VALID) {
+@@ -2729,17 +2694,9 @@ void nes_nic_ce_handler(struct nes_devic
+ 							>> 16);
+ 					nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
+ 							nesvnic->netdev->name, vlan_tag);
+-					if (nes_use_lro)
+-						lro_vlan_hwaccel_receive_skb(&nesvnic->lro_mgr, rx_skb,
+-								nesvnic->vlan_grp, vlan_tag, NULL);
+-					else
+-						nes_vlan_rx(rx_skb, nesvnic->vlan_grp, vlan_tag);
+-				} else {
+-					if (nes_use_lro)
+-						lro_receive_skb(&nesvnic->lro_mgr, rx_skb, NULL);
+-					else
+-						nes_netif_rx(rx_skb);
+-				}
++					nes_vlan_rx(rx_skb, nesvnic->vlan_grp, vlan_tag);
++				} else
++					nes_netif_rx(rx_skb);
+ 
+ skip_rx_indicate0:
+ 				nesvnic->netdev->last_rx = jiffies;
+@@ -2770,8 +2727,6 @@ skip_rx_indicate0:
+ 
+ 	} while (1);
+ 
+-	if (nes_use_lro)
+-		lro_flush_all(&nesvnic->lro_mgr);
+ 	if (sq_cqes) {
+ 		barrier();
+ 		/* restart the queue if it had been stopped */
+diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.h nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:38:45.000000000 -0500
+@@ -33,8 +33,6 @@
+ #ifndef __NES_HW_H
+ #define __NES_HW_H
+ 
+-#include <linux/inet_lro.h>
+-
+ #define NES_PHY_TYPE_CX4       1
+ #define NES_PHY_TYPE_1G        2
+ #define NES_PHY_TYPE_IRIS      3
+@@ -1005,8 +1003,6 @@ struct nes_hw_tune_timer {
+ #define NES_TIMER_ENABLE_LIMIT      4
+ #define NES_MAX_LINK_INTERRUPTS     128
+ #define NES_MAX_LINK_CHECK          200
+-#define NES_MAX_LRO_DESCRIPTORS     32
+-#define NES_LRO_MAX_AGGR            64
+ 
+ struct nes_adapter {
+ 	u64              fw_ver;
+@@ -1194,7 +1190,6 @@ struct nes_vnic {
+ 	u32               msg_enable;
+ 	/* u32 tx_avail; */
+ 	__be32            local_ipaddr;
+-	struct napi_struct   napi;
+ 	spinlock_t           tx_lock;	/* could use netdev tx lock? */
+ 	struct timer_list    rq_wqes_timer;
+ 	u32                  nic_mem_size;
+@@ -1222,9 +1217,6 @@ struct nes_vnic {
+ 	u8  of_device_registered;
+ 	u8  rdma_enabled;
+ 	u8  rx_checksum_disabled;
+-	u32 lro_max_aggr;
+-	struct net_lro_mgr lro_mgr;
+-	struct net_lro_desc lro_desc[NES_MAX_LRO_DESCRIPTORS];
+ };
+ 
+ struct nes_ib_device {
+diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
+@@ -96,35 +96,38 @@ static int nics_per_function = 1;
+ /**
+  * nes_netdev_poll
+  */
+-static int nes_netdev_poll(struct napi_struct *napi, int budget)
++static int nes_netdev_poll(struct net_device *netdev, int *budget_ptr)
+ {
+-	struct nes_vnic *nesvnic = container_of(napi, struct nes_vnic, napi);
+-	struct net_device *netdev = nesvnic->netdev;
++	struct nes_vnic *nesvnic = netdev_priv(netdev);
+ 	struct nes_device *nesdev = nesvnic->nesdev;
+ 	struct nes_hw_nic_cq *nescq = &nesvnic->nic_cq;
+-
+-	nesvnic->budget = budget;
++	nesvnic->budget = min(netdev->quota, *budget_ptr);
+ 	nescq->cqes_pending = 0;
+ 	nescq->rx_cqes_completed = 0;
+ 	nescq->cqe_allocs_pending = 0;
+ 	nescq->rx_pkts_indicated = 0;
+ 
+ 	nes_nic_ce_handler(nesdev, nescq);
++	netdev->quota -= nescq->rx_pkts_indicated;
++	*budget_ptr -= nescq->rx_pkts_indicated;
+ 
+-	if (nescq->cqes_pending == 0) {
+-		netif_rx_complete(netdev, napi);
++	if ((nescq->cqes_pending == 0) && (netdev->quota != 0)) {
++		netif_rx_complete(netdev);
+ 		/* clear out completed cqes and arm */
+ 		nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
+ 				nescq->cq_number | (nescq->cqe_allocs_pending << 16));
+ 		nes_read32(nesdev->regs+NES_CQE_ALLOC);
++
++		return 0;
+ 	} else {
+ 		/* clear out completed cqes but don't arm */
+ 		nes_write32(nesdev->regs+NES_CQE_ALLOC,
+ 				nescq->cq_number | (nescq->cqe_allocs_pending << 16));
+ 		nes_debug(NES_DBG_NETDEV, "%s: exiting with work pending\n",
+ 				nesvnic->netdev->name);
++
++		return 1;
+ 	}
+-	return nescq->rx_pkts_indicated;
+ }
+ 
+ 
+@@ -238,7 +241,6 @@ static int nes_netdev_open(struct net_de
+ 		netif_start_queue(netdev);
+ 		netif_carrier_on(netdev);
+ 	}
+-	napi_enable(&nesvnic->napi);
+ 	nesvnic->netdev_open = 1;
+ 
+ 	return 0;
+@@ -266,7 +268,6 @@ static int nes_netdev_stop(struct net_de
+ 		printk(KERN_INFO PFX "%s: disabling interface\n", netdev->name);
+ 
+ 	/* Disable network packets */
+-	napi_disable(&nesvnic->napi);
+ 	netif_stop_queue(netdev);
+ 	list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
+ 		first_nesvnic = container_of(list_pos, struct nes_vnic, list);
+@@ -775,14 +776,16 @@ static int nes_netdev_set_mac_address(st
+ 	int i;
+ 	u32 macaddr_low;
+ 	u16 macaddr_high;
+-	DECLARE_MAC_BUF(mac);
+ 
+ 	if (!is_valid_ether_addr(mac_addr->sa_data))
+ 		return -EADDRNOTAVAIL;
+ 
+ 	memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len);
+-	printk(PFX "%s: Address length = %d, Address = %s\n",
+-	       __func__, netdev->addr_len, print_mac(mac, mac_addr->sa_data));
++	printk(PFX "%s: Address length = %d, Address = %02X%02X%02X%02X%02X%02X..\n",
++		   __func__, netdev->addr_len,
++		   mac_addr->sa_data[0], mac_addr->sa_data[1],
++		   mac_addr->sa_data[2], mac_addr->sa_data[3],
++		   mac_addr->sa_data[4], mac_addr->sa_data[5]);
+ 	macaddr_high  = ((u16)netdev->dev_addr[0]) << 8;
+ 	macaddr_high += (u16)netdev->dev_addr[1];
+ 	macaddr_low   = ((u32)netdev->dev_addr[2]) << 24;
+@@ -887,11 +890,11 @@ static void nes_netdev_set_multicast_lis
+ 			if (mc_index >= max_pft_entries_avaiable)
+ 				break;
+ 			if (multicast_addr) {
+-				DECLARE_MAC_BUF(mac);
+-				nes_debug(NES_DBG_NIC_RX, "Assigning MC Address %s to register 0x%04X nic_idx=%d\n",
+-					  print_mac(mac, multicast_addr->dmi_addr),
+-					  perfect_filter_register_address+(mc_index * 8),
+-					  mc_nic_index);
++				nes_debug(NES_DBG_NIC_RX, "Assigning MC Address = %02X%02X%02X%02X%02X%02X to register 0x%04X nic_idx=%d\n",
++						  multicast_addr->dmi_addr[0], multicast_addr->dmi_addr[1],
++						  multicast_addr->dmi_addr[2], multicast_addr->dmi_addr[3],
++						  multicast_addr->dmi_addr[4], multicast_addr->dmi_addr[5],
++						  perfect_filter_register_address+(mc_index * 8), mc_nic_index);
+ 				macaddr_high  = ((u16)multicast_addr->dmi_addr[0]) << 8;
+ 				macaddr_high += (u16)multicast_addr->dmi_addr[1];
+ 				macaddr_low   = ((u32)multicast_addr->dmi_addr[2]) << 24;
+@@ -1040,9 +1043,6 @@ static const char nes_ethtool_stringset[
+ 	"CQ Depth 32",
+ 	"CQ Depth 128",
+ 	"CQ Depth 256",
+-	"LRO aggregated",
+-	"LRO flushed",
+-	"LRO no_desc",
+ };
+ 
+ #define NES_ETHTOOL_STAT_COUNT  ARRAY_SIZE(nes_ethtool_stringset)
+@@ -1240,9 +1240,6 @@ static void nes_netdev_get_ethtool_stats
+ 	target_stat_values[++index] = int_mod_cq_depth_32;
+ 	target_stat_values[++index] = int_mod_cq_depth_128;
+ 	target_stat_values[++index] = int_mod_cq_depth_256;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
+ 
+ }
+ 
+@@ -1525,8 +1522,6 @@ static struct ethtool_ops nes_ethtool_op
+ 	.set_sg = ethtool_op_set_sg,
+ 	.get_tso = ethtool_op_get_tso,
+ 	.set_tso = ethtool_op_set_tso,
+-	.get_flags = ethtool_op_get_flags,
+-	.set_flags = ethtool_op_set_flags,
+ };
+ 
+ 
+@@ -1598,7 +1593,8 @@ struct net_device *nes_netdev_init(struc
+ 	netdev->type = ARPHRD_ETHER;
+ 	netdev->features = NETIF_F_HIGHDMA;
+ 	netdev->ethtool_ops = &nes_ethtool_ops;
+-	netif_napi_add(netdev, &nesvnic->napi, nes_netdev_poll, 128);
++	netdev->poll = nes_netdev_poll;
++	netdev->weight = 128;
+ 	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
+ 	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ 	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
+@@ -1630,7 +1626,6 @@ struct net_device *nes_netdev_init(struc
+ 
+ 	if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
+ 		netdev->features |= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;
+-		netdev->features |= NETIF_F_GSO | NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;
+ 	} else {
+ 		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+ 	}

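Most of the large iw_nes patch above is a NAPI backport: the 2.6.24-style netif_napi_add()/struct napi_struct interface is rewritten to the older netdev->poll plus netdev->weight model, inet_lro support is removed because the target kernel lacks it, and DECLARE_MAC_BUF()/print_mac() are open-coded as %02X format strings. A hedged skeleton of the pre-2.6.24 poll convention the patch converts to (example_rx and example_enable_irq are hypothetical helpers):

    #include <linux/kernel.h>
    #include <linux/netdevice.h>

    static int example_rx(struct net_device *dev, int limit);       /* hypothetical */
    static void example_enable_irq(struct net_device *dev);         /* hypothetical */

    /*
     * Old-style NAPI poll: return 0 when done, 1 when work remains.
     * Both dev->quota and *budget must be reduced by the number of
     * packets handed to the stack.
     */
    static int example_poll(struct net_device *dev, int *budget)
    {
            int limit = min(dev->quota, *budget);
            int done  = example_rx(dev, limit);

            dev->quota -= done;
            *budget    -= done;

            if (done < limit) {
                    netif_rx_complete(dev);         /* single-argument form */
                    example_enable_irq(dev);        /* re-arm the interrupt */
                    return 0;
            }
            return 1;                               /* more work: poll again */
    }
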
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iw_nes_200_to_2_6_22.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iw_nes_200_to_2_6_22.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iw_nes_200_to_2_6_22.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,55 @@
+diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
+@@ -593,16 +593,22 @@ static int __devinit nes_probe(struct pc
+ 						nesdev->nesadapter->port_count;
+ 	}
+ 
+-	if ((limit_maxrdreqsz ||
+-	     ((nesdev->nesadapter->phy_type[0] == NES_PHY_TYPE_GLADIUS) &&
+-	      (hw_rev == NE020_REV1))) &&
+-	    (pcie_get_readrq(pcidev) > 256)) {
+-		if (pcie_set_readrq(pcidev, 256))
+-			printk(KERN_ERR PFX "Unable to set max read request"
+-				" to 256 bytes\n");
+-		else
+-			nes_debug(NES_DBG_INIT, "Max read request size set"
+-				" to 256 bytes\n");
++	if (limit_maxrdreqsz ||
++	    ((nesdev->nesadapter->phy_type[0] == NES_PHY_TYPE_GLADIUS) &&
++	     (hw_rev == NE020_REV1))) {
++		u16 maxrdreqword;
++		pci_read_config_word(pcidev, 0x68, &maxrdreqword);
++		/* set bits 12-14 to 001b = 256 bytes */
++		if ((maxrdreqword & 0x7000) > 0x1000) {
++			maxrdreqword &= 0x8fff;
++			maxrdreqword |= 0x1000;
++			if (pci_write_config_word(pcidev, 0x68, maxrdreqword))
++				printk(KERN_ERR PFX "Unable to set max read "
++					"request to 256 bytes\n");
++			else
++				nes_debug(NES_DBG_INIT, "Max read request size"
++					"set to 256 bytes\n");
++		}
+ 	}
+ 
+ 	tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
+diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.h nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h
+--- linux-2.6/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:38:45.000000000 -0500
+@@ -274,14 +274,7 @@ static inline __le32 get_crc_value(struc
+ 	u32 crc_value;
+ 	crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad));
+ 
+-	/*
+-	 * With commit ef19454b ("[LIB] crc32c: Keep intermediate crc
+-	 * state in cpu order"), behavior of crc32c changes on
+-	 * big-endian platforms.  Our algorithm expects the previous
+-	 * behavior; otherwise we have RDMA connection establishment
+-	 * issue on big-endian.
+-	 */
+-	return cpu_to_le32(crc_value);
++	return crc_value;
+ }
+ 
+ static inline void

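The iw_nes patch above does two things for the 2.6.22 level: it replaces pcie_get_readrq()/pcie_set_readrq(), which are missing here, with a direct read-modify-write of the PCIe Device Control word at config offset 0x68 (bits 14:12 hold the Max Read Request Size, and the 0x1000 value written selects 256 bytes), and it drops the cpu_to_le32() around the crc32c result because the older crc32c kept the byte-order behaviour the driver expects. A small illustrative decode of that register field (standalone, not from the driver):

    #include <stdio.h>

    /* Decode PCIe Device Control bits 14:12 into a byte count. */
    static unsigned int mrrs_bytes(unsigned short devctl)
    {
            unsigned int code = (devctl >> 12) & 0x7;   /* spec defines 0..5 */
            return 128u << code;                        /* 128, 256, ..., 4096 */
    }

    int main(void)
    {
            printf("%u\n", mrrs_bytes(0x1000));         /* prints 256 */
            return 0;
    }
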
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iw_nes_210_to_2_6_21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iw_nes_210_to_2_6_21.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/iw_nes_210_to_2_6_21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,23 @@
+diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1549,6 +1549,11 @@ static void nes_netdev_vlan_rx_register(
+ 	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ }
+ 
++static void nes_netdev_vlan_rx_kill_vid(struct net_device *netdev, 
++					unsigned short vid)
++{
++}
++
+ 
+ /**
+  * nes_netdev_init - initialize network device
+@@ -1598,6 +1603,7 @@ struct net_device *nes_netdev_init(struc
+ 	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
+ 	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ 	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
++	netdev->vlan_rx_kill_vid = nes_netdev_vlan_rx_kill_vid;
+ 
+ 	/* Fill in the port structure */
+ 	nesvnic->netdev = netdev;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/linux_genalloc_to_2_6_20.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/linux_genalloc_to_2_6_20.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/linux_genalloc_to_2_6_20.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,21 @@
+---
+ drivers/infiniband/core/Makefile   |    2 ++
+ drivers/infiniband/core/genalloc.c |    1 +
+ 2 files changed, 3 insertions(+)
+
+Index: ofed_kernel/drivers/infiniband/core/Makefile
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/Makefile
++++ ofed_kernel/drivers/infiniband/core/Makefile
+@@ -30,3 +30,5 @@ ib_umad-y :=			user_mad.o
+ ib_ucm-y :=			ucm.o
+ 
+ ib_uverbs-y :=			uverbs_main.o uverbs_cmd.o uverbs_marshall.o
++
++ib_core-y +=			genalloc.o
+Index: ofed_kernel/drivers/infiniband/core/genalloc.c
+===================================================================
+--- /dev/null
++++ ofed_kernel/drivers/infiniband/core/genalloc.c
+@@ -0,0 +1 @@
++#include "src/genalloc.c"

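The genalloc patch above builds a private copy of the generic allocator into ib_core for this target (the wrapper file simply includes the backported src/genalloc.c), presumably because the gen_pool interface the OFED code relies on is newer than what the 2.6.18-based kernel ships. For context, a hedged sketch of typical gen_pool usage; the region and sizes are made up:

    #include <linux/genalloc.h>
    #include <linux/log2.h>
    #include <linux/types.h>
    #include <linux/errno.h>

    /* Minimal gen_pool round-trip over a caller-provided region. */
    static int example_genpool_demo(unsigned long region, size_t region_size)
    {
            struct gen_pool *pool;
            unsigned long chunk;

            pool = gen_pool_create(ilog2(64), -1);      /* 64-byte granularity */
            if (!pool)
                    return -ENOMEM;
            if (gen_pool_add(pool, region, region_size, -1)) {
                    gen_pool_destroy(pool);
                    return -ENOMEM;
            }

            chunk = gen_pool_alloc(pool, 512);          /* 0 means exhausted */
            if (chunk)
                    gen_pool_free(pool, chunk, 512);

            gen_pool_destroy(pool);
            return 0;
    }
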
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_0030_smp_call_function.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_0030_smp_call_function.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_0030_smp_call_function.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,35 @@
+---
+ drivers/infiniband/hw/mlx4/wc.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/hw/mlx4/wc.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/mlx4/wc.c
++++ ofed_kernel/drivers/infiniband/hw/mlx4/wc.c
+@@ -111,7 +111,7 @@ static int read_and_modify_pat(void)
+ 	preempt_disable();
+ 	rd_old_pat(&ret);
+ 	if (!ret)
+-		smp_call_function(rd_old_pat, &ret, 1);
++		smp_call_function(rd_old_pat, &ret, 1, 1);
+ 	if (ret)
+ 		goto out;
+ 
+@@ -119,7 +119,7 @@ static int read_and_modify_pat(void)
+ 	if (ret)
+ 		goto out;
+ 
+-	smp_call_function(wr_new_pat, &ret, 1);
++	smp_call_function(wr_new_pat, &ret, 1, 1);
+ 	BUG_ON(ret); /* have inconsistent PAT state */
+ out:
+ 	preempt_enable();
+@@ -133,7 +133,7 @@ static int restore_pat(void)
+ 	preempt_disable();
+ 	wr_old_pat(&ret);
+ 	if (!ret) {
+-		smp_call_function(wr_old_pat, &ret, 1);
++		smp_call_function(wr_old_pat, &ret, 1, 1);
+ 		BUG_ON(ret); /* have inconsistent PAT state */
+ 	}
+ 

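The mlx4 patch above adds back the extra "nonatomic" argument that smp_call_function() took before the 2.6.27 API cleanup; the upstream three-argument calls do not compile against this kernel. Such differences are sometimes hidden behind a compat wrapper instead of editing call sites; the wrapper below is sketched purely as an assumption, since the backport itself just patches the callers:

    #include <linux/smp.h>
    #include <linux/version.h>

    static inline int compat_smp_call_function(void (*func)(void *info),
                                               void *info, int wait)
    {
    #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
            return smp_call_function(func, info, 1 /* nonatomic */, wait);
    #else
            return smp_call_function(func, info, wait);
    #endif
    }
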
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_0040_pci_dma_mapping_error_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_0040_pci_dma_mapping_error_to_2_6_26.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_0040_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,17 @@
+---
+ drivers/net/mlx4/eq.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: ofed_kernel/drivers/net/mlx4/eq.c
+===================================================================
+--- ofed_kernel.orig/drivers/net/mlx4/eq.c
++++ ofed_kernel/drivers/net/mlx4/eq.c
+@@ -532,7 +532,7 @@ int mlx4_map_eq_icm(struct mlx4_dev *dev
+ 		return -ENOMEM;
+ 	priv->eq_table.icm_dma  = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
+ 					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+-	if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
++	if (pci_dma_mapping_error(priv->eq_table.icm_dma)) {
+ 		__free_page(priv->eq_table.icm_page);
+ 		return -ENOMEM;
+ 	}
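
Backport note: pci_dma_mapping_error() only gained its struct pci_dev argument in 2.6.27; on 2.6.26 and earlier it takes the DMA address alone, so the backport drops the device argument. Code that has to build on both sides often hides the difference behind a small compat macro, roughly like this (the compat_ name is made up for illustration):

    #include <linux/version.h>
    #include <linux/pci.h>

    #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
    #define compat_pci_dma_mapping_error(pdev, addr) \
            pci_dma_mapping_error(addr)
    #else
    #define compat_pci_dma_mapping_error(pdev, addr) \
            pci_dma_mapping_error(pdev, addr)
    #endif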

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_0050_wc.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_0050_wc.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_0050_wc.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,17 @@
+---
+ drivers/infiniband/hw/mlx4/wc.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: ofed_kernel/drivers/infiniband/hw/mlx4/wc.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/mlx4/wc.c
++++ ofed_kernel/drivers/infiniband/hw/mlx4/wc.c
+@@ -143,7 +143,7 @@ static int restore_pat(void)
+ 
+ int mlx4_enable_wc(void)
+ {
+-	struct cpuinfo_x86 *c = &cpu_data(0);
++	struct cpuinfo_x86 *c = &(cpu_data)[0];
+ 	int ret;
+ 
+ 	if (wc_enabled)
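
Backport note: newer x86 kernels define cpu_data(cpu) as a per-CPU accessor macro, while on the 2.6.18 tree cpu_data is still either an array (SMP) or a macro expanding to &boot_cpu_data (UP); writing &(cpu_data)[0] keeps the expression valid in both of those older cases. A compat wrapper for out-of-tree code might look like this (the 2.6.25 cut-off is approximate):

    #include <linux/version.h>

    #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)
    #define compat_cpu_data(cpu)    (cpu_data)[cpu]    /* array (or &boot_cpu_data on UP) */
    #else
    #define compat_cpu_data(cpu)    cpu_data(cpu)      /* per-CPU accessor */
    #endif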

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_0060_sysfs.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_0060_sysfs.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_0060_sysfs.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,236 @@
+---
+ drivers/infiniband/hw/mlx4/main.c |  153 +++++++++++++++++---------------------
+ 1 file changed, 71 insertions(+), 82 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/hw/mlx4/main.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/mlx4/main.c
++++ ofed_kernel/drivers/infiniband/hw/mlx4/main.c
+@@ -581,51 +581,42 @@ out:
+ 	return err;
+ }
+ 
+-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
+-			char *buf)
++static ssize_t show_hca(struct class_device *cdev, char *buf)
+ {
+-	struct mlx4_ib_dev *dev =
+-		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
++	struct mlx4_ib_dev *dev = container_of(cdev, struct mlx4_ib_dev, ib_dev.class_dev);
+ 	return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
+ }
+ 
+-static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
+-			   char *buf)
++static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
+ {
+-	struct mlx4_ib_dev *dev =
+-		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
++	struct mlx4_ib_dev *dev = container_of(cdev, struct mlx4_ib_dev, ib_dev.class_dev);
+ 	return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
+ 		       (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
+ 		       (int) dev->dev->caps.fw_ver & 0xffff);
+ }
+ 
+-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
+-			char *buf)
++static ssize_t show_rev(struct class_device *cdev, char *buf)
+ {
+-	struct mlx4_ib_dev *dev =
+-		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
++	struct mlx4_ib_dev *dev = container_of(cdev, struct mlx4_ib_dev, ib_dev.class_dev);
+ 	return sprintf(buf, "%x\n", dev->dev->rev_id);
+ }
+ 
+-static ssize_t show_board(struct device *device, struct device_attribute *attr,
+-			  char *buf)
++static ssize_t show_board(struct class_device *cdev, char *buf)
+ {
+-	struct mlx4_ib_dev *dev =
+-		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
+-	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
+-		       dev->dev->board_id);
+-}
+-
+-static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
+-static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
+-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
+-static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);
+-
+-static struct device_attribute *mlx4_class_attributes[] = {
+-	&dev_attr_hw_rev,
+-	&dev_attr_fw_ver,
+-	&dev_attr_hca_type,
+-	&dev_attr_board_id
++	struct mlx4_ib_dev *dev = container_of(cdev, struct mlx4_ib_dev, ib_dev.class_dev);
++	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN, dev->dev->board_id);
++}
++
++static CLASS_DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
++static CLASS_DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
++static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
++static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);
++
++static struct class_device_attribute *mlx4_class_attributes[] = {
++	&class_device_attr_hw_rev,
++	&class_device_attr_fw_ver,
++	&class_device_attr_hca_type,
++	&class_device_attr_board_id
+ };
+ 
+ /*
+@@ -633,23 +624,22 @@ static struct device_attribute *mlx4_cla
+  * the function for _name
+  */
+ #define DEVICE_DIAG_RPRT_ATTR(_name, _offset, _op_mod)		\
+-static ssize_t show_rprt_##_name(struct device *dev,		\
+-				 struct device_attribute *attr,	\
++static ssize_t show_rprt_##_name(struct class_device *cdev,	\
+ 				 char *buf){			\
+-	return show_diag_rprt(dev, buf, _offset, _op_mod);	\
++	return show_diag_rprt(cdev, buf, _offset, _op_mod);	\
+ }								\
+-static DEVICE_ATTR(_name, S_IRUGO, show_rprt_##_name, NULL);
++static CLASS_DEVICE_ATTR(_name, S_IRUGO, show_rprt_##_name, NULL);
+ 
+ #define MLX4_DIAG_RPRT_CLEAR_DIAGS 3
+ 
+-static size_t show_diag_rprt(struct device *device, char *buf,
++static size_t show_diag_rprt(struct class_device *cdev, char *buf,
+                               u32 offset, u8 op_modifier)
+ {
+ 	size_t ret;
+ 	u32 counter_offset = offset;
+ 	u32 diag_counter = 0;
+-	struct mlx4_ib_dev *dev = container_of(device, struct mlx4_ib_dev,
+-					       ib_dev.dev);
++	struct mlx4_ib_dev *dev = container_of(cdev, struct mlx4_ib_dev,
++					       ib_dev.class_dev);
+ 
+ 	ret = mlx4_query_diag_counters(dev->dev, 1, op_modifier,
+ 				       &counter_offset, &diag_counter);
+@@ -659,13 +649,12 @@ static size_t show_diag_rprt(struct devi
+ 	return sprintf(buf,"%d\n", diag_counter);
+ }
+ 
+-static ssize_t clear_diag_counters(struct device *device,
+-				   struct device_attribute *attr,
++static ssize_t clear_diag_counters(struct class_device *cdev,
+ 				   const char *buf, size_t length)
+ {
+ 	size_t ret;
+-	struct mlx4_ib_dev *dev = container_of(device, struct mlx4_ib_dev,
+-					       ib_dev.dev);
++	struct mlx4_ib_dev *dev = container_of(cdev, struct mlx4_ib_dev,
++					       ib_dev.class_dev);
+ 
+ 	ret = mlx4_query_diag_counters(dev->dev, 0, MLX4_DIAG_RPRT_CLEAR_DIAGS,
+ 				       NULL, NULL);
+@@ -711,45 +700,45 @@ DEVICE_DIAG_RPRT_ATTR(num_cqovf		, 0x1A0
+ DEVICE_DIAG_RPRT_ATTR(num_eqovf		, 0x1A4, 2);
+ DEVICE_DIAG_RPRT_ATTR(num_baddb		, 0x1A8, 2);
+ 
+-static DEVICE_ATTR(clear_diag, S_IWUGO, NULL, clear_diag_counters);
++static CLASS_DEVICE_ATTR(clear_diag, S_IWUGO, NULL, clear_diag_counters);
+ 
+ static struct attribute *diag_rprt_attrs[] = {
+-	&dev_attr_rq_num_lle.attr,
+-	&dev_attr_sq_num_lle.attr,
+-	&dev_attr_rq_num_lqpoe.attr,
+-	&dev_attr_sq_num_lqpoe.attr,
+-	&dev_attr_rq_num_leeoe.attr,
+-	&dev_attr_sq_num_leeoe.attr,
+-	&dev_attr_rq_num_lpe.attr,
+-	&dev_attr_sq_num_lpe.attr,
+-	&dev_attr_rq_num_wrfe.attr,
+-	&dev_attr_sq_num_wrfe.attr,
+-	&dev_attr_sq_num_mwbe.attr,
+-	&dev_attr_sq_num_bre.attr,
+-	&dev_attr_rq_num_lae.attr,
+-	&dev_attr_sq_num_rire.attr,
+-	&dev_attr_rq_num_rire.attr,
+-	&dev_attr_sq_num_rae.attr,
+-	&dev_attr_rq_num_rae.attr,
+-	&dev_attr_sq_num_roe.attr,
+-	&dev_attr_sq_num_tree.attr,
+-	&dev_attr_sq_num_rree.attr,
+-	&dev_attr_rq_num_rnr.attr,
+-	&dev_attr_sq_num_rnr.attr,
+-	&dev_attr_sq_num_rabrte.attr,
+-	&dev_attr_sq_num_ieecne.attr,
+-	&dev_attr_sq_num_ieecse.attr,
+-	&dev_attr_rq_num_oos.attr,
+-	&dev_attr_sq_num_oos.attr,
+-	&dev_attr_rq_num_mce.attr,
+-	&dev_attr_rq_num_rsync.attr,
+-	&dev_attr_sq_num_rsync.attr,
+-	&dev_attr_rq_num_udsdprd.attr,
+-	&dev_attr_rq_num_ucsdprd.attr,
+-	&dev_attr_num_cqovf.attr,
+-	&dev_attr_num_eqovf.attr,
+-	&dev_attr_num_baddb.attr,
+-	&dev_attr_clear_diag.attr,
++	&class_device_attr_rq_num_lle.attr,
++	&class_device_attr_sq_num_lle.attr,
++	&class_device_attr_rq_num_lqpoe.attr,
++	&class_device_attr_sq_num_lqpoe.attr,
++	&class_device_attr_rq_num_leeoe.attr,
++	&class_device_attr_sq_num_leeoe.attr,
++	&class_device_attr_rq_num_lpe.attr,
++	&class_device_attr_sq_num_lpe.attr,
++	&class_device_attr_rq_num_wrfe.attr,
++	&class_device_attr_sq_num_wrfe.attr,
++	&class_device_attr_sq_num_mwbe.attr,
++	&class_device_attr_sq_num_bre.attr,
++	&class_device_attr_rq_num_lae.attr,
++	&class_device_attr_sq_num_rire.attr,
++	&class_device_attr_rq_num_rire.attr,
++	&class_device_attr_sq_num_rae.attr,
++	&class_device_attr_rq_num_rae.attr,
++	&class_device_attr_sq_num_roe.attr,
++	&class_device_attr_sq_num_tree.attr,
++	&class_device_attr_sq_num_rree.attr,
++	&class_device_attr_rq_num_rnr.attr,
++	&class_device_attr_sq_num_rnr.attr,
++	&class_device_attr_sq_num_rabrte.attr,
++	&class_device_attr_sq_num_ieecne.attr,
++	&class_device_attr_sq_num_ieecse.attr,
++	&class_device_attr_rq_num_oos.attr,
++	&class_device_attr_sq_num_oos.attr,
++	&class_device_attr_rq_num_mce.attr,
++	&class_device_attr_rq_num_rsync.attr,
++	&class_device_attr_sq_num_rsync.attr,
++	&class_device_attr_rq_num_udsdprd.attr,
++	&class_device_attr_rq_num_ucsdprd.attr,
++	&class_device_attr_num_cqovf.attr,
++	&class_device_attr_num_eqovf.attr,
++	&class_device_attr_num_baddb.attr,
++	&class_device_attr_clear_diag.attr,
+ 	NULL
+ };
+ 
+@@ -906,12 +895,12 @@ static void *mlx4_ib_add(struct mlx4_dev
+ 		goto err_reg;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(mlx4_class_attributes); ++i) {
+-		if (device_create_file(&ibdev->ib_dev.dev,
+-				       mlx4_class_attributes[i]))
++		if (class_device_create_file(&ibdev->ib_dev.class_dev,
++					       mlx4_class_attributes[i]))
+ 			goto err_reg;
+ 	}
+ 
+-	if(sysfs_create_group(&ibdev->ib_dev.dev.kobj, &diag_counters_group))
++	if(sysfs_create_group(&ibdev->ib_dev.class_dev.kobj, &diag_counters_group))
+ 		goto err_reg;
+ 
+ 	return ibdev;
+@@ -942,7 +931,7 @@ static void mlx4_ib_remove(struct mlx4_d
+ 	if (!ibdev->num_ports)
+ 		return;
+ 
+-	sysfs_remove_group(&ibdev->ib_dev.dev.kobj, &diag_counters_group);
++	sysfs_remove_group(&ibdev->ib_dev.class_dev.kobj, &diag_counters_group);
+ 
+ 	for (p = 1; p <= ibdev->num_ports; ++p)
+ 		mlx4_CLOSE_PORT(dev, p);
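
Backport note: until roughly 2.6.26 the IB core exposed its sysfs device as a struct class_device (ib_dev.class_dev) rather than a struct device (ib_dev.dev), and the attribute show/store callbacks did not take a device_attribute argument. This patch therefore converts every DEVICE_ATTR / device_create_file() pair back to the class_device equivalents. The two attribute shapes, side by side (sketch only):

    /* newer kernels: struct device based */
    static ssize_t show_rev(struct device *dev,
                            struct device_attribute *attr, char *buf);
    static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);

    /* 2.6.18-EL5.x: struct class_device based */
    static ssize_t show_rev(struct class_device *cdev, char *buf);
    static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);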

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_en_0099_no_multiqueue.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_en_0099_no_multiqueue.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_en_0099_no_multiqueue.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,73 @@
+diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
+index bce700a..a9ee0d7 100644
+--- a/drivers/net/mlx4/en_netdev.c
++++ b/drivers/net/mlx4/en_netdev.c
+@@ -963,7 +963,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	int i;
+ 	int err;
+ 
+-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
++	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+ 	if (dev == NULL) {
+ 		mlx4_err(mdev, "Net device allocation failed\n");
+ 		return -ENOMEM;
+@@ -1036,7 +1036,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	dev->open = &mlx4_en_open;
+ 	dev->stop = &mlx4_en_close;
+ 	dev->hard_start_xmit = &mlx4_en_xmit;
+-	dev->select_queue = &mlx4_en_select_queue;
+ 	dev->get_stats = &mlx4_en_get_stats;
+ 	dev->set_multicast_list = &mlx4_en_set_multicast;
+ 	dev->set_mac_address = &mlx4_en_set_mac;
+diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
+index 3d8246f..72e166f 100644
+--- a/drivers/net/mlx4/en_tx.c
++++ b/drivers/net/mlx4/en_tx.c
+@@ -392,7 +392,7 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+ 			 *   transmission on that ring would stop the queue.
+ 			 */
+ 			ring->blocked = 0;
+-			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
++			netif_wake_queue(dev);
+ 			priv->port_stats.wake_queue++;
+ 		}
+ 	}
+@@ -612,7 +612,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
+ 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ }
+ 
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
++static int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	u16 vlan_tag = 0;
+@@ -703,7 +703,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+-	tx_ind = skb->queue_mapping;
++	tx_ind = mlx4_en_select_queue(dev, skb);
+ 	ring = &priv->tx_ring[tx_ind];
+ 	if (priv->vlgrp && vlan_tx_tag_present(skb))
+ 		vlan_tag = vlan_tx_tag_get(skb);
+@@ -713,7 +713,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ 		/* every full Tx ring stops queue.
+ 		 * TODO: implement multi-queue support (per-queue stop) */
+-		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
++		netif_stop_queue(dev);
+ 		ring->blocked = 1;
+ 		priv->port_stats.queue_stopped++;
+ 
+diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
+index adef17c..995e318 100644
+--- a/drivers/net/mlx4/mlx4_en.h
++++ b/drivers/net/mlx4/mlx4_en.h
+@@ -521,7 +521,6 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq);
+ void mlx4_en_poll_tx_cq(unsigned long data);
+ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+ 
+ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+ 			   u32 size, u16 stride);
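
Backport note: the multiqueue TX API used by the mainline mlx4_en driver (alloc_etherdev_mq(), dev->select_queue, skb->queue_mapping and the per-queue netif_tx_stop_queue()/netif_tx_wake_queue() helpers) does not exist on 2.6.18, so the backport falls back to a single software TX queue and picks the hardware ring inside the xmit routine itself. The two allocation paths, shown as a sketch:

    /* mainline: one netdev TX queue per hardware ring */
    dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);

    /* 2.6.18 backport: a single TX queue; the driver maps skbs to rings
     * itself and can only stop or wake the device as a whole */
    dev = alloc_etherdev(sizeof(struct mlx4_en_priv));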

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_en_0100_to_2.6.24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_en_0100_to_2.6.24.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_en_0100_to_2.6.24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,142 @@
+diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
+index a4d6e46..440633a 100644
+--- a/drivers/net/mlx4/en_cq.c
++++ b/drivers/net/mlx4/en_cq.c
+@@ -34,6 +34,7 @@
+ #include <linux/mlx4/cq.h>
+ #include <linux/mlx4/qp.h>
+ #include <linux/mlx4/cmd.h>
++#include <linux/delay.h>
+ 
+ #include "mlx4_en.h"
+ 
+@@ -106,8 +107,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+ 		cq->timer.function = mlx4_en_poll_tx_cq;
+ 		cq->timer.data = (unsigned long) cq;
+ 	} else {
+-		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
+-		napi_enable(&cq->napi);
++		char name[IFNAMSIZ];
++
++		snprintf(name, IFNAMSIZ, "mlx4_en-%d-%d", priv->port, cq->ring);
++		cq->poll_dev = alloc_netdev(0, name, ether_setup);
++		if (!cq->poll_dev)
++			return -ENOMEM;
++
++		cq->poll_dev->priv = cq;
++		cq->poll_dev->weight = 64;
++		cq->poll_dev->poll = mlx4_en_poll_rx_cq;
++		set_bit(__LINK_STATE_START, &cq->poll_dev->state);
+ 	}
+ 
+ 	return 0;
+@@ -129,8 +139,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+ 
+ 	if (cq->is_tx)
+ 		del_timer(&cq->timer);
+-	else
+-		napi_disable(&cq->napi);
++	else {
++		while (test_bit(__LINK_STATE_RX_SCHED,
++				&cq->poll_dev->state))
++			msleep(1);
++		free_netdev(cq->poll_dev);
++		cq->poll_dev = NULL;
++	}
+ 
+ 	mlx4_cq_free(mdev->dev, &cq->mcq);
+ }
+diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
+index 2d57ae6..3b394d7 100644
+--- a/drivers/net/mlx4/en_netdev.c
++++ b/drivers/net/mlx4/en_netdev.c
+@@ -333,7 +333,8 @@ void mlx4_en_netpoll(struct net_device *dev)
+ 	for (i = 0; i < priv->rx_ring_num; i++) {
+ 		cq = &priv->rx_cq[i];
+ 		spin_lock_irqsave(&cq->lock, flags);
+-		napi_synchronize(&cq->napi);
++		while (test_bit(__LINK_STATE_RX_SCHED, &cq->poll_dev->state))
++			msleep(1);
+ 		if (priv->rx_ring[i].use_frags)
+ 			mlx4_en_process_rx_cq(dev, cq, 0);
+ 		else
+@@ -755,8 +756,6 @@ void mlx4_en_stop_port(struct net_device *dev)
+ 	/* Free RX Rings */
+ 	for (i = 0; i < priv->rx_ring_num; i++) {
+ 		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+-		while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
+-			msleep(1);
+ 		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
+ 	}
+ }
+diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
+index 9d144cd..14b5faf 100644
+--- a/drivers/net/mlx4/en_rx.c
++++ b/drivers/net/mlx4/en_rx.c
+@@ -1041,30 +1041,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
+ 	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
+ 
+ 	if (priv->port_up)
+-		netif_rx_schedule(cq->dev, &cq->napi);
++		netif_rx_schedule(cq->poll_dev);
+ 	else
+ 		mlx4_en_arm_cq(priv, cq);
+ }
+ 
+ /* Rx CQ polling - called by NAPI */
+-int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
++int mlx4_en_poll_rx_cq(struct net_device *poll_dev, int *budget)
+ {
+-	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
++	struct mlx4_en_cq *cq = poll_dev->priv;
+ 	struct net_device *dev = cq->dev;
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	int done;
++	int work = min(*budget, poll_dev->quota);
+ 
+-	done = cq->process_cq(dev, cq, budget);
++	done = cq->process_cq(dev, cq, work);
++	dev->quota -= done;
++	*budget -= done;
+ 
+ 	/* If we used up all the quota - we're probably not done yet... */
+-	if (done == budget)
++	if (done == work) {
+ 		INC_PERF_COUNTER(priv->pstats.napi_quota);
+-	else {
+-		/* Done for now */
+-		netif_rx_complete(dev, napi);
+-		mlx4_en_arm_cq(priv, cq);
++		return 1;
+ 	}
+-	return done;
++
++        /* Done for now */
++	netif_rx_complete(poll_dev);
++	mlx4_en_arm_cq(priv, cq);
++	return 0;
+ }
+ 
+ 
+diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
+index ebaed67..456cd91 100644
+--- a/drivers/net/mlx4/mlx4_en.h
++++ b/drivers/net/mlx4/mlx4_en.h
+@@ -328,7 +328,7 @@ struct mlx4_en_cq {
+ 	int                     ring;
+ 	spinlock_t              lock;
+ 	struct net_device      *dev;
+-	struct napi_struct	napi;
++	struct net_device      *poll_dev; /* for napi */
+ 	/* Per-core Tx cq processing support */
+ 	struct timer_list timer;
+ 	int size;
+@@ -571,7 +571,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
+ int mlx4_en_process_rx_cq_skb(struct net_device *dev,
+ 			      struct mlx4_en_cq *cq,
+ 			      int budget);
+-int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
++int mlx4_en_poll_rx_cq(struct net_device *poll_dev, int *budget);
+ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
+ 			     int is_tx, int rss, int qpn, int cqn, int srqn,
+ 			     struct mlx4_qp_context *context);
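
Backport note: struct napi_struct and netif_napi_add() only appeared in 2.6.24. The older NAPI model hangs the poll callback, weight and quota directly off a struct net_device, and netif_rx_schedule()/netif_rx_complete() take that device, so the backport allocates a bare dummy netdev per RX CQ (alloc_netdev(0, ...)) to serve as the polling context. The general shape of an old-style poll handler, as a sketch (do_rx_work is a made-up placeholder for the driver's CQ processing):

    /* pre-2.6.24 NAPI: return 1 while work remains, 0 when done */
    static int old_style_poll(struct net_device *poll_dev, int *budget)
    {
            int work = min(*budget, poll_dev->quota);
            int done = do_rx_work(poll_dev->priv, work);

            poll_dev->quota -= done;
            *budget -= done;

            if (done == work)
                    return 1;               /* more work pending, keep polling */

            netif_rx_complete(poll_dev);    /* done: re-enable RX interrupts */
            return 0;
    }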

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_en_0110_no_set_flags.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_en_0110_no_set_flags.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_en_0110_no_set_flags.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,17 @@
+---
+ drivers/net/mlx4/en_params.c |    2 --
+ 1 file changed, 2 deletions(-)
+
+Index: ofed_kernel/drivers/net/mlx4/en_params.c
+===================================================================
+--- ofed_kernel.orig/drivers/net/mlx4/en_params.c
++++ ofed_kernel/drivers/net/mlx4/en_params.c
+@@ -593,8 +593,6 @@ const struct ethtool_ops mlx4_en_ethtool
+ 	.set_pauseparam = mlx4_en_set_pauseparam,
+ 	.get_ringparam = mlx4_en_get_ringparam,
+ 	.set_ringparam = mlx4_en_set_ringparam,
+-	.get_flags = ethtool_op_get_flags,
+-	.set_flags = ethtool_op_set_flags,
+ };
+ 
+ 
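
Backport note: the .get_flags/.set_flags ethtool operations (and the ethtool_op_get_flags()/ethtool_op_set_flags() helpers) are not part of struct ethtool_ops on 2.6.18, so the backport simply drops the two initializers. A single source tree covering both old and new kernels would typically guard the fields instead; the version cut-off below is an assumption, not taken from the patch:

    static const struct ethtool_ops mlx4_en_ethtool_ops = {
            /* ... */
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)   /* assumed cut-off */
            .get_flags = ethtool_op_get_flags,
            .set_flags = ethtool_op_set_flags,
    #endif
    };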

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_en_0120_ethtool_interface.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_en_0120_ethtool_interface.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mlx4_en_0120_ethtool_interface.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,28 @@
+diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
+index d2daa9d..4a8f036 100644
+--- a/drivers/net/mlx4/en_params.c
++++ b/drivers/net/mlx4/en_params.c
+@@ -238,13 +238,10 @@ static char perf_strings[NUM_PERF_COUNTERS][ETH_GSTRING_LEN] = {
+ };
+ 
+ 
+-static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
++static int mlx4_en_get_sset_count(struct net_device *dev)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 
+-	if (sset != ETH_SS_STATS)
+-		return -EOPNOTSUPP;
+-
+ 	return NUM_ALL_STATS + (priv->tx_ring_num + priv->rx_ring_num) * 2;
+ }
+ 
+@@ -450,7 +447,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
+ 	.get_tx_csum = ethtool_op_get_tx_csum,
+ 	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
+ 	.get_strings = mlx4_en_get_strings,
+-	.get_sset_count = mlx4_en_get_sset_count,
++	.get_stats_count = mlx4_en_get_sset_count,
+ 	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
+ 	.get_coalesce = mlx4_en_get_coalesce,
+ 	.set_coalesce = mlx4_en_set_coalesce,
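
Backport note: older kernels report the number of ethtool statistics through .get_stats_count(dev); the string-set aware .get_sset_count(dev, sset) replaced it in later kernels, which is why the backport renames the hook and removes the ETH_SS_STATS check. The two callback prototypes, for reference:

    /* newer kernels: one hook for all string sets */
    int (*get_sset_count)(struct net_device *dev, int sset);

    /* 2.6.18 and other older kernels: statistics only */
    int (*get_stats_count)(struct net_device *dev);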

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mthca_0000_pci_dma_mapping_error_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mthca_0000_pci_dma_mapping_error_to_2_6_26.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mthca_0000_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,17 @@
+---
+ drivers/infiniband/hw/mthca/mthca_eq.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: ofed_kernel/drivers/infiniband/hw/mthca/mthca_eq.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/mthca/mthca_eq.c
++++ ofed_kernel/drivers/infiniband/hw/mthca/mthca_eq.c
+@@ -780,7 +780,7 @@ int mthca_map_eq_icm(struct mthca_dev *d
+ 		return -ENOMEM;
+ 	dev->eq_table.icm_dma  = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
+ 					      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+-	if (pci_dma_mapping_error(dev->pdev, dev->eq_table.icm_dma)) {
++	if (pci_dma_mapping_error(dev->eq_table.icm_dma)) {
+ 		__free_page(dev->eq_table.icm_page);
+ 		return -ENOMEM;
+ 	}

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mthca_0001_pcix_to_2_6_22.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mthca_0001_pcix_to_2_6_22.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mthca_0001_pcix_to_2_6_22.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,57 @@
+---
+ drivers/infiniband/hw/mthca/mthca_main.c |   33 +++++++++++++++++++++++--------
+ 1 file changed, 25 insertions(+), 8 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/hw/mthca/mthca_main.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/mthca/mthca_main.c
++++ ofed_kernel/drivers/infiniband/hw/mthca/mthca_main.c
+@@ -131,23 +131,40 @@ static char mthca_version[] __devinitdat
+ 
+ static int mthca_tune_pci(struct mthca_dev *mdev)
+ {
++	int cap;
++	u16 val;
++
+ 	if (!tune_pci)
+ 		return 0;
+ 
+ 	/* First try to max out Read Byte Count */
+-	if (pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX)) {
+-		if (pcix_set_mmrbc(mdev->pdev, pcix_get_max_mmrbc(mdev->pdev))) {
+-			mthca_err(mdev, "Couldn't set PCI-X max read count, "
+-				"aborting.\n");
++	cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX);
++	if (cap) {
++		if (pci_read_config_word(mdev->pdev, cap + PCI_X_CMD, &val)) {
++			mthca_err(mdev, "Couldn't read PCI-X command register, "
++				  "aborting.\n");
++			return -ENODEV;
++		}
++		val = (val & ~PCI_X_CMD_MAX_READ) | (3 << 2);
++		if (pci_write_config_word(mdev->pdev, cap + PCI_X_CMD, val)) {
++			mthca_err(mdev, "Couldn't write PCI-X command register, "
++				  "aborting.\n");
+ 			return -ENODEV;
+ 		}
+ 	} else if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE))
+ 		mthca_info(mdev, "No PCI-X capability, not setting RBC.\n");
+ 
+-	if (pci_find_capability(mdev->pdev, PCI_CAP_ID_EXP)) {
+-		if (pcie_set_readrq(mdev->pdev, 4096)) {
+-			mthca_err(mdev, "Couldn't write PCI Express read request, "
+-				"aborting.\n");
++	cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_EXP);
++	if (cap) {
++		if (pci_read_config_word(mdev->pdev, cap + PCI_EXP_DEVCTL, &val)) {
++			mthca_err(mdev, "Couldn't read PCI Express device control "
++				  "register, aborting.\n");
++			return -ENODEV;
++		}
++		val = (val & ~PCI_EXP_DEVCTL_READRQ) | (5 << 12);
++		if (pci_write_config_word(mdev->pdev, cap + PCI_EXP_DEVCTL, val)) {
++			mthca_err(mdev, "Couldn't write PCI Express device control "
++				  "register, aborting.\n");
+ 			return -ENODEV;
+ 		}
+ 	} else if (mdev->mthca_flags & MTHCA_FLAG_PCIE)
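
Backport note: pcix_get_max_mmrbc()/pcix_set_mmrbc() and pcie_set_readrq() are newer PCI helpers that the 2.6.18-EL5 kernel does not provide, so the backport open-codes the equivalent config-space writes; unlike the mainline code, which queries the device's maximum MMRBC, it hard-codes 4096 bytes. The two values decode as follows:

    /* PCI_X_CMD_MAX_READ (bits 3:2): 0=512, 1=1024, 2=2048, 3=4096 bytes */
    val = (val & ~PCI_X_CMD_MAX_READ) | (3 << 2);

    /* PCI_EXP_DEVCTL_READRQ (bits 14:12): size = 128 << field, so 5 -> 4096 bytes */
    val = (val & ~PCI_EXP_DEVCTL_READRQ) | (5 << 12);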

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mthca_0010_sysfs.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mthca_0010_sysfs.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/mthca_0010_sysfs.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,94 @@
+---
+ drivers/infiniband/hw/mthca/mthca_provider.c |   50 +++++++++++----------------
+ 1 file changed, 21 insertions(+), 29 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/hw/mthca/mthca_provider.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/mthca/mthca_provider.c
++++ ofed_kernel/drivers/infiniband/hw/mthca/mthca_provider.c
+@@ -1186,29 +1186,23 @@ static int mthca_unmap_fmr(struct list_h
+ 	return 0;
+ }
+ 
+-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
+-			char *buf)
++static ssize_t show_rev(struct class_device *cdev, char *buf)
+ {
+-	struct mthca_dev *dev =
+-		container_of(device, struct mthca_dev, ib_dev.dev);
++	struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
+ 	return sprintf(buf, "%x\n", dev->rev_id);
+ }
+ 
+-static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
+-			   char *buf)
++static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
+ {
+-	struct mthca_dev *dev =
+-		container_of(device, struct mthca_dev, ib_dev.dev);
++	struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
+ 	return sprintf(buf, "%d.%d.%d\n", (int) (dev->fw_ver >> 32),
+ 		       (int) (dev->fw_ver >> 16) & 0xffff,
+ 		       (int) dev->fw_ver & 0xffff);
+ }
+ 
+-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
+-			char *buf)
++static ssize_t show_hca(struct class_device *cdev, char *buf)
+ {
+-	struct mthca_dev *dev =
+-		container_of(device, struct mthca_dev, ib_dev.dev);
++	struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
+ 	switch (dev->pdev->device) {
+ 	case PCI_DEVICE_ID_MELLANOX_TAVOR:
+ 		return sprintf(buf, "MT23108\n");
+@@ -1224,24 +1218,22 @@ static ssize_t show_hca(struct device *d
+ 	}
+ }
+ 
+-static ssize_t show_board(struct device *device, struct device_attribute *attr,
+-			  char *buf)
++static ssize_t show_board(struct class_device *cdev, char *buf)
+ {
+-	struct mthca_dev *dev =
+-		container_of(device, struct mthca_dev, ib_dev.dev);
++	struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
+ 	return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
+ }
+ 
+-static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
+-static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
+-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
+-static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);
+-
+-static struct device_attribute *mthca_dev_attributes[] = {
+-	&dev_attr_hw_rev,
+-	&dev_attr_fw_ver,
+-	&dev_attr_hca_type,
+-	&dev_attr_board_id
++static CLASS_DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
++static CLASS_DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
++static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
++static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);
++
++static struct class_device_attribute *mthca_class_attributes[] = {
++	&class_device_attr_hw_rev,
++	&class_device_attr_fw_ver,
++	&class_device_attr_hca_type,
++	&class_device_attr_board_id
+ };
+ 
+ static int mthca_init_node_data(struct mthca_dev *dev)
+@@ -1403,9 +1395,9 @@ int mthca_register_device(struct mthca_d
+ 	if (ret)
+ 		return ret;
+ 
+-	for (i = 0; i < ARRAY_SIZE(mthca_dev_attributes); ++i) {
+-		ret = device_create_file(&dev->ib_dev.dev,
+-					 mthca_dev_attributes[i]);
++	for (i = 0; i < ARRAY_SIZE(mthca_class_attributes); ++i) {
++		ret = class_device_create_file(&dev->ib_dev.class_dev,
++					       mthca_class_attributes[i]);
+ 		if (ret) {
+ 			ib_unregister_device(&dev->ib_dev);
+ 			return ret;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/nes_0010_sysfs.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/nes_0010_sysfs.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/nes_0010_sysfs.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,120 @@
+---
+ drivers/infiniband/hw/nes/nes_verbs.c |   50 +++++++++++++++-------------------
+ 1 file changed, 23 insertions(+), 27 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/hw/nes/nes_verbs.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/nes/nes_verbs.c
++++ ofed_kernel/drivers/infiniband/hw/nes/nes_verbs.c
+@@ -2676,11 +2676,10 @@ static int nes_dereg_mr(struct ib_mr *ib
+ /**
+  * show_rev
+  */
+-static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
+-			char *buf)
++static ssize_t show_rev(struct class_device *cdev, char *buf)
+ {
+ 	struct nes_ib_device *nesibdev =
+-			container_of(dev, struct nes_ib_device, ibdev.dev);
++			container_of(cdev, struct nes_ib_device, ibdev.class_dev);
+ 	struct nes_vnic *nesvnic = nesibdev->nesvnic;
+ 
+ 	nes_debug(NES_DBG_INIT, "\n");
+@@ -2691,11 +2690,10 @@ static ssize_t show_rev(struct device *d
+ /**
+  * show_fw_ver
+  */
+-static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
+-			   char *buf)
++static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
+ {
+ 	struct nes_ib_device *nesibdev =
+-			container_of(dev, struct nes_ib_device, ibdev.dev);
++			container_of(cdev, struct nes_ib_device, ibdev.class_dev);
+ 	struct nes_vnic *nesvnic = nesibdev->nesvnic;
+ 
+ 	nes_debug(NES_DBG_INIT, "\n");
+@@ -2709,8 +2707,7 @@ static ssize_t show_fw_ver(struct device
+ /**
+  * show_hca
+  */
+-static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
+-		        char *buf)
++static ssize_t show_hca(struct class_device *cdev, char *buf)
+ {
+ 	nes_debug(NES_DBG_INIT, "\n");
+ 	return sprintf(buf, "NES020\n");
+@@ -2720,24 +2717,23 @@ static ssize_t show_hca(struct device *d
+ /**
+  * show_board
+  */
+-static ssize_t show_board(struct device *dev, struct device_attribute *attr,
+-			  char *buf)
++static ssize_t show_board(struct class_device *cdev, char *buf)
+ {
+ 	nes_debug(NES_DBG_INIT, "\n");
+ 	return sprintf(buf, "%.*s\n", 32, "NES020 Board ID");
+ }
+ 
+ 
+-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
+-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
+-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
+-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+-
+-static struct device_attribute *nes_dev_attributes[] = {
+-	&dev_attr_hw_rev,
+-	&dev_attr_fw_ver,
+-	&dev_attr_hca_type,
+-	&dev_attr_board_id
++static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
++static CLASS_DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
++static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
++static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
++
++static struct class_device_attribute *nes_class_attributes[] = {
++	&class_device_attr_hw_rev,
++	&class_device_attr_fw_ver,
++	&class_device_attr_hca_type,
++	&class_device_attr_board_id
+ };
+ 
+ 
+@@ -3646,7 +3642,7 @@ struct nes_ib_device *nes_init_ofa_devic
+ 	nesibdev->ibdev.phys_port_cnt = 1;
+ 	nesibdev->ibdev.num_comp_vectors = 1;
+ 	nesibdev->ibdev.dma_device = &nesdev->pcidev->dev;
+-	nesibdev->ibdev.dev.parent = &nesdev->pcidev->dev;
++	nesibdev->ibdev.class_dev.dev = &nesdev->pcidev->dev;
+ 	nesibdev->ibdev.query_device = nes_query_device;
+ 	nesibdev->ibdev.query_port = nes_query_port;
+ 	nesibdev->ibdev.modify_port = nes_modify_port;
+@@ -3741,13 +3737,13 @@ int nes_register_ofa_device(struct nes_i
+ 	nesibdev->max_qp = (nesadapter->max_qp-NES_FIRST_QPN) / nesadapter->port_count;
+ 	nesibdev->max_pd = nesadapter->max_pd / nesadapter->port_count;
+ 
+-	for (i = 0; i < ARRAY_SIZE(nes_dev_attributes); ++i) {
+-		ret = device_create_file(&nesibdev->ibdev.dev, nes_dev_attributes[i]);
++	for (i = 0; i < ARRAY_SIZE(nes_class_attributes); ++i) {
++		ret = class_device_create_file(&nesibdev->ibdev.class_dev, nes_class_attributes[i]);
+ 		if (ret) {
+ 			while (i > 0) {
+ 				i--;
+-				device_remove_file(&nesibdev->ibdev.dev,
+-						   nes_dev_attributes[i]);
++				class_device_remove_file(&nesibdev->ibdev.class_dev,
++						nes_class_attributes[i]);
+ 			}
+ 			ib_unregister_device(&nesibdev->ibdev);
+ 			return ret;
+@@ -3768,8 +3764,8 @@ static void nes_unregister_ofa_device(st
+ 	struct nes_vnic *nesvnic = nesibdev->nesvnic;
+ 	int i;
+ 
+-	for (i = 0; i < ARRAY_SIZE(nes_dev_attributes); ++i) {
+-		device_remove_file(&nesibdev->ibdev.dev, nes_dev_attributes[i]);
++	for (i = 0; i < ARRAY_SIZE(nes_class_attributes); ++i) {
++		class_device_remove_file(&nesibdev->ibdev.class_dev, nes_class_attributes[i]);
+ 	}
+ 
+ 	if (nesvnic->of_device_registered) {

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/qlgc_vnic_01_class_device.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/qlgc_vnic_01_class_device.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/qlgc_vnic_01_class_device.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,1099 @@
+---
+ drivers/infiniband/ulp/qlgc_vnic/vnic_ib.c      |   75 ++++---
+ drivers/infiniband/ulp/qlgc_vnic/vnic_ib.h      |    2 +-
+ drivers/infiniband/ulp/qlgc_vnic/vnic_main.c    |   11 +-
+ drivers/infiniband/ulp/qlgc_vnic/vnic_main.h    |    5 +-
+ drivers/infiniband/ulp/qlgc_vnic/vnic_netpath.c |    6 +-
+ drivers/infiniband/ulp/qlgc_vnic/vnic_netpath.h |    2 +-
+ drivers/infiniband/ulp/qlgc_vnic/vnic_stats.c   |  126 ++++++------
+ drivers/infiniband/ulp/qlgc_vnic/vnic_stats.h   |   18 +-
+ drivers/infiniband/ulp/qlgc_vnic/vnic_sys.c     |  257 ++++++++++++-----------
+ drivers/infiniband/ulp/qlgc_vnic/vnic_sys.h     |   18 +-
+ 10 files changed, 266 insertions(+), 254 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/ulp/qlgc_vnic/vnic_ib.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/qlgc_vnic/vnic_ib.c
++++ ofed_kernel/drivers/infiniband/ulp/qlgc_vnic/vnic_ib.c
+@@ -89,33 +89,34 @@ int vnic_ib_init(void)
+ 		goto err_ib_reg;
+ 	}
+ 
+-	interface_dev.dev.class = &vnic_class;
+-	interface_dev.dev.release = vnic_release_dev;
+-	snprintf(interface_dev.dev.bus_id,
++ 	interface_cdev.class_dev.class = &vnic_class;
++ 	snprintf(interface_cdev.class_dev.class_id,
+ 		 BUS_ID_SIZE, "interfaces");
+-	init_completion(&interface_dev.released);
+-	ret = device_register(&interface_dev.dev);
++ 	init_completion(&interface_cdev.released);
++  	ret = class_device_register(&interface_cdev.class_dev);
+ 	if (ret) {
+ 		printk(KERN_ERR PFX "couldn't register class interfaces;"
+ 		       " error %d", ret);
+ 		goto err_class_dev;
+ 	}
+-	ret = device_create_file(&interface_dev.dev,
+-				       &dev_attr_delete_vnic);
++ 	ret = class_device_create_file(&interface_cdev.class_dev,
++ 				       &class_device_attr_delete_vnic);
+ 	if (ret) {
+ 		printk(KERN_ERR PFX "couldn't create class file"
+ 		       " 'delete_vnic'; error %d", ret);
+ 		goto err_class_file;
+ 	}
+ 
+-	ret = device_create_file(&interface_dev.dev, &dev_attr_force_failover);
++	ret = class_device_create_file(&interface_cdev.class_dev,
++					&class_device_attr_force_failover);
+ 	if (ret) {
+ 		printk(KERN_ERR PFX "couldn't create class file"
+ 		       " 'force_failover'; error %d", ret);
+ 		goto err_force_failover_file;
+ 	}
+ 
+-	ret = device_create_file(&interface_dev.dev, &dev_attr_unfailover);
++	ret = class_device_create_file(&interface_cdev.class_dev,
++					&class_device_attr_unfailover);
+ 	if (ret) {
+ 		printk(KERN_ERR PFX "couldn't create class file"
+ 		       " 'unfailover'; error %d", ret);
+@@ -125,11 +126,13 @@ int vnic_ib_init(void)
+ 
+ 	return ret;
+ err_unfailover_file:
+-	device_remove_file(&interface_dev.dev, &dev_attr_force_failover);
++	class_device_remove_file(&interface_cdev.class_dev,
++				 &class_device_attr_force_failover);
+ err_force_failover_file:
+-	device_remove_file(&interface_dev.dev, &dev_attr_delete_vnic);
++	class_device_remove_file(&interface_cdev.class_dev,
++				 &class_device_attr_delete_vnic);
+ err_class_file:
+-	device_unregister(&interface_dev.dev);
++	class_device_unregister(&interface_cdev.class_dev);
+ err_class_dev:
+ 	ib_unregister_client(&vnic_client);
+ err_ib_reg:
+@@ -148,29 +151,29 @@ static struct vnic_ib_port *vnic_add_port(struct vnic_ib_device *device,
+ 	if (!port)
+ 		return NULL;
+ 
+-	init_completion(&port->pdev_info.released);
++	init_completion(&port->cdev_info.released);
+ 	port->dev = device;
+ 	port->port_num = port_num;
+ 
+-	port->pdev_info.dev.class = &vnic_class;
+-	port->pdev_info.dev.parent = NULL;
+-	port->pdev_info.dev.release = vnic_release_dev;
+-	snprintf(port->pdev_info.dev.bus_id, BUS_ID_SIZE,
++	port->cdev_info.class_dev.class = &vnic_class;
++	port->cdev_info.class_dev.dev = device->dev->dma_device;
++	snprintf(port->cdev_info.class_dev.class_id, BUS_ID_SIZE,
+ 		 "vnic-%s-%d", device->dev->name, port_num);
+ 
+-	if (device_register(&port->pdev_info.dev))
++ 	if (class_device_register(&port->cdev_info.class_dev))
+ 		goto free_port;
+ 
+-	if (device_create_file(&port->pdev_info.dev,
+-				     &dev_attr_create_primary))
++ 	if (class_device_create_file(&port->cdev_info.class_dev,
++ 				     &class_device_attr_create_primary))
+ 		goto err_class;
+-	if (device_create_file(&port->pdev_info.dev,
+-				     &dev_attr_create_secondary))
++
++ 	if (class_device_create_file(&port->cdev_info.class_dev,
++ 				     &class_device_attr_create_secondary))
+ 		goto err_class;
+ 
+ 	return port;
+ err_class:
+-	device_unregister(&port->pdev_info.dev);
++	class_device_unregister(&port->cdev_info.class_dev);
+ free_port:
+ 	kfree(port);
+ 
+@@ -219,14 +222,16 @@ static void vnic_remove_one(struct ib_device *device)
+ 	list_for_each_entry_safe(port, tmp_port,
+ 				 &vnic_dev->port_list, list) {
+ 
+-		device_remove_file(&port->pdev_info.dev, &dev_attr_create_primary);
+-		device_remove_file(&port->pdev_info.dev, &dev_attr_create_secondary);
+-		device_unregister(&port->pdev_info.dev);
++		class_device_remove_file(&port->cdev_info.class_dev,
++				&class_device_attr_create_primary);
++		class_device_remove_file(&port->cdev_info.class_dev,
++				&class_device_attr_create_secondary);
++		class_device_unregister(&port->cdev_info.class_dev);
+ 		/*
+ 		 * wait for sysfs entries to go away, so that no new vnics
+ 		 * are created
+ 		 */
+-		wait_for_completion(&port->pdev_info.released);
++		wait_for_completion(&port->cdev_info.released);
+ 		kfree(port);
+ 
+ 	}
+@@ -254,13 +259,15 @@ void vnic_ib_cleanup(void)
+ 
+ 	if (!vnic_ib_inited)
+ 		return;
+-
+-	device_remove_file(&interface_dev.dev, &dev_attr_unfailover);
+-	device_remove_file(&interface_dev.dev, &dev_attr_force_failover);
+-	device_remove_file(&interface_dev.dev, &dev_attr_delete_vnic);
+-
+-	device_unregister(&interface_dev.dev);
+-	wait_for_completion(&interface_dev.released);
++	class_device_remove_file(&interface_cdev.class_dev,
++				&class_device_attr_unfailover);
++	class_device_remove_file(&interface_cdev.class_dev,
++				&class_device_attr_force_failover);
++	class_device_remove_file(&interface_cdev.class_dev,
++				&class_device_attr_delete_vnic);
++
++ 	class_device_unregister(&interface_cdev.class_dev);
++ 	wait_for_completion(&interface_cdev.released);
+ 
+ 	ib_unregister_client(&vnic_client);
+ 	ib_sa_unregister_client(&vnic_sa_client);
+Index: ofed_kernel/drivers/infiniband/ulp/qlgc_vnic/vnic_ib.h
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/qlgc_vnic/vnic_ib.h
++++ ofed_kernel/drivers/infiniband/ulp/qlgc_vnic/vnic_ib.h
+@@ -109,7 +109,7 @@ struct vnic_ib_device {
+ struct vnic_ib_port {
+ 	struct vnic_ib_device	*dev;
+ 	u8			port_num;
+-	struct dev_info		pdev_info;
++	struct class_dev_info	cdev_info;
+ 	struct list_head	list;
+ };
+ 
+Index: ofed_kernel/drivers/infiniband/ulp/qlgc_vnic/vnic_main.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/qlgc_vnic/vnic_main.c
++++ ofed_kernel/drivers/infiniband/ulp/qlgc_vnic/vnic_main.c
+@@ -770,11 +770,11 @@ static void vnic_handle_free_vnic_evt(struct vnic *vnic)
+ 		kfree(vnic->mc_list);
+ 	}
+ 
+-	sysfs_remove_group(&vnic->dev_info.dev.kobj,
+-			   &vnic_dev_attr_group);
+-	vnic_cleanup_stats_files(vnic);
+-	device_unregister(&vnic->dev_info.dev);
+-	wait_for_completion(&vnic->dev_info.released);
++ 	sysfs_remove_group(&vnic->class_dev_info.class_dev.kobj,
++  			   &vnic_dev_attr_group);
++  	vnic_cleanup_stats_files(vnic);
++  	class_device_unregister(&vnic->class_dev_info.class_dev);
++  	wait_for_completion(&vnic->class_dev_info.released);
+ 	free_netdev(vnic->netdevice);
+ }
+ 
+@@ -1092,6 +1092,7 @@ struct vnic *vnic_allocate(struct vnic_config *config)
+ 	vnic->state = VNIC_UNINITIALIZED;
+ 	vnic->config = config;
+ 
++
+ 	netpath_init(&vnic->primary_path, vnic, 0);
+ 	netpath_init(&vnic->secondary_path, vnic, 1);
+ 
+Index: ofed_kernel/drivers/infiniband/ulp/qlgc_vnic/vnic_main.h
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/qlgc_vnic/vnic_main.h
++++ ofed_kernel/drivers/infiniband/ulp/qlgc_vnic/vnic_main.h
+@@ -36,7 +36,6 @@
+ #include <linux/timex.h>
+ #include <linux/netdevice.h>
+ #include <linux/kthread.h>
+-#include <linux/fs.h>
+ 
+ #include "vnic_config.h"
+ #include "vnic_netpath.h"
+@@ -104,7 +103,7 @@ struct vnic {
+ 	int				failed_over;
+ 	int				mac_set;
+ 	struct net_device		*netdevice;
+-	struct dev_info			dev_info;
++	struct class_dev_info		class_dev_info;
+ 	struct dev_mc_list		*mc_list;
+ 	int				mc_list_len;
+ 	int				mc_count;
+@@ -130,7 +129,7 @@ struct vnic {
+ 		cycles_t	carrier_off_time;
+ 		u32		carrier_off_num;
+ 	} statistics;
+-	struct dev_info		stat_info;
++	struct class_dev_info	stat_info;
+ #endif	/* CONFIG_INFINIBAND_QLGC_VNIC_STATS */
+ };
+ 
+Index: ofed_kernel/drivers/infiniband/ulp/qlgc_vnic/vnic_netpath.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/qlgc_vnic/vnic_netpath.c
++++ ofed_kernel/drivers/infiniband/ulp/qlgc_vnic/vnic_netpath.c
+@@ -82,10 +82,10 @@ void netpath_free(struct netpath *netpath)
+ 		return;
+ 	viport_free(netpath->viport);
+ 	netpath->viport = NULL;
+-	sysfs_remove_group(&netpath->dev_info.dev.kobj,
++	sysfs_remove_group(&netpath->class_dev_info.class_dev.kobj,
+ 			   &vnic_path_attr_group);
+-	device_unregister(&netpath->dev_info.dev);
+-	wait_for_completion(&netpath->dev_info.released);
++	class_device_unregister(&netpath->class_dev_info.class_dev);
++	wait_for_completion(&netpath->class_dev_info.released);
+ }
+ 
+ void netpath_init(struct netpath *netpath, struct vnic *vnic,
+Index: ofed_kernel/drivers/infiniband/ulp/qlgc_vnic/vnic_netpath.h
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/qlgc_vnic/vnic_netpath.h
++++ ofed_kernel/drivers/infiniband/ulp/qlgc_vnic/vnic_netpath.h
+@@ -57,7 +57,7 @@ struct netpath {
+ 	u8 			delay_reconnect;
+ 	struct timer_list	timer;
+ 	enum netpath_ts		timer_state;
+-	struct dev_info		dev_info;
++	struct class_dev_info	class_dev_info;
+ };
+ 
+ void netpath_init(struct netpath *netpath, struct vnic *vnic,
+Index: ofed_kernel/drivers/infiniband/ulp/qlgc_vnic/vnic_stats.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/qlgc_vnic/vnic_stats.c
++++ ofed_kernel/drivers/infiniband/ulp/qlgc_vnic/vnic_stats.c
+@@ -43,22 +43,22 @@ cycles_t vnic_recv_ref;
+  *       RDMA times, IOs etc
+  *
+  */
+-static ssize_t show_lifetime(struct device *dev,
+-			     struct device_attribute *dev_attr, char *buf)
++static ssize_t show_lifetime(struct class_device *class_dev, char *buf)
+ {
+-	struct dev_info *info =	container_of(dev, struct dev_info, dev);
++	struct class_dev_info *info =
++		container_of(class_dev, struct class_dev_info, class_dev);
+ 	struct vnic *vnic = container_of(info, struct vnic, stat_info);
+ 	cycles_t time = get_cycles() - vnic->statistics.start_time;
+ 
+ 	return sprintf(buf, "%llu\n", (unsigned long long)time);
+ }
+ 
+-static DEVICE_ATTR(lifetime, S_IRUGO, show_lifetime, NULL);
++static CLASS_DEVICE_ATTR(lifetime, S_IRUGO, show_lifetime, NULL);
+ 
+-static ssize_t show_conntime(struct device *dev,
+-			     struct device_attribute *dev_attr, char *buf)
++static ssize_t show_conntime(struct class_device *class_dev, char *buf)
+ {
+-	struct dev_info *info =	container_of(dev, struct dev_info, dev);
++	struct class_dev_info *info =
++		container_of(class_dev, struct class_dev_info, class_dev);
+ 	struct vnic *vnic = container_of(info, struct vnic, stat_info);
+ 
+ 	if (vnic->statistics.conn_time)
+@@ -67,12 +67,12 @@ static ssize_t show_conntime(struct device *dev,
+ 	return 0;
+ }
+ 
+-static DEVICE_ATTR(connection_time, S_IRUGO, show_conntime, NULL);
++static CLASS_DEVICE_ATTR(connection_time, S_IRUGO, show_conntime, NULL);
+ 
+-static ssize_t show_disconnects(struct device *dev,
+-				struct device_attribute *dev_attr, char *buf)
++static ssize_t show_disconnects(struct class_device *class_dev, char *buf)
+ {
+-	struct dev_info *info =	container_of(dev, struct dev_info, dev);
++	struct class_dev_info *info =
++		container_of(class_dev, struct class_dev_info, class_dev);
+ 	struct vnic *vnic = container_of(info, struct vnic, stat_info);
+ 	u32 num;
+ 
+@@ -84,13 +84,13 @@ static ssize_t show_disconnects(struct device *dev,
+ 	return sprintf(buf, "%d\n", num);
+ }
+ 
+-static DEVICE_ATTR(disconnects, S_IRUGO, show_disconnects, NULL);
++static CLASS_DEVICE_ATTR(disconnects, S_IRUGO, show_disconnects, NULL);
+ 
+-static ssize_t show_total_disconn_time(struct device *dev,
+-				       struct device_attribute *dev_attr,
++static ssize_t show_total_disconn_time(struct class_device *class_dev,
+ 				       char *buf)
+ {
+-	struct dev_info *info = container_of(dev, struct dev_info, dev);
++	struct class_dev_info *info =
++		container_of(class_dev, struct class_dev_info, class_dev);
+ 	struct vnic *vnic = container_of(info, struct vnic, stat_info);
+ 	cycles_t time;
+ 
+@@ -103,12 +103,14 @@ static ssize_t show_total_disconn_time(struct device *dev,
+ 	return sprintf(buf, "%llu\n", (unsigned long long)time);
+ }
+ 
+-static DEVICE_ATTR(total_disconn_time, S_IRUGO, show_total_disconn_time, NULL);
++static CLASS_DEVICE_ATTR(total_disconn_time, S_IRUGO,
++			 show_total_disconn_time, NULL);
+ 
+-static ssize_t show_carrier_losses(struct device *dev,
+-				   struct device_attribute *dev_attr, char *buf)
++static ssize_t show_carrier_losses(struct class_device *class_dev,
++				   char *buf)
+ {
+-	struct dev_info *info =	container_of(dev, struct dev_info, dev);
++	struct class_dev_info *info =
++		container_of(class_dev, struct class_dev_info, class_dev);
+ 	struct vnic *vnic = container_of(info, struct vnic, stat_info);
+ 	u32 num;
+ 
+@@ -120,13 +122,14 @@ static ssize_t show_carrier_losses(struct device *dev,
+ 	return sprintf(buf, "%d\n", num);
+ }
+ 
+-static DEVICE_ATTR(carrier_losses, S_IRUGO, show_carrier_losses, NULL);
++static CLASS_DEVICE_ATTR(carrier_losses, S_IRUGO,
++			 show_carrier_losses, NULL);
+ 
+-static ssize_t show_total_carr_loss_time(struct device *dev,
+-					 struct device_attribute *dev_attr,
++static ssize_t show_total_carr_loss_time(struct class_device *class_dev,
+ 					 char *buf)
+ {
+-	struct dev_info *info =	container_of(dev, struct dev_info, dev);
++	struct class_dev_info *info =
++		container_of(class_dev, struct class_dev_info, class_dev);
+ 	struct vnic *vnic = container_of(info, struct vnic, stat_info);
+ 	cycles_t time;
+ 
+@@ -139,93 +142,94 @@ static ssize_t show_total_carr_loss_time(struct device *dev,
+ 	return sprintf(buf, "%llu\n", (unsigned long long)time);
+ }
+ 
+-static DEVICE_ATTR(total_carrier_loss_time, S_IRUGO,
++static CLASS_DEVICE_ATTR(total_carrier_loss_time, S_IRUGO,
+ 			 show_total_carr_loss_time, NULL);
+ 
+-static ssize_t show_total_recv_time(struct device *dev,
+-				    struct device_attribute *dev_attr,
++static ssize_t show_total_recv_time(struct class_device *class_dev,
+ 				    char *buf)
+ {
+-	struct dev_info *info =	container_of(dev, struct dev_info, dev);
++	struct class_dev_info *info =
++		container_of(class_dev, struct class_dev_info, class_dev);
+ 	struct vnic *vnic = container_of(info, struct vnic, stat_info);
+ 
+ 	return sprintf(buf, "%llu\n",
+ 		       (unsigned long long)vnic->statistics.recv_time);
+ }
+ 
+-static DEVICE_ATTR(total_recv_time, S_IRUGO, show_total_recv_time, NULL);
++static CLASS_DEVICE_ATTR(total_recv_time, S_IRUGO,
++			 show_total_recv_time, NULL);
+ 
+-static ssize_t show_recvs(struct device *dev,
+-			  struct device_attribute *dev_attr, char *buf)
++static ssize_t show_recvs(struct class_device *class_dev, char *buf)
+ {
+-	struct dev_info *info =	container_of(dev, struct dev_info, dev);
++	struct class_dev_info *info =
++		container_of(class_dev, struct class_dev_info, class_dev);
+ 	struct vnic *vnic = container_of(info, struct vnic, stat_info);
+ 
+ 	return sprintf(buf, "%d\n", vnic->statistics.recv_num);
+ }
+ 
+-static DEVICE_ATTR(recvs, S_IRUGO, show_recvs, NULL);
++static CLASS_DEVICE_ATTR(recvs, S_IRUGO, show_recvs, NULL);
+ 
+-static ssize_t show_multicast_recvs(struct device *dev,
+-				    struct device_attribute *dev_attr,
+-				    char *buf)
++static ssize_t show_multicast_recvs(struct class_device *class_dev, char *buf)
+ {
+-	struct dev_info *info =	container_of(dev, struct dev_info, dev);
++	struct class_dev_info *info =
++		container_of(class_dev, struct class_dev_info, class_dev);
+ 	struct vnic *vnic = container_of(info, struct vnic, stat_info);
+ 
+ 	return sprintf(buf, "%d\n", vnic->statistics.multicast_recv_num);
+ }
+ 
+-static DEVICE_ATTR(multicast_recvs, S_IRUGO, show_multicast_recvs, NULL);
++static CLASS_DEVICE_ATTR(multicast_recvs, S_IRUGO, show_multicast_recvs, NULL);
+ 
+-static ssize_t show_total_xmit_time(struct device *dev,
+-				    struct device_attribute *dev_attr,
++static ssize_t show_total_xmit_time(struct class_device *class_dev,
+ 				    char *buf)
+ {
+-	struct dev_info *info =	container_of(dev, struct dev_info, dev);
++	struct class_dev_info *info =
++		container_of(class_dev, struct class_dev_info, class_dev);
+ 	struct vnic *vnic = container_of(info, struct vnic, stat_info);
+ 
+ 	return sprintf(buf, "%llu\n",
+ 		       (unsigned long long)vnic->statistics.xmit_time);
+ }
+ 
+-static DEVICE_ATTR(total_xmit_time, S_IRUGO, show_total_xmit_time, NULL);
++static CLASS_DEVICE_ATTR(total_xmit_time, S_IRUGO,
++			 show_total_xmit_time, NULL);
+ 
+-static ssize_t show_xmits(struct device *dev,
+-			  struct device_attribute *dev_attr, char *buf)
++static ssize_t show_xmits(struct class_device *class_dev, char *buf)
+ {
+-	struct dev_info *info =	container_of(dev, struct dev_info, dev);
++	struct class_dev_info *info =
++		container_of(class_dev, struct class_dev_info, class_dev);
+ 	struct vnic *vnic = container_of(info, struct vnic, stat_info);
+ 
+ 	return sprintf(buf, "%d\n", vnic->statistics.xmit_num);
+ }
+ 
+-static DEVICE_ATTR(xmits, S_IRUGO, show_xmits, NULL);
++static CLASS_DEVICE_ATTR(xmits, S_IRUGO, show_xmits, NULL);
+ 
+-static ssize_t show_failed_xmits(struct device *dev,
+-				 struct device_attribute *dev_attr, char *buf)
++static ssize_t show_failed_xmits(struct class_device *class_dev, char *buf)
+ {
+-	struct dev_info *info =	container_of(dev, struct dev_info, dev);
++	struct class_dev_info *info =
++		container_of(class_dev, struct class_dev_info, class_dev);
+ 	struct vnic *vnic = container_of(info, struct vnic, stat_info);
+ 
+ 	return sprintf(buf, "%d\n", vnic->statistics.xmit_fail);
+ }
+ 
+-static DEVICE_ATTR(failed_xmits, S_IRUGO, show_failed_xmits, NULL);
++static CLASS_DEVICE_ATTR(failed_xmits, S_IRUGO, show_failed_xmits, NULL);
+ 
+ static struct attribute *vnic_stats_attrs[] = {
+-	&dev_attr_lifetime.attr,
+-	&dev_attr_xmits.attr,
+-	&dev_attr_total_xmit_time.attr,
+-	&dev_attr_failed_xmits.attr,
+-	&dev_attr_recvs.attr,
+-	&dev_attr_multicast_recvs.attr,
+-	&dev_attr_total_recv_time.attr,
+-	&dev_attr_connection_time.attr,
+-	&dev_attr_disconnects.attr,
+-	&dev_attr_total_disconn_time.attr,
+-	&dev_attr_carrier_losses.attr,
+-	&dev_attr_total_carrier_loss_time.attr,
++	&class_device_attr_lifetime.attr,
++	&class_device_attr_xmits.attr,
++	&class_device_attr_total_xmit_time.attr,
++	&class_device_attr_failed_xmits.attr,
++	&class_device_attr_recvs.attr,
++	&class_device_attr_multicast_recvs.attr,
++	&class_device_attr_total_recv_time.attr,
++	&class_device_attr_connection_time.attr,
++	&class_device_attr_disconnects.attr,
++	&class_device_attr_total_disconn_time.attr,
++	&class_device_attr_carrier_losses.attr,
++	&class_device_attr_total_carrier_loss_time.attr,
+ 	NULL
+ };
+ 
+Index: ofed_kernel/drivers/infiniband/ulp/qlgc_vnic/vnic_stats.h
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/qlgc_vnic/vnic_stats.h
++++ ofed_kernel/drivers/infiniband/ulp/qlgc_vnic/vnic_stats.h
+@@ -35,7 +35,6 @@
+ 
+ #include "vnic_main.h"
+ #include "vnic_ib.h"
+-#include "vnic_sys.h"
+ 
+ #ifdef CONFIG_INFINIBAND_QLGC_VNIC_STATS
+ 
+@@ -113,25 +112,24 @@ static inline void vnic_carrier_loss_stats(struct vnic *vnic)
+ static inline int vnic_setup_stats_files(struct vnic *vnic)
+ {
+ 	init_completion(&vnic->stat_info.released);
+-	vnic->stat_info.dev.class = NULL;
+-	vnic->stat_info.dev.parent = &vnic->dev_info.dev;
+-	vnic->stat_info.dev.release = vnic_release_dev;
+-	snprintf(vnic->stat_info.dev.bus_id, BUS_ID_SIZE,
++	vnic->stat_info.class_dev.class = &vnic_class;
++	vnic->stat_info.class_dev.parent = &vnic->class_dev_info.class_dev;
++	snprintf(vnic->stat_info.class_dev.class_id, BUS_ID_SIZE,
+ 		 "stats");
+ 
+-	if (device_register(&vnic->stat_info.dev)) {
++	if (class_device_register(&vnic->stat_info.class_dev)) {
+ 		SYS_ERROR("create_vnic: error in registering"
+ 			  " stat class dev\n");
+ 		goto stats_out;
+ 	}
+ 
+-	if (sysfs_create_group(&vnic->stat_info.dev.kobj,
++	if (sysfs_create_group(&vnic->stat_info.class_dev.kobj,
+ 			       &vnic_stats_attr_group))
+ 		goto err_stats_file;
+ 
+ 	return 0;
+ err_stats_file:
+-	device_unregister(&vnic->stat_info.dev);
++	class_device_unregister(&vnic->stat_info.class_dev);
+ 	wait_for_completion(&vnic->stat_info.released);
+ stats_out:
+ 	return -1;
+@@ -139,9 +137,9 @@ stats_out:
+ 
+ static inline void vnic_cleanup_stats_files(struct vnic *vnic)
+ {
+-	sysfs_remove_group(&vnic->dev_info.dev.kobj,
++	sysfs_remove_group(&vnic->class_dev_info.class_dev.kobj,
+ 			   &vnic_stats_attr_group);
+-	device_unregister(&vnic->stat_info.dev);
++	class_device_unregister(&vnic->stat_info.class_dev);
+ 	wait_for_completion(&vnic->stat_info.released);
+ }
+ 
+Index: ofed_kernel/drivers/infiniband/ulp/qlgc_vnic/vnic_sys.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/qlgc_vnic/vnic_sys.c
++++ ofed_kernel/drivers/infiniband/ulp/qlgc_vnic/vnic_sys.c
+@@ -77,21 +77,20 @@ static match_table_t vnic_opt_tokens = {
+ 	{VNIC_OPT_ERR, NULL}
+ };
+ 
+-void vnic_release_dev(struct device *dev)
++static void vnic_release_class_dev(struct class_device *class_dev)
+ {
+-	struct dev_info *dev_info =
+-	    container_of(dev, struct dev_info, dev);
+-
+-	complete(&dev_info->released);
++	struct class_dev_info *cdev_info =
++	    container_of(class_dev, struct class_dev_info, class_dev);
+ 
++	complete(&cdev_info->released);
+ }
+ 
+ struct class vnic_class = {
+ 	.name = "infiniband_qlgc_vnic",
+-	.dev_release = vnic_release_dev
++	.release = vnic_release_class_dev
+ };
+ 
+-struct dev_info interface_dev;
++struct class_dev_info interface_cdev;
+ 
+ static int vnic_parse_options(const char *buf, struct path_param *param)
+ {
+@@ -285,11 +284,11 @@ out:
+ 
+ }
+ 
+-static ssize_t show_vnic_state(struct device *dev,
+-			       struct device_attribute *dev_attr, char *buf)
++static ssize_t show_vnic_state(struct class_device *class_dev, char *buf)
+ {
+-	struct dev_info *info = container_of(dev, struct dev_info, dev);
+-	struct vnic *vnic = container_of(info, struct vnic, dev_info);
++	struct class_dev_info *info =
++	    container_of(class_dev, struct class_dev_info, class_dev);
++	struct vnic *vnic = container_of(info, struct vnic, class_dev_info);
+ 	switch (vnic->state) {
+ 	case VNIC_UNINITIALIZED:
+ 		return sprintf(buf, "VNIC_UNINITIALIZED\n");
+@@ -301,13 +300,13 @@ static ssize_t show_vnic_state(struct device *dev,
+ 
+ }
+ 
+-static DEVICE_ATTR(vnic_state, S_IRUGO, show_vnic_state, NULL);
++static CLASS_DEVICE_ATTR(vnic_state, S_IRUGO, show_vnic_state, NULL);
+ 
+-static ssize_t show_rx_csum(struct device *dev,
+-			    struct device_attribute *dev_attr, char *buf)
++static ssize_t show_rx_csum(struct class_device *class_dev, char *buf)
+ {
+-	struct dev_info *info = container_of(dev, struct dev_info, dev);
+-	struct vnic *vnic = container_of(info, struct vnic, dev_info);
++	struct class_dev_info *info =
++	    container_of(class_dev, struct class_dev_info, class_dev);
++	struct vnic *vnic = container_of(info, struct vnic, class_dev_info);
+ 
+ 	if (vnic->config->use_rx_csum)
+ 		return sprintf(buf, "true\n");
+@@ -315,13 +314,13 @@ static ssize_t show_rx_csum(struct device *dev,
+ 		return sprintf(buf, "false\n");
+ }
+ 
+-static DEVICE_ATTR(rx_csum, S_IRUGO, show_rx_csum, NULL);
++static CLASS_DEVICE_ATTR(rx_csum, S_IRUGO, show_rx_csum, NULL);
+ 
+-static ssize_t show_tx_csum(struct device *dev,
+-			    struct device_attribute *dev_attr, char *buf)
++static ssize_t show_tx_csum(struct class_device *class_dev, char *buf)
+ {
+-	struct dev_info *info = container_of(dev, struct dev_info, dev);
+-	struct vnic *vnic = container_of(info, struct vnic, dev_info);
++	struct class_dev_info *info =
++	    container_of(class_dev, struct class_dev_info, class_dev);
++	struct vnic *vnic = container_of(info, struct vnic, class_dev_info);
+ 
+ 	if (vnic->config->use_tx_csum)
+ 		return sprintf(buf, "true\n");
+@@ -329,13 +328,13 @@ static ssize_t show_tx_csum(struct device *dev,
+ 		return sprintf(buf, "false\n");
+ }
+ 
+-static DEVICE_ATTR(tx_csum, S_IRUGO, show_tx_csum, NULL);
++static CLASS_DEVICE_ATTR(tx_csum, S_IRUGO, show_tx_csum, NULL);
+ 
+-static ssize_t show_current_path(struct device *dev,
+-				 struct device_attribute *dev_attr, char *buf)
+-{
+-	struct dev_info *info = container_of(dev, struct dev_info, dev);
+-	struct vnic *vnic = container_of(info, struct vnic, dev_info);
++static ssize_t show_current_path(struct class_device *class_dev, char *buf)
++{
++	struct class_dev_info *info =
++	    container_of(class_dev, struct class_dev_info, class_dev);
++	struct vnic *vnic = container_of(info, struct vnic, class_dev_info);
+ 	unsigned long flags;
+ 	size_t length;
+ 
+@@ -350,13 +349,13 @@ static ssize_t show_current_path(struct device *dev,
+ 	return length;
+ }
+ 
+-static DEVICE_ATTR(current_path, S_IRUGO, show_current_path, NULL);
++static CLASS_DEVICE_ATTR(current_path, S_IRUGO, show_current_path, NULL);
+ 
+ static struct attribute *vnic_dev_attrs[] = {
+-	&dev_attr_vnic_state.attr,
+-	&dev_attr_rx_csum.attr,
+-	&dev_attr_tx_csum.attr,
+-	&dev_attr_current_path.attr,
++	&class_device_attr_vnic_state.attr,
++	&class_device_attr_rx_csum.attr,
++	&class_device_attr_tx_csum.attr,
++	&class_device_attr_current_path.attr,
+ 	NULL
+ };
+ 
+@@ -513,21 +512,20 @@ static struct vnic *create_vnic(struct path_param *param)
+ 		goto free_vnic_config;
+ 	}
+ 
+-	init_completion(&vnic->dev_info.released);
++	init_completion(&vnic->class_dev_info.released);
+ 
+-	vnic->dev_info.dev.class = NULL;
+-	vnic->dev_info.dev.parent = &interface_dev.dev;
+-	vnic->dev_info.dev.release = vnic_release_dev;
+-	snprintf(vnic->dev_info.dev.bus_id, BUS_ID_SIZE,
++	vnic->class_dev_info.class_dev.class = &vnic_class;
++	vnic->class_dev_info.class_dev.parent = &interface_cdev.class_dev;
++	snprintf(vnic->class_dev_info.class_dev.class_id, BUS_ID_SIZE,
+ 		 vnic_config->name);
+ 
+-	if (device_register(&vnic->dev_info.dev)) {
++	if (class_device_register(&vnic->class_dev_info.class_dev)) {
+ 		SYS_ERROR("create_vnic: error in registering"
+ 			  " vnic class dev\n");
+ 		goto free_vnic;
+ 	}
+ 
+-	if (sysfs_create_group(&vnic->dev_info.dev.kobj,
++	if (sysfs_create_group(&vnic->class_dev_info.class_dev.kobj,
+ 			       &vnic_dev_attr_group)) {
+ 		SYS_ERROR("create_vnic: error in creating"
+ 			  "vnic attr group\n");
+@@ -540,11 +538,11 @@ static struct vnic *create_vnic(struct path_param *param)
+ 
+ 	return vnic;
+ err_stats:
+-	sysfs_remove_group(&vnic->dev_info.dev.kobj,
++	sysfs_remove_group(&vnic->class_dev_info.class_dev.kobj,
+ 			   &vnic_dev_attr_group);
+ err_attr:
+-	device_unregister(&vnic->dev_info.dev);
+-	wait_for_completion(&vnic->dev_info.released);
++	class_device_unregister(&vnic->class_dev_info.class_dev);
++	wait_for_completion(&vnic->class_dev_info.released);
+ free_vnic:
+ 	list_del(&vnic->list_ptrs);
+ 	kfree(vnic);
+@@ -553,9 +551,8 @@ free_vnic_config:
+ 	return NULL;
+ }
+ 
+-static ssize_t vnic_sysfs_force_failover(struct device *dev,
+-					struct device_attribute *dev_attr, const char *buf,
+-					size_t count)
++static ssize_t vnic_sysfs_force_failover(struct class_device *class_dev,
++				  const char *buf, size_t count)
+ {
+ 	struct vnic *vnic;
+ 	struct list_head *ptr;
+@@ -579,11 +576,10 @@ static ssize_t vnic_sysfs_force_failover(struct device *dev,
+ 	return ret;
+ }
+ 
+-DEVICE_ATTR(force_failover, S_IWUSR, NULL, vnic_sysfs_force_failover);
++CLASS_DEVICE_ATTR(force_failover, S_IWUSR, NULL, vnic_sysfs_force_failover);
+ 
+-static ssize_t vnic_sysfs_unfailover(struct device *dev,
+-					struct device_attribute *dev_attr, const char *buf,
+-					size_t count)
++static ssize_t vnic_sysfs_unfailover(struct class_device *class_dev,
++			      const char *buf, size_t count)
+ {
+ 	struct vnic *vnic;
+ 	struct list_head *ptr;
+@@ -607,9 +603,9 @@ static ssize_t vnic_sysfs_unfailover(struct device *dev,
+ 	return ret;
+ }
+ 
+-DEVICE_ATTR(unfailover, S_IWUSR, NULL, vnic_sysfs_unfailover);
++CLASS_DEVICE_ATTR(unfailover, S_IWUSR, NULL, vnic_sysfs_unfailover);
+ 
+-static ssize_t vnic_delete(struct device *dev, struct device_attribute *dev_attr,
++static ssize_t vnic_delete(struct class_device *class_dev,
+ 			   const char *buf, size_t count)
+ {
+ 	struct vnic *vnic;
+@@ -634,13 +630,14 @@ static ssize_t vnic_delete(struct device *dev, struct device_attribute *dev_attr
+ 	return ret;
+ }
+ 
+-DEVICE_ATTR(delete_vnic, S_IWUSR, NULL, vnic_delete);
++CLASS_DEVICE_ATTR(delete_vnic, S_IWUSR, NULL, vnic_delete);
+ 
+-static ssize_t show_viport_state(struct device *dev,
+-				 struct device_attribute *dev_attr, char *buf)
++static ssize_t show_viport_state(struct class_device *class_dev, char *buf)
+ {
+-	struct dev_info *info = container_of(dev, struct dev_info, dev);
+-	struct netpath *path = container_of(info, struct netpath, dev_info);
++	struct class_dev_info *info =
++	    container_of(class_dev, struct class_dev_info, class_dev);
++	struct netpath *path =
++	    container_of(info, struct netpath, class_dev_info);
+ 	switch (path->viport->state) {
+ 	case VIPORT_DISCONNECTED:
+ 		return sprintf(buf, "VIPORT_DISCONNECTED\n");
+@@ -652,13 +649,14 @@ static ssize_t show_viport_state(struct device *dev,
+ 
+ }
+ 
+-static DEVICE_ATTR(viport_state, S_IRUGO, show_viport_state, NULL);
++static CLASS_DEVICE_ATTR(viport_state, S_IRUGO, show_viport_state, NULL);
+ 
+-static ssize_t show_link_state(struct device *dev,
+-			       struct device_attribute *dev_attr, char *buf)
++static ssize_t show_link_state(struct class_device *class_dev, char *buf)
+ {
+-	struct dev_info *info = container_of(dev, struct dev_info, dev);
+-	struct netpath *path = container_of(info, struct netpath, dev_info);
++	struct class_dev_info *info =
++	    container_of(class_dev, struct class_dev_info, class_dev);
++	struct netpath *path =
++	    container_of(info, struct netpath, class_dev_info);
+ 
+ 	switch (path->viport->link_state) {
+ 	case LINK_UNINITIALIZED:
+@@ -739,14 +737,15 @@ static ssize_t show_link_state(struct device *dev,
+ 	}
+ 
+ }
+-static DEVICE_ATTR(link_state, S_IRUGO, show_link_state, NULL);
++static CLASS_DEVICE_ATTR(link_state, S_IRUGO, show_link_state, NULL);
+ 
+-static ssize_t show_heartbeat(struct device *dev,
+-			      struct device_attribute *dev_attr, char *buf)
++static ssize_t show_heartbeat(struct class_device *class_dev, char *buf)
+ {
+-	struct dev_info *info = container_of(dev, struct dev_info, dev);
++	struct class_dev_info *info =
++	    container_of(class_dev, struct class_dev_info, class_dev);
+ 
+-	struct netpath *path = container_of(info, struct netpath, dev_info);
++	struct netpath *path =
++	    container_of(info, struct netpath, class_dev_info);
+ 
+ 	/* hb_inteval is in jiffies, convert it back to
+ 	 * 1/100ths of a second
+@@ -755,20 +754,21 @@ static ssize_t show_heartbeat(struct device *dev,
+ 		(jiffies_to_msecs(path->viport->config->hb_interval)/10));
+ }
+ 
+-static DEVICE_ATTR(heartbeat, S_IRUGO, show_heartbeat, NULL);
++static CLASS_DEVICE_ATTR(heartbeat, S_IRUGO, show_heartbeat, NULL);
+ 
+-static ssize_t show_ioc_guid(struct device *dev,
+-			     struct device_attribute *dev_attr, char *buf)
++static ssize_t show_ioc_guid(struct class_device *class_dev, char *buf)
+ {
+-	struct dev_info *info = container_of(dev, struct dev_info, dev);
++	struct class_dev_info *info =
++		container_of(class_dev, struct class_dev_info, class_dev);
+ 
+-	struct netpath *path = container_of(info, struct netpath, dev_info);
++	struct netpath *path =
++		container_of(info, struct netpath, class_dev_info);
+ 
+ 	return sprintf(buf, "%llx\n",
+ 				__be64_to_cpu(path->viport->config->ioc_guid));
+ }
+ 
+-static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
++static CLASS_DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
+ 
+ static inline void get_dgid_string(u8 *dgid, char *buf)
+ {
+@@ -783,64 +783,68 @@ static inline void get_dgid_string(u8 *dgid, char *buf)
+ 	strcat(buf, "\n");
+ }
+ 
+-static ssize_t show_dgid(struct device *dev,
+-			 struct device_attribute *dev_attr, char *buf)
++static ssize_t show_dgid(struct class_device *class_dev, char *buf)
+ {
+-	struct dev_info *info =	container_of(dev, struct dev_info, dev);
++	struct class_dev_info *info =
++		container_of(class_dev, struct class_dev_info, class_dev);
+ 
+-	struct netpath *path = container_of(info, struct netpath, dev_info);
++	struct netpath *path =
++		container_of(info, struct netpath, class_dev_info);
+ 
+ 	get_dgid_string(path->viport->config->path_info.path.dgid.raw, buf);
+ 
+ 	return strlen(buf);
+ }
+ 
+-static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
++static CLASS_DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
+ 
+-static ssize_t show_pkey(struct device *dev,
+-			 struct device_attribute *dev_attr, char *buf)
++static ssize_t show_pkey(struct class_device *class_dev, char *buf)
+ {
+-	struct dev_info *info =	container_of(dev, struct dev_info, dev);
++	struct class_dev_info *info =
++		container_of(class_dev, struct class_dev_info, class_dev);
+ 
+-	struct netpath *path = container_of(info, struct netpath, dev_info);
++	struct netpath *path =
++		container_of(info, struct netpath, class_dev_info);
+ 
+ 	return sprintf(buf, "%x\n", path->viport->config->path_info.path.pkey);
+ }
+ 
+-static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
++static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
+ 
+-static ssize_t show_hca_info(struct device *dev,
+-			     struct device_attribute *dev_attr, char *buf)
++static ssize_t show_hca_info(struct class_device *class_dev, char *buf)
+ {
+-	struct dev_info *info =	container_of(dev, struct dev_info, dev);
++	struct class_dev_info *info =
++		container_of(class_dev, struct class_dev_info, class_dev);
+ 
+-	struct netpath *path = container_of(info, struct netpath, dev_info);
++	struct netpath *path =
++		container_of(info, struct netpath, class_dev_info);
+ 
+ 	return sprintf(buf, "vnic-%s-%d\n", path->viport->config->ibdev->name,
+ 						path->viport->config->port);
+ }
+ 
+-static DEVICE_ATTR(hca_info, S_IRUGO, show_hca_info, NULL);
++static CLASS_DEVICE_ATTR(hca_info, S_IRUGO, show_hca_info, NULL);
+ 
+-static ssize_t show_ioc_string(struct device *dev,
+-			       struct device_attribute *dev_attr, char *buf)
++static ssize_t show_ioc_string(struct class_device *class_dev, char *buf)
+ {
+-	struct dev_info *info =	container_of(dev, struct dev_info, dev);
++	struct class_dev_info *info =
++		container_of(class_dev, struct class_dev_info, class_dev);
+ 
+-	struct netpath *path = container_of(info, struct netpath, dev_info);
++	struct netpath *path =
++		container_of(info, struct netpath, class_dev_info);
+ 
+ 	return sprintf(buf, "%s\n", path->viport->config->ioc_string);
+ }
+ 
+-static  DEVICE_ATTR(ioc_string, S_IRUGO, show_ioc_string, NULL);
++static CLASS_DEVICE_ATTR(ioc_string, S_IRUGO, show_ioc_string, NULL);
+ 
+-static ssize_t show_multicast_state(struct device *dev,
+-				    struct device_attribute *dev_attr,
+-				    char *buf)
++static ssize_t show_multicast_state(struct class_device *class_dev, char *buf)
+ {
+-	struct dev_info *info =	container_of(dev, struct dev_info, dev);
++	struct class_dev_info *info =
++		container_of(class_dev, struct class_dev_info, class_dev);
+ 
+-	struct netpath *path = container_of(info, struct netpath, dev_info);
++	struct netpath *path =
++		container_of(info, struct netpath, class_dev_info);
+ 
+ 	if (!(path->viport->features_supported & VNIC_FEAT_INBOUND_IB_MC))
+ 		return sprintf(buf, "feature not enabled\n");
+@@ -871,18 +875,18 @@ static ssize_t show_multicast_state(struct device *dev,
+ 	return sprintf(buf, "invalid state\n");
+ }
+ 
+-static  DEVICE_ATTR(multicast_state, S_IRUGO, show_multicast_state, NULL);
++static CLASS_DEVICE_ATTR(multicast_state, S_IRUGO, show_multicast_state, NULL);
+ 
+ static struct attribute *vnic_path_attrs[] = {
+-	&dev_attr_viport_state.attr,
+-	&dev_attr_link_state.attr,
+-	&dev_attr_heartbeat.attr,
+-	&dev_attr_ioc_guid.attr,
+-	&dev_attr_dgid.attr,
+-	&dev_attr_pkey.attr,
+-	&dev_attr_hca_info.attr,
+-	&dev_attr_ioc_string.attr,
+-	&dev_attr_multicast_state.attr,
++	&class_device_attr_viport_state.attr,
++	&class_device_attr_link_state.attr,
++	&class_device_attr_heartbeat.attr,
++	&class_device_attr_ioc_guid.attr,
++	&class_device_attr_dgid.attr,
++	&class_device_attr_pkey.attr,
++	&class_device_attr_hca_info.attr,
++	&class_device_attr_ioc_string.attr,
++	&class_device_attr_multicast_state.attr,
+ 	NULL
+ };
+ 
+@@ -893,19 +897,20 @@ struct attribute_group vnic_path_attr_group = {
+ 
+ static int setup_path_class_files(struct netpath *path, char *name)
+ {
+-	init_completion(&path->dev_info.released);
++	init_completion(&path->class_dev_info.released);
+ 
+-	path->dev_info.dev.class = NULL;
+-	path->dev_info.dev.parent = &path->parent->dev_info.dev;
+-	path->dev_info.dev.release = vnic_release_dev;
+-	snprintf(path->dev_info.dev.bus_id, BUS_ID_SIZE, name);
++	path->class_dev_info.class_dev.class = &vnic_class;
++	path->class_dev_info.class_dev.parent =
++	    &path->parent->class_dev_info.class_dev;
++	snprintf(path->class_dev_info.class_dev.class_id,
++		 BUS_ID_SIZE, name);
+ 
+-	if (device_register(&path->dev_info.dev)) {
++	if (class_device_register(&path->class_dev_info.class_dev)) {
+ 		SYS_ERROR("error in registering path class dev\n");
+ 		goto out;
+ 	}
+ 
+-	if (sysfs_create_group(&path->dev_info.dev.kobj,
++	if (sysfs_create_group(&path->class_dev_info.class_dev.kobj,
+ 			       &vnic_path_attr_group)) {
+ 		SYS_ERROR("error in creating vnic path group attrs");
+ 		goto err_path;
+@@ -914,8 +919,8 @@ static int setup_path_class_files(struct netpath *path, char *name)
+ 	return 0;
+ 
+ err_path:
+-	device_unregister(&path->dev_info.dev);
+-	wait_for_completion(&path->dev_info.released);
++	class_device_unregister(&path->class_dev_info.class_dev);
++	wait_for_completion(&path->class_dev_info.released);
+ out:
+ 	return -1;
+ 
+@@ -1046,13 +1051,13 @@ static ssize_t update_params_and_connect(struct path_param *params,
+ 	}
+ }
+ 
+-static ssize_t vnic_create_primary(struct device *dev,
+-				   struct device_attribute *dev_attr,
++static ssize_t vnic_create_primary(struct class_device *class_dev,
+ 				   const char *buf, size_t count)
+ {
+-	struct dev_info *info = container_of(dev, struct dev_info, dev);
++	struct class_dev_info *cdev =
++	    container_of(class_dev, struct class_dev_info, class_dev);
+ 	struct vnic_ib_port *target =
+-	    container_of(info, struct vnic_ib_port, pdev_info);
++	    container_of(cdev, struct vnic_ib_port, cdev_info);
+ 
+ 	struct path_param param;
+ 	int ret = -EINVAL;
+@@ -1114,15 +1119,15 @@ out:
+ 	return ret;
+ }
+ 
+-DEVICE_ATTR(create_primary, S_IWUSR, NULL, vnic_create_primary);
++CLASS_DEVICE_ATTR(create_primary, S_IWUSR, NULL, vnic_create_primary);
+ 
+-static ssize_t vnic_create_secondary(struct device *dev,
+-				     struct device_attribute *dev_attr,
++static ssize_t vnic_create_secondary(struct class_device *class_dev,
+ 				     const char *buf, size_t count)
+ {
+-	struct dev_info *info = container_of(dev, struct dev_info, dev);
++	struct class_dev_info *cdev =
++	    container_of(class_dev, struct class_dev_info, class_dev);
+ 	struct vnic_ib_port *target =
+-	    container_of(info, struct vnic_ib_port, pdev_info);
++	    container_of(cdev, struct vnic_ib_port, cdev_info);
+ 
+ 	struct path_param param;
+ 	struct vnic *vnic = NULL;
+@@ -1186,4 +1191,4 @@ out:
+ 	return ret;
+ }
+ 
+-DEVICE_ATTR(create_secondary, S_IWUSR, NULL, vnic_create_secondary);
++CLASS_DEVICE_ATTR(create_secondary, S_IWUSR, NULL, vnic_create_secondary);
+Index: ofed_kernel/drivers/infiniband/ulp/qlgc_vnic/vnic_sys.h
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/qlgc_vnic/vnic_sys.h
++++ ofed_kernel/drivers/infiniband/ulp/qlgc_vnic/vnic_sys.h
+@@ -33,21 +33,19 @@
+ #ifndef VNIC_SYS_H_INCLUDED
+ #define VNIC_SYS_H_INCLUDED
+ 
+-struct dev_info {
+-	struct device		dev;
++struct class_dev_info {
++	struct class_device	class_dev;
+ 	struct completion	released;
+ };
+ 
+ extern struct class vnic_class;
+-extern struct dev_info interface_dev;
++extern struct class_dev_info interface_cdev;
+ extern struct attribute_group vnic_dev_attr_group;
+ extern struct attribute_group vnic_path_attr_group;
+-extern struct device_attribute dev_attr_create_primary;
+-extern struct device_attribute dev_attr_create_secondary;
+-extern struct device_attribute dev_attr_delete_vnic;
+-extern struct device_attribute dev_attr_force_failover;
+-extern struct device_attribute dev_attr_unfailover;
+-
+-extern void vnic_release_dev(struct device *dev);
++extern struct class_device_attribute class_device_attr_create_primary;
++extern struct class_device_attribute class_device_attr_create_secondary;
++extern struct class_device_attribute class_device_attr_delete_vnic;
++extern struct class_device_attribute class_device_attr_force_failover;
++extern struct class_device_attribute class_device_attr_unfailover;
+ 
+ #endif	/*VNIC_SYS_H_INCLUDED*/
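
The hunks above convert the qlgc_vnic sysfs code from the device/DEVICE_ATTR
API back to the class_device/CLASS_DEVICE_ATTR API that the 2.6.18-based
EL5.3 kernel still provides.  As a point of reference only (not part of the
patch, and with "demo" used as a purely illustrative name), the old-style
callback and attribute declaration look roughly like this:

    #include <linux/device.h>
    #include <linux/completion.h>
    #include <linux/kernel.h>
    #include <linux/stat.h>

    struct demo_info {
            struct class_device     class_dev;
            struct completion       released;
    };

    /* pre-2.6.26 show callbacks get only the class_device and the buffer */
    static ssize_t show_demo(struct class_device *class_dev, char *buf)
    {
            struct demo_info *info =
                    container_of(class_dev, struct demo_info, class_dev);

            return sprintf(buf, "%s\n", info->class_dev.class_id);
    }

    static CLASS_DEVICE_ATTR(demo, S_IRUGO, show_demo, NULL);

Registration then goes through class_device_register() and
sysfs_create_group() on class_dev.kobj, which is exactly what the converted
vnic_setup_stats_files() and setup_path_class_files() do.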

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/qlgc_vnic_02_dev_stats.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/qlgc_vnic_02_dev_stats.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/qlgc_vnic_02_dev_stats.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,46 @@
+---
+ drivers/infiniband/ulp/qlgc_vnic/vnic_main.c |    6 +++---
+ drivers/infiniband/ulp/qlgc_vnic/vnic_main.h |    1 +
+ 2 files changed, 4 insertions(+), 3 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/ulp/qlgc_vnic/vnic_main.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/qlgc_vnic/vnic_main.c
++++ ofed_kernel/drivers/infiniband/ulp/qlgc_vnic/vnic_main.c
+@@ -204,13 +204,13 @@ static struct net_device_stats *vnic_get
+ 	if (np && np->viport) {
+ 		atomic_inc(&np->viport->reference_count);
+ 		spin_unlock_irqrestore(&vnic->current_path_lock, flags);
+-		viport_get_stats(np->viport, &vnic->netdevice->stats);
++		viport_get_stats(np->viport, &vnic->stats);
+ 		atomic_dec(&np->viport->reference_count);
+ 		wake_up(&np->viport->reference_queue);
+ 	} else
+ 		spin_unlock_irqrestore(&vnic->current_path_lock, flags);
+ 
+-	return &vnic->netdevice->stats;
++	return &vnic->stats;
+ }
+ 
+ static int vnic_open(struct net_device *device)
+@@ -259,7 +259,7 @@ static int vnic_hard_start_xmit(struct s
+ 	if (ret) {
+ 		vnic_xmit_fail_stats(vnic);
+ 		dev_kfree_skb_any(skb);
+-		vnic->netdevice->stats.tx_dropped++;
++		vnic->stats.tx_dropped++;
+ 		goto out;
+ 	}
+ 
+Index: ofed_kernel/drivers/infiniband/ulp/qlgc_vnic/vnic_main.h
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/qlgc_vnic/vnic_main.h
++++ ofed_kernel/drivers/infiniband/ulp/qlgc_vnic/vnic_main.h
+@@ -102,6 +102,7 @@ struct vnic {
+ 	int				forced_failover;
+ 	int				failed_over;
+ 	int				mac_set;
++	struct net_device_stats 	stats;
+ 	struct net_device		*netdevice;
+ 	struct class_dev_info		class_dev_info;
+ 	struct dev_mc_list		*mc_list;
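
qlgc_vnic_02_dev_stats.patch compensates for struct net_device only gaining
an embedded "stats" member in 2.6.26: on the older kernel the driver keeps
its own struct net_device_stats and returns it from the get_stats hook.  A
minimal sketch of that pattern (names are illustrative, not taken from the
driver):

    #include <linux/netdevice.h>

    struct demo_priv {
            struct net_device_stats stats;  /* driver-owned counters */
    };

    static struct net_device_stats *demo_get_stats(struct net_device *dev)
    {
            struct demo_priv *priv = netdev_priv(dev);

            /* counters such as priv->stats.tx_dropped are bumped elsewhere */
            return &priv->stats;
    }

    /* wired up at probe time on pre-net_device_ops kernels:
     *         dev->get_stats = demo_get_stats;
     */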

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/rds_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/rds_to_2_6_26.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/rds_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,18 @@
+---
+ net/rds/connection.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+Index: ofed_kernel/net/rds/connection.c
+===================================================================
+--- ofed_kernel.orig/net/rds/connection.c
++++ ofed_kernel/net/rds/connection.c
+@@ -51,8 +51,7 @@ static struct kmem_cache *rds_conn_slab;
+ static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
+ {
+ 	/* Pass NULL, don't need struct net for hash */
+-	unsigned long hash = inet_ehashfn(NULL,
+-					  be32_to_cpu(laddr), 0, 
++	unsigned long hash = inet_ehashfn(be32_to_cpu(laddr), 0,
+ 					  be32_to_cpu(faddr), 0);
+ 	return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
+ }
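
rds_to_2_6_26.patch drops the struct net argument from the inet_ehashfn()
call because the hash helper on these older kernels is not network-namespace
aware and takes only addresses and ports.  A rough sketch of the resulting
call, under that assumption (demo_conn_hash is an illustrative name):

    #include <net/inet_hashtables.h>

    static unsigned long demo_conn_hash(__be32 laddr, __be32 faddr)
    {
            /* RDS does not hash on ports, so both port arguments are zero */
            return inet_ehashfn(be32_to_cpu(laddr), 0, be32_to_cpu(faddr), 0);
    }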

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/rnfs_fs.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/rnfs_fs.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/rnfs_fs.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,2734 @@
+diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
+index cc91227..262397b 100644
+--- a/fs/exportfs/expfs.c
++++ b/fs/exportfs/expfs.c
+@@ -361,11 +361,14 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
+ 	const struct export_operations *nop = mnt->mnt_sb->s_export_op;
+ 	struct dentry *result, *alias;
+ 	int err;
++	__u32 objp[2];
+ 
++	objp[0] = fid->i32.ino;
++	objp[1] = fid->i32.gen;
+ 	/*
+ 	 * Try to get any dentry for the given file handle from the filesystem.
+ 	 */
+-	result = nop->fh_to_dentry(mnt->mnt_sb, fid, fh_len, fileid_type);
++	result = nop->get_dentry(mnt->mnt_sb, &objp);
+ 	if (!result)
+ 		result = ERR_PTR(-ESTALE);
+ 	if (IS_ERR(result))
+@@ -417,11 +420,10 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
+ 		 * file handle.  If this fails we'll have to give up.
+ 		 */
+ 		err = -ESTALE;
+-		if (!nop->fh_to_parent)
++		if (!nop->get_parent)
+ 			goto err_result;
+ 
+-		target_dir = nop->fh_to_parent(mnt->mnt_sb, fid,
+-				fh_len, fileid_type);
++		target_dir = nop->get_parent(result);
+ 		if (!target_dir)
+ 			goto err_result;
+ 		err = PTR_ERR(target_dir);
+diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
+index 0b45fd3..2c45814 100644
+--- a/fs/lockd/clntlock.c
++++ b/fs/lockd/clntlock.c
+@@ -168,7 +168,7 @@ __be32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *lock
+ 			continue;
+ 		if (!nlm_cmp_addr(&block->b_host->h_addr, addr))
+ 			continue;
+-		if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_path.dentry->d_inode) ,fh) != 0)
++		if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_dentry->d_inode), fh) != 0)
+ 			continue;
+ 		/* Alright, we found a lock. Set the return status
+ 		 * and wake up the caller
+diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
+index 31668b6..8c72d30 100644
+--- a/fs/lockd/clntproc.c
++++ b/fs/lockd/clntproc.c
+@@ -128,12 +128,12 @@ static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
+ 
+ 	nlmclnt_next_cookie(&argp->cookie);
+ 	argp->state   = nsm_local_state;
+-	memcpy(&lock->fh, NFS_FH(fl->fl_file->f_path.dentry->d_inode), sizeof(struct nfs_fh));
+-	lock->caller  = utsname()->nodename;
++	memcpy(&lock->fh, NFS_FH(fl->fl_file->f_dentry->d_inode), sizeof(struct nfs_fh));
++	lock->caller  = system_utsname.nodename;
+ 	lock->oh.data = req->a_owner;
+ 	lock->oh.len  = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
+ 				(unsigned int)fl->fl_u.nfs_fl.owner->pid,
+-				utsname()->nodename);
++				system_utsname.nodename);
+ 	lock->svid = fl->fl_u.nfs_fl.owner->pid;
+ 	lock->fl.fl_start = fl->fl_start;
+ 	lock->fl.fl_end = fl->fl_end;
+diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
+index e4d5635..771edc1 100644
+--- a/fs/lockd/mon.c
++++ b/fs/lockd/mon.c
+@@ -194,7 +194,7 @@ static __be32 *xdr_encode_mon_name(__be32 *p, struct nsm_args *argp)
+  */
+ static __be32 *xdr_encode_my_id(__be32 *p, struct nsm_args *argp)
+ {
+-	p = xdr_encode_nsm_string(p, utsname()->nodename);
++	p = xdr_encode_nsm_string(p, system_utsname.nodename);
+ 	if (!p)
+ 		return ERR_PTR(-EIO);
+ 
+diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
+index cf0d5c2..a353cf5 100644
+--- a/fs/lockd/svclock.c
++++ b/fs/lockd/svclock.c
+@@ -304,7 +304,7 @@ static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
+ {
+ 	locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
+ 	memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
+-	call->a_args.lock.caller = utsname()->nodename;
++	call->a_args.lock.caller = system_utsname.nodename;
+ 	call->a_args.lock.oh.len = lock->oh.len;
+ 
+ 	/* set default data area */
+@@ -367,8 +367,8 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
+ 	__be32			ret;
+ 
+ 	dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
+-				file->f_file->f_path.dentry->d_inode->i_sb->s_id,
+-				file->f_file->f_path.dentry->d_inode->i_ino,
++				file->f_file->f_dentry->d_inode->i_sb->s_id,
++				file->f_file->f_dentry->d_inode->i_ino,
+ 				lock->fl.fl_type, lock->fl.fl_pid,
+ 				(long long)lock->fl.fl_start,
+ 				(long long)lock->fl.fl_end,
+@@ -417,11 +417,18 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
+ 			ret = nlm_granted;
+ 			goto out;
+ 		case -EAGAIN:
+-			ret = nlm_lck_denied;
+-			goto out;
++			if (wait) {
++				ret = nlm_lck_blocked;
++				break;
++			} else {
++				ret = nlm_lck_denied;
++				goto out;
++			}
+ 		case FILE_LOCK_DEFERRED:
+-			if (wait)
++			if (wait) {
++				ret = nlm_lck_blocked;
+ 				break;
++			}
+ 			/* Filesystem lock operation is in progress
+ 			   Add it to the queue waiting for callback */
+ 			ret = nlmsvc_defer_lock_rqst(rqstp, block);
+@@ -434,8 +441,6 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
+ 			goto out;
+ 	}
+ 
+-	ret = nlm_lck_blocked;
+-
+ 	/* Append to list of blocked */
+ 	nlmsvc_insert_block(block, NLM_NEVER);
+ out:
+@@ -458,8 +463,8 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
+ 	__be32			ret;
+ 
+ 	dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
+-				file->f_file->f_path.dentry->d_inode->i_sb->s_id,
+-				file->f_file->f_path.dentry->d_inode->i_ino,
++				file->f_file->f_dentry->d_inode->i_sb->s_id,
++				file->f_file->f_dentry->d_inode->i_ino,
+ 				lock->fl.fl_type,
+ 				(long long)lock->fl.fl_start,
+ 				(long long)lock->fl.fl_end);
+@@ -547,8 +552,8 @@ nlmsvc_unlock(struct nlm_file *file, struct nlm_lock *lock)
+ 	int	error;
+ 
+ 	dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
+-				file->f_file->f_path.dentry->d_inode->i_sb->s_id,
+-				file->f_file->f_path.dentry->d_inode->i_ino,
++				file->f_file->f_dentry->d_inode->i_sb->s_id,
++				file->f_file->f_dentry->d_inode->i_ino,
+ 				lock->fl.fl_pid,
+ 				(long long)lock->fl.fl_start,
+ 				(long long)lock->fl.fl_end);
+@@ -576,8 +581,8 @@ nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
+ 	int status = 0;
+ 
+ 	dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
+-				file->f_file->f_path.dentry->d_inode->i_sb->s_id,
+-				file->f_file->f_path.dentry->d_inode->i_ino,
++				file->f_file->f_dentry->d_inode->i_sb->s_id,
++				file->f_file->f_dentry->d_inode->i_ino,
+ 				lock->fl.fl_pid,
+ 				(long long)lock->fl.fl_start,
+ 				(long long)lock->fl.fl_end);
+diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
+index 198b4e5..2109091 100644
+--- a/fs/lockd/svcsubs.c
++++ b/fs/lockd/svcsubs.c
+@@ -45,7 +45,7 @@ static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f)
+ 
+ static inline void nlm_debug_print_file(char *msg, struct nlm_file *file)
+ {
+-	struct inode *inode = file->f_file->f_path.dentry->d_inode;
++	struct inode *inode = file->f_file->f_dentry->d_inode;
+ 
+ 	dprintk("lockd: %s %s/%ld\n",
+ 		msg, inode->i_sb->s_id, inode->i_ino);
+@@ -396,7 +396,7 @@ nlmsvc_match_sb(void *datap, struct nlm_file *file)
+ {
+ 	struct super_block *sb = datap;
+ 
+-	return sb == file->f_file->f_path.mnt->mnt_sb;
++	return sb == file->f_file->f_vfsmnt->mnt_sb;
+ }
+ 
+ /**
+diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
+index ac6170c..16d3d17 100644
+--- a/fs/nfs/Makefile
++++ b/fs/nfs/Makefile
+@@ -6,7 +6,8 @@ obj-$(CONFIG_NFS_FS) += nfs.o
+ 
+ nfs-y 			:= client.o dir.o file.o getroot.o inode.o super.o nfs2xdr.o \
+ 			   direct.o pagelist.o proc.o read.o symlink.o unlink.o \
+-			   write.o namespace.o mount_clnt.o
++			   write.o namespace.o mount_clnt.o \
++			   backport-namespace.o backport-writeback.o
+ nfs-$(CONFIG_ROOT_NFS)	+= nfsroot.o
+ nfs-$(CONFIG_NFS_V3)	+= nfs3proc.o nfs3xdr.o
+ nfs-$(CONFIG_NFS_V3_ACL)	+= nfs3acl.o
+diff --git a/fs/nfs/backport-namespace.c b/fs/nfs/backport-namespace.c
+new file mode 100644
+index 0000000..de57f8b
+--- /dev/null
++++ b/fs/nfs/backport-namespace.c
+@@ -0,0 +1 @@
++#include "src/namespace.c"
+diff --git a/fs/nfs/backport-writeback.c b/fs/nfs/backport-writeback.c
+new file mode 100644
+index 0000000..b838ead
+--- /dev/null
++++ b/fs/nfs/backport-writeback.c
+@@ -0,0 +1 @@
++#include "src/writeback.c"
+diff --git a/fs/nfs/client.c b/fs/nfs/client.c
+index 5ee23e7..afbb834 100644
+--- a/fs/nfs/client.c
++++ b/fs/nfs/client.c
+@@ -248,6 +248,7 @@ static int nfs_sockaddr_match_ipaddr(const struct sockaddr *sa1,
+ 				(const struct sockaddr_in6 *)sa2);
+ 	}
+ 	BUG();
++	return -EINVAL;
+ }
+ 
+ /*
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 74f92b7..90d0a97 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -66,7 +66,7 @@ const struct file_operations nfs_dir_operations = {
+ 	.fsync		= nfs_fsync_dir,
+ };
+ 
+-const struct inode_operations nfs_dir_inode_operations = {
++struct inode_operations nfs_dir_inode_operations = {
+ 	.create		= nfs_create,
+ 	.lookup		= nfs_lookup,
+ 	.link		= nfs_link,
+@@ -82,7 +82,7 @@ const struct inode_operations nfs_dir_inode_operations = {
+ };
+ 
+ #ifdef CONFIG_NFS_V3
+-const struct inode_operations nfs3_dir_inode_operations = {
++struct inode_operations nfs3_dir_inode_operations = {
+ 	.create		= nfs_create,
+ 	.lookup		= nfs_lookup,
+ 	.link		= nfs_link,
+@@ -105,7 +105,7 @@ const struct inode_operations nfs3_dir_inode_operations = {
+ #ifdef CONFIG_NFS_V4
+ 
+ static struct dentry *nfs_atomic_lookup(struct inode *, struct dentry *, struct nameidata *);
+-const struct inode_operations nfs4_dir_inode_operations = {
++struct inode_operations nfs4_dir_inode_operations = {
+ 	.create		= nfs_create,
+ 	.lookup		= nfs_atomic_lookup,
+ 	.link		= nfs_link,
+@@ -134,8 +134,8 @@ nfs_opendir(struct inode *inode, struct file *filp)
+ 	int res;
+ 
+ 	dfprintk(FILE, "NFS: open dir(%s/%s)\n",
+-			filp->f_path.dentry->d_parent->d_name.name,
+-			filp->f_path.dentry->d_name.name);
++			filp->f_dentry->d_parent->d_name.name,
++			filp->f_dentry->d_name.name);
+ 
+ 	nfs_inc_stats(inode, NFSIOS_VFSOPEN);
+ 
+@@ -175,7 +175,7 @@ static
+ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page)
+ {
+ 	struct file	*file = desc->file;
+-	struct inode	*inode = file->f_path.dentry->d_inode;
++	struct inode	*inode = file->f_dentry->d_inode;
+ 	struct rpc_cred	*cred = nfs_file_cred(file);
+ 	unsigned long	timestamp;
+ 	int		error;
+@@ -186,7 +186,7 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page)
+ 
+  again:
+ 	timestamp = jiffies;
+-	error = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, desc->entry->cookie, page,
++	error = NFS_PROTO(inode)->readdir(file->f_dentry, cred, desc->entry->cookie, page,
+ 					  NFS_SERVER(inode)->dtsize, desc->plus);
+ 	if (error < 0) {
+ 		/* We requested READDIRPLUS, but the server doesn't grok it */
+@@ -311,7 +311,7 @@ int find_dirent_index(nfs_readdir_descriptor_t *desc)
+ static inline
+ int find_dirent_page(nfs_readdir_descriptor_t *desc)
+ {
+-	struct inode	*inode = desc->file->f_path.dentry->d_inode;
++	struct inode	*inode = desc->file->f_dentry->d_inode;
+ 	struct page	*page;
+ 	int		status;
+ 
+@@ -467,7 +467,7 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
+ 		     filldir_t filldir)
+ {
+ 	struct file	*file = desc->file;
+-	struct inode	*inode = file->f_path.dentry->d_inode;
++	struct inode	*inode = file->f_dentry->d_inode;
+ 	struct rpc_cred	*cred = nfs_file_cred(file);
+ 	struct page	*page = NULL;
+ 	int		status;
+@@ -482,7 +482,7 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
+ 		goto out;
+ 	}
+ 	timestamp = jiffies;
+-	status = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred,
++	status = NFS_PROTO(inode)->readdir(file->f_dentry, cred,
+ 						*desc->dir_cookie, page,
+ 						NFS_SERVER(inode)->dtsize,
+ 						desc->plus);
+@@ -520,7 +520,7 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
+  */
+ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+ {
+-	struct dentry	*dentry = filp->f_path.dentry;
++	struct dentry	*dentry = filp->f_dentry;
+ 	struct inode	*inode = dentry->d_inode;
+ 	nfs_readdir_descriptor_t my_desc,
+ 			*desc = &my_desc;
+@@ -601,7 +601,7 @@ out:
+ 
+ static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin)
+ {
+-	struct dentry *dentry = filp->f_path.dentry;
++	struct dentry *dentry = filp->f_dentry;
+ 	struct inode *inode = dentry->d_inode;
+ 
+ 	dfprintk(FILE, "NFS: llseek dir(%s/%s, %lld, %d)\n",
+@@ -973,7 +973,7 @@ static int is_atomic_open(struct inode *dir, struct nameidata *nd)
+ 	if (nd->flags & LOOKUP_DIRECTORY)
+ 		return 0;
+ 	/* Are we trying to write to a read only partition? */
+-	if (__mnt_is_readonly(nd->path.mnt) &&
++	if (__mnt_is_readonly(nd->mnt) &&
+ 	    (nd->intent.open.flags & (O_CREAT|O_TRUNC|FMODE_WRITE)))
+ 		return 0;
+ 	return 1;
+@@ -1083,7 +1083,7 @@ no_open:
+ 
+ static struct dentry *nfs_readdir_lookup(nfs_readdir_descriptor_t *desc)
+ {
+-	struct dentry *parent = desc->file->f_path.dentry;
++	struct dentry *parent = desc->file->f_dentry;
+ 	struct inode *dir = parent->d_inode;
+ 	struct nfs_entry *entry = desc->entry;
+ 	struct dentry *dentry, *alias;
+@@ -1907,7 +1907,7 @@ int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags)
+ 	return nfs_do_access(inode, cred, nfs_open_permission_mask(openflags));
+ }
+ 
+-int nfs_permission(struct inode *inode, int mask)
++int nfs_permission(struct inode *inode, int mask, struct nameidata *nd)
+ {
+ 	struct rpc_cred *cred;
+ 	int res = 0;
+@@ -1917,7 +1917,7 @@ int nfs_permission(struct inode *inode, int mask)
+ 	if ((mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
+ 		goto out;
+ 	/* Is this sys_access() ? */
+-	if (mask & MAY_ACCESS)
++	if (nd != NULL && (nd->flags & LOOKUP_ACCESS))
+ 		goto force_lookup;
+ 
+ 	switch (inode->i_mode & S_IFMT) {
+@@ -1926,7 +1926,8 @@ int nfs_permission(struct inode *inode, int mask)
+ 		case S_IFREG:
+ 			/* NFSv4 has atomic_open... */
+ 			if (nfs_server_capable(inode, NFS_CAP_ATOMIC_OPEN)
+-					&& (mask & MAY_OPEN))
++					&& nd != NULL
++					&& (nd->flags & LOOKUP_OPEN))
+ 				goto out;
+ 			break;
+ 		case S_IFDIR:
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index 08f6b04..91f5069 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -116,7 +116,7 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
+ ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
+ {
+ 	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
+-			iocb->ki_filp->f_path.dentry->d_name.name,
++			iocb->ki_filp->f_dentry->d_name.name,
+ 			(long long) pos, nr_segs);
+ 
+ 	return -EINVAL;
+@@ -891,8 +891,8 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
+ 	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
+ 
+ 	dfprintk(FILE, "NFS: direct read(%s/%s, %zd@%Ld)\n",
+-		file->f_path.dentry->d_parent->d_name.name,
+-		file->f_path.dentry->d_name.name,
++		file->f_dentry->d_parent->d_name.name,
++		file->f_dentry->d_name.name,
+ 		count, (long long) pos);
+ 
+ 	retval = 0;
+@@ -948,8 +948,8 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
+ 	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);
+ 
+ 	dfprintk(FILE, "NFS: direct write(%s/%s, %zd@%Ld)\n",
+-		file->f_path.dentry->d_parent->d_name.name,
+-		file->f_path.dentry->d_name.name,
++		file->f_dentry->d_parent->d_name.name,
++		file->f_dentry->d_name.name,
+ 		count, (long long) pos);
+ 
+ 	retval = generic_write_checks(file, &pos, &count, 0);
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index 7846065..affdd75 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -45,16 +45,13 @@ static int  nfs_file_mmap(struct file *, struct vm_area_struct *);
+ static ssize_t nfs_file_splice_read(struct file *filp, loff_t *ppos,
+ 					struct pipe_inode_info *pipe,
+ 					size_t count, unsigned int flags);
+-static ssize_t nfs_file_read(struct kiocb *, const struct iovec *iov,
+-				unsigned long nr_segs, loff_t pos);
+-static ssize_t nfs_file_write(struct kiocb *, const struct iovec *iov,
+-				unsigned long nr_segs, loff_t pos);
++static ssize_t nfs_file_read(struct kiocb *iocb, char __user * buf, size_t count, loff_t pos);
++static ssize_t nfs_file_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos);
+ static int  nfs_file_flush(struct file *, fl_owner_t id);
+ static int  nfs_file_fsync(struct file *, struct dentry *dentry, int datasync);
+ static int nfs_check_flags(int flags);
+ static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl);
+ static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl);
+-static int nfs_setlease(struct file *file, long arg, struct file_lock **fl);
+ 
+ static struct vm_operations_struct nfs_file_vm_ops;
+ 
+@@ -77,17 +74,16 @@ const struct file_operations nfs_file_operations = {
+ 	.flock		= nfs_flock,
+ 	.splice_read	= nfs_file_splice_read,
+ 	.check_flags	= nfs_check_flags,
+-	.setlease	= nfs_setlease,
+ };
+ 
+-const struct inode_operations nfs_file_inode_operations = {
++struct inode_operations nfs_file_inode_operations = {
+ 	.permission	= nfs_permission,
+ 	.getattr	= nfs_getattr,
+ 	.setattr	= nfs_setattr,
+ };
+ 
+ #ifdef CONFIG_NFS_V3
+-const struct inode_operations nfs3_file_inode_operations = {
++struct inode_operations nfs3_file_inode_operations = {
+ 	.permission	= nfs_permission,
+ 	.getattr	= nfs_getattr,
+ 	.setattr	= nfs_setattr,
+@@ -120,8 +116,8 @@ nfs_file_open(struct inode *inode, struct file *filp)
+ 	int res;
+ 
+ 	dprintk("NFS: open file(%s/%s)\n",
+-			filp->f_path.dentry->d_parent->d_name.name,
+-			filp->f_path.dentry->d_name.name);
++			filp->f_dentry->d_parent->d_name.name,
++			filp->f_dentry->d_name.name);
+ 
+ 	res = nfs_check_flags(filp->f_flags);
+ 	if (res)
+@@ -135,7 +131,7 @@ nfs_file_open(struct inode *inode, struct file *filp)
+ static int
+ nfs_file_release(struct inode *inode, struct file *filp)
+ {
+-	struct dentry *dentry = filp->f_path.dentry;
++	struct dentry *dentry = filp->f_dentry;
+ 
+ 	dprintk("NFS: release(%s/%s)\n",
+ 			dentry->d_parent->d_name.name,
+@@ -178,11 +174,9 @@ force_reval:
+ 
+ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
+ {
+-	loff_t loff;
+-
+ 	dprintk("NFS: llseek file(%s/%s, %lld, %d)\n",
+-			filp->f_path.dentry->d_parent->d_name.name,
+-			filp->f_path.dentry->d_name.name,
++			filp->f_dentry->d_parent->d_name.name,
++			filp->f_dentry->d_name.name,
+ 			offset, origin);
+ 
+ 	/* origin == SEEK_END => we must revalidate the cached file length */
+@@ -192,10 +186,7 @@ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
+ 		if (retval < 0)
+ 			return (loff_t)retval;
+ 	}
+-	lock_kernel();	/* BKL needed? */
+-	loff = generic_file_llseek_unlocked(filp, offset, origin);
+-	unlock_kernel();
+-	return loff;
++	return remote_llseek(filp, offset, origin);
+ }
+ 
+ /*
+@@ -230,7 +221,7 @@ static int
+ nfs_file_flush(struct file *file, fl_owner_t id)
+ {
+ 	struct nfs_open_context *ctx = nfs_file_open_context(file);
+-	struct dentry	*dentry = file->f_path.dentry;
++	struct dentry	*dentry = file->f_dentry;
+ 	struct inode	*inode = dentry->d_inode;
+ 	int		status;
+ 
+@@ -250,16 +241,15 @@ nfs_file_flush(struct file *file, fl_owner_t id)
+ }
+ 
+ static ssize_t
+-nfs_file_read(struct kiocb *iocb, const struct iovec *iov,
+-		unsigned long nr_segs, loff_t pos)
++nfs_file_read(struct kiocb *iocb, char __user * buf, size_t count, loff_t pos)
+ {
+-	struct dentry * dentry = iocb->ki_filp->f_path.dentry;
++	struct dentry * dentry = iocb->ki_filp->f_dentry;
+ 	struct inode * inode = dentry->d_inode;
+ 	ssize_t result;
+-	size_t count = iov_length(iov, nr_segs);
++	struct iovec local_iov = { .iov_base = buf, .iov_len = count };
+ 
+ 	if (iocb->ki_filp->f_flags & O_DIRECT)
+-		return nfs_file_direct_read(iocb, iov, nr_segs, pos);
++		return nfs_file_direct_read(iocb, &local_iov, 1, pos);
+ 
+ 	dprintk("NFS: read(%s/%s, %lu@%lu)\n",
+ 		dentry->d_parent->d_name.name, dentry->d_name.name,
+@@ -268,7 +258,7 @@ nfs_file_read(struct kiocb *iocb, const struct iovec *iov,
+ 	result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
+ 	nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, count);
+ 	if (!result)
+-		result = generic_file_aio_read(iocb, iov, nr_segs, pos);
++		result = generic_file_aio_read(iocb, buf, count, pos);
+ 	return result;
+ }
+ 
+@@ -277,7 +267,7 @@ nfs_file_splice_read(struct file *filp, loff_t *ppos,
+ 		     struct pipe_inode_info *pipe, size_t count,
+ 		     unsigned int flags)
+ {
+-	struct dentry *dentry = filp->f_path.dentry;
++	struct dentry *dentry = filp->f_dentry;
+ 	struct inode *inode = dentry->d_inode;
+ 	ssize_t res;
+ 
+@@ -294,7 +284,7 @@ nfs_file_splice_read(struct file *filp, loff_t *ppos,
+ static int
+ nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
+ {
+-	struct dentry *dentry = file->f_path.dentry;
++	struct dentry *dentry = file->f_dentry;
+ 	struct inode *inode = dentry->d_inode;
+ 	int	status;
+ 
+@@ -337,44 +327,15 @@ nfs_file_fsync(struct file *file, struct dentry *dentry, int datasync)
+  * If the writer ends up delaying the write, the writer needs to
+  * increment the page use counts until he is done with the page.
+  */
+-static int nfs_write_begin(struct file *file, struct address_space *mapping,
+-			loff_t pos, unsigned len, unsigned flags,
+-			struct page **pagep, void **fsdata)
++static int nfs_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
+ {
+-	int ret;
+-	pgoff_t index;
+-	struct page *page;
+-	index = pos >> PAGE_CACHE_SHIFT;
+-
+-	dfprintk(PAGECACHE, "NFS: write_begin(%s/%s(%ld), %u@%lld)\n",
+-		file->f_path.dentry->d_parent->d_name.name,
+-		file->f_path.dentry->d_name.name,
+-		mapping->host->i_ino, len, (long long) pos);
+-
+-	page = __grab_cache_page(mapping, index);
+-	if (!page)
+-		return -ENOMEM;
+-	*pagep = page;
+-
+-	ret = nfs_flush_incompatible(file, page);
+-	if (ret) {
+-		unlock_page(page);
+-		page_cache_release(page);
+-	}
+-	return ret;
++	return nfs_flush_incompatible(file, page);
+ }
+ 
+-static int nfs_write_end(struct file *file, struct address_space *mapping,
+-			loff_t pos, unsigned len, unsigned copied,
+-			struct page *page, void *fsdata)
++static int nfs_commit_write(struct file *file, struct page *page, unsigned offset, unsigned to)
+ {
+-	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
+ 	int status;
+-
+-	dfprintk(PAGECACHE, "NFS: write_end(%s/%s(%ld), %u@%lld)\n",
+-		file->f_path.dentry->d_parent->d_name.name,
+-		file->f_path.dentry->d_name.name,
+-		mapping->host->i_ino, len, (long long) pos);
++	unsigned copied = to - offset;
+ 
+ 	/*
+ 	 * Zero any uninitialised parts of the page, and then mark the page
+@@ -382,14 +343,13 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
+ 	 */
+ 	if (!PageUptodate(page)) {
+ 		unsigned pglen = nfs_page_length(page);
+-		unsigned end = offset + len;
+ 
+ 		if (pglen == 0) {
+ 			zero_user_segments(page, 0, offset,
+-					end, PAGE_CACHE_SIZE);
++					to, PAGE_CACHE_SIZE);
+ 			SetPageUptodate(page);
+-		} else if (end >= pglen) {
+-			zero_user_segment(page, end, PAGE_CACHE_SIZE);
++		} else if (to >= pglen) {
++			zero_user_segment(page, to, PAGE_CACHE_SIZE);
+ 			if (offset == 0)
+ 				SetPageUptodate(page);
+ 		} else
+@@ -398,9 +358,6 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
+ 
+ 	status = nfs_updatepage(file, page, offset, copied);
+ 
+-	unlock_page(page);
+-	page_cache_release(page);
+-
+ 	if (status < 0)
+ 		return status;
+ 	return copied;
+@@ -424,34 +381,23 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
+ 	return 0;
+ }
+ 
+-static int nfs_launder_page(struct page *page)
+-{
+-	struct inode *inode = page->mapping->host;
+-
+-	dfprintk(PAGECACHE, "NFS: launder_page(%ld, %llu)\n",
+-		inode->i_ino, (long long)page_offset(page));
+-
+-	return nfs_wb_page(inode, page);
+-}
+-
+ const struct address_space_operations nfs_file_aops = {
+ 	.readpage = nfs_readpage,
+ 	.readpages = nfs_readpages,
+ 	.set_page_dirty = __set_page_dirty_nobuffers,
+ 	.writepage = nfs_writepage,
+ 	.writepages = nfs_writepages,
+-	.write_begin = nfs_write_begin,
+-	.write_end = nfs_write_end,
++	.prepare_write = nfs_prepare_write,
++	.commit_write = nfs_commit_write,
+ 	.invalidatepage = nfs_invalidate_page,
+ 	.releasepage = nfs_release_page,
+ 	.direct_IO = nfs_direct_IO,
+-	.launder_page = nfs_launder_page,
+ };
+ 
+ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct page *page)
+ {
+ 	struct file *filp = vma->vm_file;
+-	struct dentry *dentry = filp->f_path.dentry;
++	struct dentry *dentry = filp->f_dentry;
+ 	unsigned pagelen;
+ 	int ret = -EINVAL;
+ 	struct address_space *mapping;
+@@ -484,7 +430,8 @@ out_unlock:
+ }
+ 
+ static struct vm_operations_struct nfs_file_vm_ops = {
+-	.fault = filemap_fault,
++	.nopage		= filemap_nopage,
++	.populate	= filemap_populate,
+ 	.page_mkwrite = nfs_vm_page_mkwrite,
+ };
+ 
+@@ -500,16 +447,16 @@ static int nfs_need_sync_write(struct file *filp, struct inode *inode)
+ 	return 0;
+ }
+ 
+-static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
+-				unsigned long nr_segs, loff_t pos)
++static ssize_t
++nfs_file_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
+ {
+-	struct dentry * dentry = iocb->ki_filp->f_path.dentry;
++	struct dentry * dentry = iocb->ki_filp->f_dentry;
+ 	struct inode * inode = dentry->d_inode;
+ 	ssize_t result;
+-	size_t count = iov_length(iov, nr_segs);
++	struct iovec local_iov = { .iov_base = (void __user *)buf, .iov_len = count };
+ 
+ 	if (iocb->ki_filp->f_flags & O_DIRECT)
+-		return nfs_file_direct_write(iocb, iov, nr_segs, pos);
++		return nfs_file_direct_write(iocb, &local_iov, 1, pos);
+ 
+ 	dprintk("NFS: write(%s/%s, %lu@%Ld)\n",
+ 		dentry->d_parent->d_name.name, dentry->d_name.name,
+@@ -532,7 +479,7 @@ static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
+ 		goto out;
+ 
+ 	nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count);
+-	result = generic_file_aio_write(iocb, iov, nr_segs, pos);
++	result = generic_file_aio_write(iocb, buf, count, pos);
+ 	/* Return error values for O_SYNC and IS_SYNC() */
+ 	if (result >= 0 && nfs_need_sync_write(iocb->ki_filp, inode)) {
+ 		int err = nfs_do_fsync(nfs_file_open_context(iocb->ki_filp), inode);
+@@ -549,14 +496,20 @@ out_swapfile:
+ 
+ static int do_getlk(struct file *filp, int cmd, struct file_lock *fl)
+ {
++	struct file_lock cfl;
+ 	struct inode *inode = filp->f_mapping->host;
+ 	int status = 0;
+ 
+ 	lock_kernel();
+ 	/* Try local locking first */
+-	posix_test_lock(filp, fl);
++	posix_test_lock(filp, fl, &cfl);
+ 	if (fl->fl_type != F_UNLCK) {
+ 		/* found a conflict */
++		fl->fl_start = cfl.fl_start;
++		fl->fl_end = cfl.fl_end;
++		fl->fl_type = cfl.fl_type;
++		fl->fl_pid = cfl.fl_pid;
++
+ 		goto out;
+ 	}
+ 
+@@ -662,8 +615,8 @@ static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
+ 	int ret = -ENOLCK;
+ 
+ 	dprintk("NFS: lock(%s/%s, t=%x, fl=%x, r=%lld:%lld)\n",
+-			filp->f_path.dentry->d_parent->d_name.name,
+-			filp->f_path.dentry->d_name.name,
++			filp->f_dentry->d_parent->d_name.name,
++			filp->f_dentry->d_name.name,
+ 			fl->fl_type, fl->fl_flags,
+ 			(long long)fl->fl_start, (long long)fl->fl_end);
+ 
+@@ -695,8 +648,8 @@ out_err:
+ static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
+ {
+ 	dprintk("NFS: flock(%s/%s, t=%x, fl=%x)\n",
+-			filp->f_path.dentry->d_parent->d_name.name,
+-			filp->f_path.dentry->d_name.name,
++			filp->f_dentry->d_parent->d_name.name,
++			filp->f_dentry->d_name.name,
+ 			fl->fl_type, fl->fl_flags);
+ 
+ 	/*
+@@ -718,16 +671,3 @@ static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
+ 		return do_unlk(filp, cmd, fl);
+ 	return do_setlk(filp, cmd, fl);
+ }
+-
+-/*
+- * There is no protocol support for leases, so we have no way to implement
+- * them correctly in the face of opens by other clients.
+- */
+-static int nfs_setlease(struct file *file, long arg, struct file_lock **fl)
+-{
+-	dprintk("NFS: setlease(%s/%s, arg=%ld)\n",
+-			file->f_path.dentry->d_parent->d_name.name,
+-			file->f_path.dentry->d_name.name, arg);
+-
+-	return -EINVAL;
+-}
+diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
+index fae9719..5bf9b3c 100644
+--- a/fs/nfs/getroot.c
++++ b/fs/nfs/getroot.c
+@@ -30,7 +30,6 @@
+ #include <linux/nfs_idmap.h>
+ #include <linux/vfs.h>
+ #include <linux/namei.h>
+-#include <linux/mnt_namespace.h>
+ #include <linux/security.h>
+ 
+ #include <asm/system.h>
+diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
+index 86147b0..148aebe 100644
+--- a/fs/nfs/idmap.c
++++ b/fs/nfs/idmap.c
+@@ -376,7 +376,7 @@ idmap_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
+ static ssize_t
+ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
+ {
+-	struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
++	struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode);
+ 	struct idmap *idmap = (struct idmap *)rpci->private;
+ 	struct idmap_msg im_in, *im = &idmap->idmap_im;
+ 	struct idmap_hashtable *h;
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 52daefa..6a5b54c 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -612,7 +612,7 @@ static void put_nfs_open_context_sync(struct nfs_open_context *ctx)
+  */
+ static void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx)
+ {
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 	struct nfs_inode *nfsi = NFS_I(inode);
+ 
+ 	filp->private_data = get_nfs_open_context(ctx);
+@@ -644,7 +644,7 @@ struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_c
+ 
+ static void nfs_file_clear_open_context(struct file *filp)
+ {
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 	struct nfs_open_context *ctx = nfs_file_open_context(filp);
+ 
+ 	if (ctx) {
+@@ -667,7 +667,7 @@ int nfs_open(struct inode *inode, struct file *filp)
+ 	cred = rpc_lookup_cred();
+ 	if (IS_ERR(cred))
+ 		return PTR_ERR(cred);
+-	ctx = alloc_nfs_open_context(filp->f_path.mnt, filp->f_path.dentry, cred);
++	ctx = alloc_nfs_open_context(filp->f_vfsmnt, filp->f_dentry, cred);
+ 	put_rpccred(cred);
+ 	if (ctx == NULL)
+ 		return -ENOMEM;
+@@ -1242,7 +1242,7 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi)
+ #endif
+ }
+ 
+-static void init_once(void *foo)
++static void init_once(void *foo, struct kmem_cache *cachep, unsigned long temp)
+ {
+ 	struct nfs_inode *nfsi = (struct nfs_inode *) foo;
+ 
+diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
+index 66df08d..1e11b1d 100644
+--- a/fs/nfs/namespace.c
++++ b/fs/nfs/namespace.c
+@@ -107,29 +107,29 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
+ 
+ 	BUG_ON(IS_ROOT(dentry));
+ 	dprintk("%s: enter\n", __func__);
+-	dput(nd->path.dentry);
+-	nd->path.dentry = dget(dentry);
++	dput(nd->dentry);
++	nd->dentry = dget(dentry);
+ 
+ 	/* Look it up again */
+-	parent = dget_parent(nd->path.dentry);
++	parent = dget_parent(nd->dentry);
+ 	err = server->nfs_client->rpc_ops->lookup(parent->d_inode,
+-						  &nd->path.dentry->d_name,
++						  &nd->dentry->d_name,
+ 						  &fh, &fattr);
+ 	dput(parent);
+ 	if (err != 0)
+ 		goto out_err;
+ 
+ 	if (fattr.valid & NFS_ATTR_FATTR_V4_REFERRAL)
+-		mnt = nfs_do_refmount(nd->path.mnt, nd->path.dentry);
++		mnt = nfs_do_refmount(nd->mnt, nd->dentry);
+ 	else
+-		mnt = nfs_do_submount(nd->path.mnt, nd->path.dentry, &fh,
++		mnt = nfs_do_submount(nd->mnt, nd->dentry, &fh,
+ 				      &fattr);
+ 	err = PTR_ERR(mnt);
+ 	if (IS_ERR(mnt))
+ 		goto out_err;
+ 
+ 	mntget(mnt);
+-	err = do_add_mount(mnt, &nd->path, nd->path.mnt->mnt_flags|MNT_SHRINKABLE,
++	err = do_add_mount(mnt, nd, nd->mnt->mnt_flags|MNT_SHRINKABLE,
+ 			   &nfs_automount_list);
+ 	if (err < 0) {
+ 		mntput(mnt);
+@@ -137,9 +137,9 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
+ 			goto out_follow;
+ 		goto out_err;
+ 	}
+-	path_put(&nd->path);
+-	nd->path.mnt = mnt;
+-	nd->path.dentry = dget(mnt->mnt_root);
++	backport_path_put(nd);
++	nd->mnt = mnt;
++	nd->dentry = dget(mnt->mnt_root);
+ 	schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout);
+ out:
+ 	dprintk("%s: done, returned %d\n", __func__, err);
+@@ -147,22 +147,22 @@ out:
+ 	dprintk("<-- nfs_follow_mountpoint() = %d\n", err);
+ 	return ERR_PTR(err);
+ out_err:
+-	path_put(&nd->path);
++	backport_path_put(nd);
+ 	goto out;
+ out_follow:
+-	while (d_mountpoint(nd->path.dentry) &&
+-	       follow_down(&nd->path.mnt, &nd->path.dentry))
++	while (d_mountpoint(nd->dentry) &&
++	       follow_down(&nd->mnt, &nd->dentry))
+ 		;
+ 	err = 0;
+ 	goto out;
+ }
+ 
+-const struct inode_operations nfs_mountpoint_inode_operations = {
++struct inode_operations nfs_mountpoint_inode_operations = {
+ 	.follow_link	= nfs_follow_mountpoint,
+ 	.getattr	= nfs_getattr,
+ };
+ 
+-const struct inode_operations nfs_referral_inode_operations = {
++struct inode_operations nfs_referral_inode_operations = {
+ 	.follow_link	= nfs_follow_mountpoint,
+ };
+ 
+diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
+index 1e750e4..bdeef69 100644
+--- a/fs/nfs/nfs3proc.c
++++ b/fs/nfs/nfs3proc.c
+@@ -779,7 +779,7 @@ static void nfs3_proc_commit_setup(struct nfs_write_data *data, struct rpc_messa
+ static int
+ nfs3_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
+ {
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 
+ 	return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl);
+ }
+diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
+index ea79064..7a8e6fa 100644
+--- a/fs/nfs/nfs4_fs.h
++++ b/fs/nfs/nfs4_fs.h
+@@ -165,7 +165,7 @@ struct nfs4_state_recovery_ops {
+ };
+ 
+ extern struct dentry_operations nfs4_dentry_operations;
+-extern const struct inode_operations nfs4_dir_inode_operations;
++extern struct inode_operations nfs4_dir_inode_operations;
+ 
+ /* inode.c */
+ extern ssize_t nfs4_getxattr(struct dentry *, const char *, void *, size_t);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index c910413..02f1156 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1384,7 +1384,7 @@ struct dentry *
+ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+ {
+ 	struct path path = {
+-		.mnt = nd->path.mnt,
++		.mnt = nd->mnt,
+ 		.dentry = dentry,
+ 	};
+ 	struct dentry *parent;
+@@ -1421,8 +1421,8 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+ 	}
+ 	res = d_add_unique(dentry, igrab(state->inode));
+ 	if (res != NULL)
+-		path.dentry = res;
+-	nfs_set_verifier(path.dentry, nfs_save_change_attribute(dir));
++		dentry = res;
++	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ 	nfs_unblock_sillyrename(parent);
+ 	nfs4_intent_set_file(nd, &path, state);
+ 	return res;
+@@ -1432,7 +1432,7 @@ int
+ nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, struct nameidata *nd)
+ {
+ 	struct path path = {
+-		.mnt = nd->path.mnt,
++		.mnt = nd->mnt,
+ 		.dentry = dentry,
+ 	};
+ 	struct rpc_cred *cred;
+@@ -1880,7 +1880,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
+                  int flags, struct nameidata *nd)
+ {
+ 	struct path path = {
+-		.mnt = nd->path.mnt,
++		.mnt = nd->mnt,
+ 		.dentry = dentry,
+ 	};
+ 	struct nfs4_state *state;
+@@ -3671,7 +3671,7 @@ struct nfs4_state_recovery_ops nfs4_network_partition_recovery_ops = {
+ 	.recover_lock	= nfs4_lock_expired,
+ };
+ 
+-static const struct inode_operations nfs4_file_inode_operations = {
++static struct inode_operations nfs4_file_inode_operations = {
+ 	.permission	= nfs_permission,
+ 	.getattr	= nfs_getattr,
+ 	.setattr	= nfs_setattr,
+diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
+index 4dbb84d..c351a41 100644
+--- a/fs/nfs/proc.c
++++ b/fs/nfs/proc.c
+@@ -595,7 +595,7 @@ nfs_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
+ static int
+ nfs_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
+ {
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 
+ 	return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl);
+ }
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index e9b2017..bd232ac 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -201,7 +201,7 @@ static match_table_t nfs_secflavor_tokens = {
+ };
+ 
+ 
+-static void nfs_umount_begin(struct super_block *);
++static void nfs_umount_begin(struct vfsmount *vfsmnt, int flags);
+ static int  nfs_statfs(struct dentry *, struct kstatfs *);
+ static int  nfs_show_options(struct seq_file *, struct vfsmount *);
+ static int  nfs_show_stats(struct seq_file *, struct vfsmount *);
+@@ -228,7 +228,7 @@ struct file_system_type nfs_xdev_fs_type = {
+ 	.fs_flags	= FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+ };
+ 
+-static const struct super_operations nfs_sops = {
++static struct super_operations nfs_sops = {
+ 	.alloc_inode	= nfs_alloc_inode,
+ 	.destroy_inode	= nfs_destroy_inode,
+ 	.write_inode	= nfs_write_inode,
+@@ -274,7 +274,7 @@ struct file_system_type nfs4_referral_fs_type = {
+ 	.fs_flags	= FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+ };
+ 
+-static const struct super_operations nfs4_sops = {
++static struct super_operations nfs4_sops = {
+ 	.alloc_inode	= nfs_alloc_inode,
+ 	.destroy_inode	= nfs_destroy_inode,
+ 	.write_inode	= nfs_write_inode,
+@@ -287,10 +287,7 @@ static const struct super_operations nfs4_sops = {
+ };
+ #endif
+ 
+-static struct shrinker acl_shrinker = {
+-	.shrink		= nfs_access_cache_shrinker,
+-	.seeks		= DEFAULT_SEEKS,
+-};
++static struct shrinker *acl_shrinker;
+ 
+ /*
+  * Register the NFS filesystems
+@@ -311,7 +308,10 @@ int __init register_nfs_fs(void)
+ 	if (ret < 0)
+ 		goto error_2;
+ #endif
+-	register_shrinker(&acl_shrinker);
++	ret = init_mnt_writers();
++	if (ret)
++		printk(KERN_WARNING "Couldn't init mnt_writers\n");
++	acl_shrinker = set_shrinker(DEFAULT_SEEKS, nfs_access_cache_shrinker);
+ 	return 0;
+ 
+ #ifdef CONFIG_NFS_V4
+@@ -329,7 +329,8 @@ error_0:
+  */
+ void __exit unregister_nfs_fs(void)
+ {
+-	unregister_shrinker(&acl_shrinker);
++	if (acl_shrinker != NULL)
++		remove_shrinker(acl_shrinker);
+ #ifdef CONFIG_NFS_V4
+ 	unregister_filesystem(&nfs4_fs_type);
+ #endif
+@@ -649,11 +650,13 @@ static int nfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
+  * Begin unmount by attempting to remove all automounted mountpoints we added
+  * in response to xdev traversals and referrals
+  */
+-static void nfs_umount_begin(struct super_block *sb)
++static void nfs_umount_begin(struct vfsmount *vfsmnt, int flags)
+ {
+-	struct nfs_server *server = NFS_SB(sb);
++	struct nfs_server *server = NFS_SB(vfsmnt->mnt_sb);
+ 	struct rpc_clnt *rpc;
+ 
++	if (!(flags & MNT_FORCE))
++		return;
+ 	/* -EIO all pending I/O */
+ 	rpc = server->client_acl;
+ 	if (!IS_ERR(rpc))
+diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c
+index 412738d..b17f14a 100644
+--- a/fs/nfs/symlink.c
++++ b/fs/nfs/symlink.c
+@@ -70,7 +70,7 @@ read_failed:
+ /*
+  * symlinks can't do much...
+  */
+-const struct inode_operations nfs_symlink_inode_operations = {
++struct inode_operations nfs_symlink_inode_operations = {
+ 	.readlink	= generic_readlink,
+ 	.follow_link	= nfs_follow_link,
+ 	.put_link	= page_put_link,
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 3229e21..a405394 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -12,6 +12,7 @@
+ #include <linux/pagemap.h>
+ #include <linux/file.h>
+ #include <linux/writeback.h>
++#include <linux/mpage.h>
+ #include <linux/swap.h>
+ 
+ #include <linux/sunrpc/clnt.h>
+@@ -726,8 +727,8 @@ int nfs_updatepage(struct file *file, struct page *page,
+ 	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
+ 
+ 	dprintk("NFS:       nfs_updatepage(%s/%s %d@%lld)\n",
+-		file->f_path.dentry->d_parent->d_name.name,
+-		file->f_path.dentry->d_name.name, count,
++		file->f_dentry->d_parent->d_name.name,
++		file->f_dentry->d_name.name, count,
+ 		(long long)(page_offset(page) + offset));
+ 
+ 	/* If we're not using byte range locks, and we know the page
+diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
+index 9dc036f..4bafc01 100644
+--- a/fs/nfsd/export.c
++++ b/fs/nfsd/export.c
+@@ -168,14 +168,15 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
+ 			goto out;
+ 
+ 		dprintk("Found the path %s\n", buf);
+-		key.ek_path = nd.path;
++		key.ek_path.dentry = nd.dentry;
++		key.ek_path.mnt = nd.mnt;
+ 
+ 		ek = svc_expkey_update(&key, ek);
+ 		if (ek)
+ 			cache_put(&ek->h, &svc_expkey_cache);
+ 		else
+ 			err = -ENOMEM;
+-		path_put(&nd.path);
++		backport_path_put(&nd);
+ 	}
+ 	cache_flush();
+  out:
+@@ -204,7 +205,7 @@ static int expkey_show(struct seq_file *m,
+ 	if (test_bit(CACHE_VALID, &h->flags) && 
+ 	    !test_bit(CACHE_NEGATIVE, &h->flags)) {
+ 		seq_printf(m, " ");
+-		seq_path(m, &ek->ek_path, "\\ \t\n");
++		seq_path(m, ek->ek_path.mnt, ek->ek_path.dentry, "\\ \t\n");
+ 	}
+ 	seq_printf(m, "\n");
+ 	return 0;
+@@ -346,7 +347,7 @@ static void svc_export_request(struct cache_detail *cd,
+ 	char *pth;
+ 
+ 	qword_add(bpp, blen, exp->ex_client->name);
+-	pth = d_path(&exp->ex_path, *bpp, *blen);
++	pth = d_path(exp->ex_path.dentry, exp->ex_path.mnt, *bpp, *blen);
+ 	if (IS_ERR(pth)) {
+ 		/* is this correct? */
+ 		(*bpp)[0] = '\n';
+@@ -385,7 +386,7 @@ static int check_export(struct inode *inode, int flags, unsigned char *uuid)
+ 	}
+ 
+ 	if (!inode->i_sb->s_export_op ||
+-	    !inode->i_sb->s_export_op->fh_to_dentry) {
++	    !inode->i_sb->s_export_op->get_dentry) {
+ 		dprintk("exp_export: export of invalid fs type.\n");
+ 		return -EINVAL;
+ 	}
+@@ -504,7 +505,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
+ 	struct svc_export exp, *expp;
+ 	int an_int;
+ 
+-	nd.path.dentry = NULL;
++	nd.dentry = NULL;
+ 	exp.ex_pathname = NULL;
+ 
+ 	/* fs locations */
+@@ -544,8 +545,8 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
+ 
+ 	exp.h.flags = 0;
+ 	exp.ex_client = dom;
+-	exp.ex_path.mnt = nd.path.mnt;
+-	exp.ex_path.dentry = nd.path.dentry;
++	exp.ex_path.mnt = nd.mnt;
++	exp.ex_path.dentry = nd.dentry;
+ 	exp.ex_pathname = kstrdup(buf, GFP_KERNEL);
+ 	err = -ENOMEM;
+ 	if (!exp.ex_pathname)
+@@ -607,7 +608,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
+ 				goto out;
+ 		}
+ 
+-		err = check_export(nd.path.dentry->d_inode, exp.ex_flags,
++		err = check_export(nd.dentry->d_inode, exp.ex_flags,
+ 				   exp.ex_uuid);
+ 		if (err) goto out;
+ 	}
+@@ -626,8 +627,8 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
+ 	nfsd4_fslocs_free(&exp.ex_fslocs);
+ 	kfree(exp.ex_uuid);
+ 	kfree(exp.ex_pathname);
+-	if (nd.path.dentry)
+-		path_put(&nd.path);
++	if (nd.dentry)
++		backport_path_put(&nd);
+  out_no_path:
+ 	if (dom)
+ 		auth_domain_put(dom);
+@@ -650,7 +651,7 @@ static int svc_export_show(struct seq_file *m,
+ 		return 0;
+ 	}
+ 	exp = container_of(h, struct svc_export, h);
+-	seq_path(m, &exp->ex_path, " \t\n\\");
++	seq_path(m, exp->ex_path.mnt, exp->ex_path.dentry, " \t\n\\");
+ 	seq_putc(m, '\t');
+ 	seq_escape(m, exp->ex_client->name, " \t\n\\");
+ 	seq_putc(m, '(');
+@@ -1026,7 +1027,7 @@ exp_export(struct nfsctl_export *nxp)
+ 		goto out_put_clp;
+ 	err = -EINVAL;
+ 
+-	exp = exp_get_by_name(clp, nd.path.mnt, nd.path.dentry, NULL);
++	exp = exp_get_by_name(clp, nd.mnt, nd.dentry, NULL);
+ 
+ 	memset(&new, 0, sizeof(new));
+ 
+@@ -1034,8 +1035,8 @@ exp_export(struct nfsctl_export *nxp)
+ 	if ((nxp->ex_flags & NFSEXP_FSID) &&
+ 	    (!IS_ERR(fsid_key = exp_get_fsid_key(clp, nxp->ex_dev))) &&
+ 	    fsid_key->ek_path.mnt &&
+-	    (fsid_key->ek_path.mnt != nd.path.mnt ||
+-	     fsid_key->ek_path.dentry != nd.path.dentry))
++	    (fsid_key->ek_path.mnt != nd.mnt ||
++	     fsid_key->ek_path.dentry != nd.dentry))
+ 		goto finish;
+ 
+ 	if (!IS_ERR(exp)) {
+@@ -1051,7 +1052,7 @@ exp_export(struct nfsctl_export *nxp)
+ 		goto finish;
+ 	}
+ 
+-	err = check_export(nd.path.dentry->d_inode, nxp->ex_flags, NULL);
++	err = check_export(nd.dentry->d_inode, nxp->ex_flags, NULL);
+ 	if (err) goto finish;
+ 
+ 	err = -ENOMEM;
+@@ -1064,7 +1065,8 @@ exp_export(struct nfsctl_export *nxp)
+ 	if (!new.ex_pathname)
+ 		goto finish;
+ 	new.ex_client = clp;
+-	new.ex_path = nd.path;
++	new.ex_path.mnt = nd.mnt;
++	new.ex_path.dentry = nd.dentry;
+ 	new.ex_flags = nxp->ex_flags;
+ 	new.ex_anon_uid = nxp->ex_anon_uid;
+ 	new.ex_anon_gid = nxp->ex_anon_gid;
+@@ -1090,7 +1092,7 @@ finish:
+ 		exp_put(exp);
+ 	if (fsid_key && !IS_ERR(fsid_key))
+ 		cache_put(&fsid_key->h, &svc_expkey_cache);
+-	path_put(&nd.path);
++	backport_path_put(&nd);
+ out_put_clp:
+ 	auth_domain_put(clp);
+ out_unlock:
+@@ -1143,8 +1145,8 @@ exp_unexport(struct nfsctl_export *nxp)
+ 		goto out_domain;
+ 
+ 	err = -EINVAL;
+-	exp = exp_get_by_name(dom, nd.path.mnt, nd.path.dentry, NULL);
+-	path_put(&nd.path);
++	exp = exp_get_by_name(dom, nd.mnt, nd.dentry, NULL);
++	backport_path_put(&nd);
+ 	if (IS_ERR(exp))
+ 		goto out_domain;
+ 
+@@ -1180,12 +1182,12 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
+ 		printk("nfsd: exp_rootfh path not found %s", path);
+ 		return err;
+ 	}
+-	inode = nd.path.dentry->d_inode;
++	inode = nd.dentry->d_inode;
+ 
+ 	dprintk("nfsd: exp_rootfh(%s [%p] %s:%s/%ld)\n",
+-		 path, nd.path.dentry, clp->name,
++		 path, nd.dentry, clp->name,
+ 		 inode->i_sb->s_id, inode->i_ino);
+-	exp = exp_parent(clp, nd.path.mnt, nd.path.dentry, NULL);
++	exp = exp_parent(clp, nd.mnt, nd.dentry, NULL);
+ 	if (IS_ERR(exp)) {
+ 		err = PTR_ERR(exp);
+ 		goto out;
+@@ -1195,7 +1197,7 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
+ 	 * fh must be initialized before calling fh_compose
+ 	 */
+ 	fh_init(&fh, maxsize);
+-	if (fh_compose(&fh, exp, nd.path.dentry, NULL))
++	if (fh_compose(&fh, exp, nd.dentry, NULL))
+ 		err = -EINVAL;
+ 	else
+ 		err = 0;
+@@ -1203,7 +1205,7 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
+ 	fh_put(&fh);
+ 	exp_put(exp);
+ out:
+-	path_put(&nd.path);
++	backport_path_put(&nd);
+ 	return err;
+ }
+ 
+diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
+index 145b3c8..ad22c29 100644
+--- a/fs/nfsd/nfs4recover.c
++++ b/fs/nfsd/nfs4recover.c
+@@ -121,9 +121,9 @@ out_no_tfm:
+ static void
+ nfsd4_sync_rec_dir(void)
+ {
+-	mutex_lock(&rec_dir.path.dentry->d_inode->i_mutex);
+-	nfsd_sync_dir(rec_dir.path.dentry);
+-	mutex_unlock(&rec_dir.path.dentry->d_inode->i_mutex);
++	mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
++	nfsd_sync_dir(rec_dir.dentry);
++	mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
+ }
+ 
+ int
+@@ -143,9 +143,9 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
+ 	nfs4_save_user(&uid, &gid);
+ 
+ 	/* lock the parent */
+-	mutex_lock(&rec_dir.path.dentry->d_inode->i_mutex);
++	mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
+ 
+-	dentry = lookup_one_len(dname, rec_dir.path.dentry, HEXDIR_LEN-1);
++	dentry = lookup_one_len(dname, rec_dir.dentry, HEXDIR_LEN-1);
+ 	if (IS_ERR(dentry)) {
+ 		status = PTR_ERR(dentry);
+ 		goto out_unlock;
+@@ -155,15 +155,15 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
+ 		dprintk("NFSD: nfsd4_create_clid_dir: DIRECTORY EXISTS\n");
+ 		goto out_put;
+ 	}
+-	status = mnt_want_write(rec_dir.path.mnt);
++	status = mnt_want_write(rec_dir.mnt);
+ 	if (status)
+ 		goto out_put;
+-	status = vfs_mkdir(rec_dir.path.dentry->d_inode, dentry, S_IRWXU);
+-	mnt_drop_write(rec_dir.path.mnt);
++	status = vfs_mkdir(rec_dir.dentry->d_inode, dentry, S_IRWXU);
++	mnt_drop_write(rec_dir.mnt);
+ out_put:
+ 	dput(dentry);
+ out_unlock:
+-	mutex_unlock(&rec_dir.path.dentry->d_inode->i_mutex);
++	mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
+ 	if (status == 0) {
+ 		clp->cl_firststate = 1;
+ 		nfsd4_sync_rec_dir();
+@@ -226,7 +226,7 @@ nfsd4_list_rec_dir(struct dentry *dir, recdir_func *f)
+ 
+ 	nfs4_save_user(&uid, &gid);
+ 
+-	filp = dentry_open(dget(dir), mntget(rec_dir.path.mnt), O_RDONLY);
++	filp = dentry_open(dget(dir), mntget(rec_dir.mnt), O_RDONLY);
+ 	status = PTR_ERR(filp);
+ 	if (IS_ERR(filp))
+ 		goto out;
+@@ -291,9 +291,9 @@ nfsd4_unlink_clid_dir(char *name, int namlen)
+ 
+ 	dprintk("NFSD: nfsd4_unlink_clid_dir. name %.*s\n", namlen, name);
+ 
+-	mutex_lock(&rec_dir.path.dentry->d_inode->i_mutex);
+-	dentry = lookup_one_len(name, rec_dir.path.dentry, namlen);
+-	mutex_unlock(&rec_dir.path.dentry->d_inode->i_mutex);
++	mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
++	dentry = lookup_one_len(name, rec_dir.dentry, namlen);
++	mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
+ 	if (IS_ERR(dentry)) {
+ 		status = PTR_ERR(dentry);
+ 		return status;
+@@ -302,7 +302,7 @@ nfsd4_unlink_clid_dir(char *name, int namlen)
+ 	if (!dentry->d_inode)
+ 		goto out;
+ 
+-	status = nfsd4_clear_clid_dir(rec_dir.path.dentry, dentry);
++	status = nfsd4_clear_clid_dir(rec_dir.dentry, dentry);
+ out:
+ 	dput(dentry);
+ 	return status;
+@@ -318,7 +318,7 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp)
+ 	if (!rec_dir_init || !clp->cl_firststate)
+ 		return;
+ 
+-	status = mnt_want_write(rec_dir.path.mnt);
++	status = mnt_want_write(rec_dir.mnt);
+ 	if (status)
+ 		goto out;
+ 	clp->cl_firststate = 0;
+@@ -327,7 +327,7 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp)
+ 	nfs4_reset_user(uid, gid);
+ 	if (status == 0)
+ 		nfsd4_sync_rec_dir();
+-	mnt_drop_write(rec_dir.path.mnt);
++	mnt_drop_write(rec_dir.mnt);
+ out:
+ 	if (status)
+ 		printk("NFSD: Failed to remove expired client state directory"
+@@ -357,17 +357,17 @@ nfsd4_recdir_purge_old(void) {
+ 
+ 	if (!rec_dir_init)
+ 		return;
+-	status = mnt_want_write(rec_dir.path.mnt);
++	status = mnt_want_write(rec_dir.mnt);
+ 	if (status)
+ 		goto out;
+-	status = nfsd4_list_rec_dir(rec_dir.path.dentry, purge_old);
++	status = nfsd4_list_rec_dir(rec_dir.dentry, purge_old);
+ 	if (status == 0)
+ 		nfsd4_sync_rec_dir();
+-	mnt_drop_write(rec_dir.path.mnt);
++	mnt_drop_write(rec_dir.mnt);
+ out:
+ 	if (status)
+ 		printk("nfsd4: failed to purge old clients from recovery"
+-			" directory %s\n", rec_dir.path.dentry->d_name.name);
++			" directory %s\n", rec_dir.dentry->d_name.name);
+ }
+ 
+ static int
+@@ -387,10 +387,10 @@ int
+ nfsd4_recdir_load(void) {
+ 	int status;
+ 
+-	status = nfsd4_list_rec_dir(rec_dir.path.dentry, load_recdir);
++	status = nfsd4_list_rec_dir(rec_dir.dentry, load_recdir);
+ 	if (status)
+ 		printk("nfsd4: failed loading clients from recovery"
+-			" directory %s\n", rec_dir.path.dentry->d_name.name);
++			" directory %s\n", rec_dir.dentry->d_name.name);
+ 	return status;
+ }
+ 
+@@ -429,5 +429,5 @@ nfsd4_shutdown_recdir(void)
+ 	if (!rec_dir_init)
+ 		return;
+ 	rec_dir_init = 0;
+-	path_put(&rec_dir.path);
++	backport_path_put(&rec_dir);
+ }
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 1578d7a..1c6df07 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1576,7 +1576,7 @@ static __be32
+ nfs4_upgrade_open(struct svc_rqst *rqstp, struct svc_fh *cur_fh, struct nfs4_stateid *stp, struct nfsd4_open *open)
+ {
+ 	struct file *filp = stp->st_vfs_file;
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 	unsigned int share_access, new_writer;
+ 	__be32 status;
+ 
+@@ -1923,7 +1923,7 @@ search_close_lru(u32 st_id, int flags)
+ static inline int
+ nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stateid *stp)
+ {
+-	return fhp->fh_dentry->d_inode != stp->st_vfs_file->f_path.dentry->d_inode;
++	return fhp->fh_dentry->d_inode != stp->st_vfs_file->f_dentry->d_inode;
+ }
+ 
+ static int
+@@ -2838,7 +2838,7 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 	 * only the dentry:inode set.
+ 	 */
+ 	memset(&file, 0, sizeof (struct file));
+-	file.f_path.dentry = cstate->current_fh.fh_dentry;
++	file.f_dentry = cstate->current_fh.fh_dentry;
+ 
+ 	status = nfs_ok;
+ 	error = vfs_test_lock(&file, &file_lock);
+@@ -2934,7 +2934,7 @@ static int
+ check_for_locks(struct file *filp, struct nfs4_stateowner *lowner)
+ {
+ 	struct file_lock **flpp;
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 	int status = 0;
+ 
+ 	lock_kernel();
+@@ -3294,11 +3294,11 @@ nfs4_reset_recoverydir(char *recdir)
+ 	if (status)
+ 		return status;
+ 	status = -ENOTDIR;
+-	if (S_ISDIR(nd.path.dentry->d_inode->i_mode)) {
++	if (S_ISDIR(nd.dentry->d_inode->i_mode)) {
+ 		nfs4_set_recdir(recdir);
+ 		status = 0;
+ 	}
+-	path_put(&nd.path);
++	backport_path_put(&nd);
+ 	return status;
+ }
+ 
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index c53e65f..fc2871b 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -121,7 +121,7 @@ static ssize_t (*write_op[])(struct file *, char *, size_t) = {
+ 
+ static ssize_t nfsctl_transaction_write(struct file *file, const char __user *buf, size_t size, loff_t *pos)
+ {
+-	ino_t ino =  file->f_path.dentry->d_inode->i_ino;
++	ino_t ino = file->f_dentry->d_inode->i_ino;
+ 	char *data;
+ 	ssize_t rv;
+ 
+@@ -360,9 +360,9 @@ static ssize_t failover_unlock_fs(struct file *file, char *buf, size_t size)
+ 	if (error)
+ 		return error;
+ 
+-	error = nlmsvc_unlock_all_by_sb(nd.path.mnt->mnt_sb);
++	error = nlmsvc_unlock_all_by_sb(nd.mnt->mnt_sb);
+ 
+-	path_put(&nd.path);
++	backport_path_put(&nd);
+ 	return error;
+ }
+ 
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index 80292ff..47eb160 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -574,3 +574,5 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
+ 	nfsd_cache_update(rqstp, proc->pc_cachetype, statp + 1);
+ 	return 1;
+ }
++
++MODULE_LICENSE("Dual BSD/GPL");
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 18060be..fbd0f97 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -23,7 +23,6 @@
+ #include <linux/file.h>
+ #include <linux/mount.h>
+ #include <linux/major.h>
+-#include <linux/splice.h>
+ #include <linux/proc_fs.h>
+ #include <linux/stat.h>
+ #include <linux/fcntl.h>
+@@ -765,11 +764,11 @@ static inline int nfsd_dosync(struct file *filp, struct dentry *dp,
+ static int
+ nfsd_sync(struct file *filp)
+ {
+-        int err;
+-	struct inode *inode = filp->f_path.dentry->d_inode;
+-	dprintk("nfsd: sync file %s\n", filp->f_path.dentry->d_name.name);
++	int err;
++	struct inode *inode = filp->f_dentry->d_inode;
++	dprintk("nfsd: sync file %s\n", filp->f_dentry->d_name.name);
+ 	mutex_lock(&inode->i_mutex);
+-	err=nfsd_dosync(filp, filp->f_path.dentry, filp->f_op);
++	err=nfsd_dosync(filp, filp->f_dentry, filp->f_op);
+ 	mutex_unlock(&inode->i_mutex);
+ 
+ 	return err;
+@@ -828,53 +827,39 @@ found:
+ 	return ra;
+ }
+ 
+-/*
+- * Grab and keep cached pages associated with a file in the svc_rqst
+- * so that they can be passed to the network sendmsg/sendpage routines
+- * directly. They will be released after the sending has completed.
+- */
+ static int
+-nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
+-		  struct splice_desc *sd)
++nfsd_read_actor(read_descriptor_t *desc, struct page *page, unsigned long offset, unsigned long size)
+ {
+-	struct svc_rqst *rqstp = sd->u.data;
++	unsigned long count = desc->count;
++	struct svc_rqst *rqstp = desc->arg.data;
+ 	struct page **pp = rqstp->rq_respages + rqstp->rq_resused;
+-	struct page *page = buf->page;
+-	size_t size;
+-	int ret;
+ 
+-	ret = buf->ops->confirm(pipe, buf);
+-	if (unlikely(ret))
+-		return ret;
+-
+-	size = sd->len;
++	if (size > count)
++		size = count;
+ 
+ 	if (rqstp->rq_res.page_len == 0) {
+ 		get_page(page);
+-		put_page(*pp);
+-		*pp = page;
+-		rqstp->rq_resused++;
+-		rqstp->rq_res.page_base = buf->offset;
++		if (*pp)
++			put_page(*pp);
++		rqstp->rq_respages[rqstp->rq_resused++] = page;
++		rqstp->rq_res.page_base = offset;
+ 		rqstp->rq_res.page_len = size;
+-	} else if (page != pp[-1]) {
++	} else if (page != rqstp->rq_respages[rqstp->rq_resused-1]) {
+ 		get_page(page);
+ 		if (*pp)
+ 			put_page(*pp);
+ 		*pp = page;
+-		rqstp->rq_resused++;
++		rqstp->rq_respages[rqstp->rq_resused++] = page;
+ 		rqstp->rq_res.page_len += size;
+-	} else
++	} else {
+ 		rqstp->rq_res.page_len += size;
++	}
+ 
++	desc->count = count - size;
++	desc->written += size;
+ 	return size;
+ }
+ 
+-static int nfsd_direct_splice_actor(struct pipe_inode_info *pipe,
+-				    struct splice_desc *sd)
+-{
+-	return __splice_from_pipe(pipe, sd, nfsd_splice_actor);
+-}
+-
+ static inline int svc_msnfs(struct svc_fh *ffhp)
+ {
+ #ifdef MSNFS
+@@ -895,7 +880,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+ 	int		host_err;
+ 
+ 	err = nfserr_perm;
+-	inode = file->f_path.dentry->d_inode;
++	inode = file->f_dentry->d_inode;
+ 
+ 	if (svc_msnfs(fhp) && !lock_may_read(inode, offset, *count))
+ 		goto out;
+@@ -906,16 +891,9 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+ 	if (ra && ra->p_set)
+ 		file->f_ra = ra->p_ra;
+ 
+-	if (file->f_op->splice_read && rqstp->rq_splice_ok) {
+-		struct splice_desc sd = {
+-			.len		= 0,
+-			.total_len	= *count,
+-			.pos		= offset,
+-			.u.data		= rqstp,
+-		};
+-
++	if (file->f_op->sendfile && rqstp->rq_sendfile_ok) {
+ 		rqstp->rq_resused = 1;
+-		host_err = splice_direct_to_actor(file, &sd, nfsd_direct_splice_actor);
++		host_err = file->f_op->sendfile(file, &offset, *count, nfsd_read_actor, rqstp);
+ 	} else {
+ 		oldfs = get_fs();
+ 		set_fs(KERNEL_DS);
+@@ -937,7 +915,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+ 		nfsdstats.io_read += host_err;
+ 		*count = host_err;
+ 		err = 0;
+-		fsnotify_access(file->f_path.dentry);
++		fsnotify_access(file->f_dentry);
+ 	} else 
+ 		err = nfserrno(host_err);
+ out:
+@@ -971,11 +949,11 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+ 	err = nfserr_perm;
+ 
+ 	if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
+-		(!lock_may_write(file->f_path.dentry->d_inode, offset, cnt)))
++		(!lock_may_write(file->f_dentry->d_inode, offset, cnt)))
+ 		goto out;
+ #endif
+ 
+-	dentry = file->f_path.dentry;
++	dentry = file->f_dentry;
+ 	inode = dentry->d_inode;
+ 	exp   = fhp->fh_export;
+ 
+@@ -1004,7 +982,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+ 	set_fs(oldfs);
+ 	if (host_err >= 0) {
+ 		nfsdstats.io_write += cnt;
+-		fsnotify_modify(file->f_path.dentry);
++		fsnotify_modify(file->f_dentry);
+ 	}
+ 
+ 	/* clear setuid/setgid flag after write */
+diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
+index 27e772c..d932fb1 100644
+--- a/include/linux/exportfs.h
++++ b/include/linux/exportfs.h
+@@ -89,85 +89,9 @@ struct fid {
+ 	};
+ };
+ 
+-/**
+- * struct export_operations - for nfsd to communicate with file systems
+- * @encode_fh:      encode a file handle fragment from a dentry
+- * @fh_to_dentry:   find the implied object and get a dentry for it
+- * @fh_to_parent:   find the implied object's parent and get a dentry for it
+- * @get_name:       find the name for a given inode in a given directory
+- * @get_parent:     find the parent of a given directory
+- *
+- * See Documentation/filesystems/Exporting for details on how to use
+- * this interface correctly.
+- *
+- * encode_fh:
+- *    @encode_fh should store in the file handle fragment @fh (using at most
+- *    @max_len bytes) information that can be used by @decode_fh to recover the
+- *    file refered to by the &struct dentry @de.  If the @connectable flag is
+- *    set, the encode_fh() should store sufficient information so that a good
+- *    attempt can be made to find not only the file but also it's place in the
+- *    filesystem.   This typically means storing a reference to de->d_parent in
+- *    the filehandle fragment.  encode_fh() should return the number of bytes
+- *    stored or a negative error code such as %-ENOSPC
+- *
+- * fh_to_dentry:
+- *    @fh_to_dentry is given a &struct super_block (@sb) and a file handle
+- *    fragment (@fh, @fh_len). It should return a &struct dentry which refers
+- *    to the same file that the file handle fragment refers to.  If it cannot,
+- *    it should return a %NULL pointer if the file was found but no acceptable
+- *    &dentries were available, or an %ERR_PTR error code indicating why it
+- *    couldn't be found (e.g. %ENOENT or %ENOMEM).  Any suitable dentry can be
+- *    returned including, if necessary, a new dentry created with d_alloc_root.
+- *    The caller can then find any other extant dentries by following the
+- *    d_alias links.
+- *
+- * fh_to_parent:
+- *    Same as @fh_to_dentry, except that it returns a pointer to the parent
+- *    dentry if it was encoded into the filehandle fragment by @encode_fh.
+- *
+- * get_name:
+- *    @get_name should find a name for the given @child in the given @parent
+- *    directory.  The name should be stored in the @name (with the
+- *    understanding that it is already pointing to a a %NAME_MAX+1 sized
+- *    buffer.   get_name() should return %0 on success, a negative error code
+- *    or error.  @get_name will be called without @parent->i_mutex held.
+- *
+- * get_parent:
+- *    @get_parent should find the parent directory for the given @child which
+- *    is also a directory.  In the event that it cannot be found, or storage
+- *    space cannot be allocated, a %ERR_PTR should be returned.
+- *
+- * Locking rules:
+- *    get_parent is called with child->d_inode->i_mutex down
+- *    get_name is not (which is possibly inconsistent)
+- */
+-
+-struct export_operations {
+-	int (*encode_fh)(struct dentry *de, __u32 *fh, int *max_len,
+-			int connectable);
+-	struct dentry * (*fh_to_dentry)(struct super_block *sb, struct fid *fid,
+-			int fh_len, int fh_type);
+-	struct dentry * (*fh_to_parent)(struct super_block *sb, struct fid *fid,
+-			int fh_len, int fh_type);
+-	int (*get_name)(struct dentry *parent, char *name,
+-			struct dentry *child);
+-	struct dentry * (*get_parent)(struct dentry *child);
+-};
+-
+ extern int exportfs_encode_fh(struct dentry *dentry, struct fid *fid,
+ 	int *max_len, int connectable);
+ extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
+ 	int fh_len, int fileid_type, int (*acceptable)(void *, struct dentry *),
+ 	void *context);
+-
+-/*
+- * Generic helpers for filesystems.
+- */
+-extern struct dentry *generic_fh_to_dentry(struct super_block *sb,
+-	struct fid *fid, int fh_len, int fh_type,
+-	struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
+-extern struct dentry *generic_fh_to_parent(struct super_block *sb,
+-	struct fid *fid, int fh_len, int fh_type,
+-	struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
+-
+ #endif /* LINUX_EXPORTFS_H */
+diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
+index dbb87ab..9236e80 100644
+--- a/include/linux/lockd/lockd.h
++++ b/include/linux/lockd/lockd.h
+@@ -230,7 +230,7 @@ int           nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr);
+ 
+ static inline struct inode *nlmsvc_file_inode(struct nlm_file *file)
+ {
+-	return file->f_file->f_path.dentry->d_inode;
++	return file->f_file->f_dentry->d_inode;
+ }
+ 
+ /*
+diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
+index 78a5922..e59d828 100644
+--- a/include/linux/nfs_fs.h
++++ b/include/linux/nfs_fs.h
+@@ -9,6 +9,7 @@
+ #ifndef _LINUX_NFS_FS_H
+ #define _LINUX_NFS_FS_H
+ 
++#include <linux/path.h>
+ #include <linux/magic.h>
+ 
+ /* Default timeout values */
+@@ -331,7 +332,7 @@ extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *);
+ extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr);
+ extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr);
+ extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
+-extern int nfs_permission(struct inode *, int);
++extern int nfs_permission(struct inode *, int, struct nameidata *);
+ extern int nfs_open(struct inode *, struct file *);
+ extern int nfs_release(struct inode *, struct file *);
+ extern int nfs_attribute_timeout(struct inode *inode);
+@@ -358,9 +359,9 @@ static inline void nfs_fattr_init(struct nfs_fattr *fattr)
+ /*
+  * linux/fs/nfs/file.c
+  */
+-extern const struct inode_operations nfs_file_inode_operations;
++extern struct inode_operations nfs_file_inode_operations;
+ #ifdef CONFIG_NFS_V3
+-extern const struct inode_operations nfs3_file_inode_operations;
++extern struct inode_operations nfs3_file_inode_operations;
+ #endif /* CONFIG_NFS_V3 */
+ extern const struct file_operations nfs_file_operations;
+ extern const struct address_space_operations nfs_file_aops;
+@@ -408,9 +409,9 @@ extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
+ /*
+  * linux/fs/nfs/dir.c
+  */
+-extern const struct inode_operations nfs_dir_inode_operations;
++extern struct inode_operations nfs_dir_inode_operations;
+ #ifdef CONFIG_NFS_V3
+-extern const struct inode_operations nfs3_dir_inode_operations;
++extern struct inode_operations nfs3_dir_inode_operations;
+ #endif /* CONFIG_NFS_V3 */
+ extern const struct file_operations nfs_dir_operations;
+ extern struct dentry_operations nfs_dentry_operations;
+@@ -423,7 +424,7 @@ extern void nfs_access_zap_cache(struct inode *inode);
+ /*
+  * linux/fs/nfs/symlink.c
+  */
+-extern const struct inode_operations nfs_symlink_inode_operations;
++extern struct inode_operations nfs_symlink_inode_operations;
+ 
+ /*
+  * linux/fs/nfs/sysctl.c
+@@ -439,8 +440,8 @@ extern void nfs_unregister_sysctl(void);
+ /*
+  * linux/fs/nfs/namespace.c
+  */
+-extern const struct inode_operations nfs_mountpoint_inode_operations;
+-extern const struct inode_operations nfs_referral_inode_operations;
++extern struct inode_operations nfs_mountpoint_inode_operations;
++extern struct inode_operations nfs_referral_inode_operations;
+ extern int nfs_mountpoint_expiry_timeout;
+ extern void nfs_release_automount_timer(void);
+ 
+diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
+index 8c77c11..d9007dc 100644
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -782,8 +782,8 @@ struct nfs_access_entry;
+ struct nfs_rpc_ops {
+ 	u32	version;		/* Protocol version */
+ 	struct dentry_operations *dentry_ops;
+-	const struct inode_operations *dir_inode_ops;
+-	const struct inode_operations *file_inode_ops;
++	struct inode_operations *dir_inode_ops;
++	struct inode_operations *file_inode_ops;
+ 
+ 	int	(*getroot) (struct nfs_server *, struct nfs_fh *,
+ 			    struct nfs_fsinfo *);
+diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
+index 5431512..3753e4b 100644
+--- a/include/linux/nfsd/export.h
++++ b/include/linux/nfsd/export.h
+@@ -15,6 +15,7 @@
+ # include <linux/types.h>
+ # include <linux/in.h>
+ #endif
++#include <linux/path.h>
+ 
+ /*
+  * Important limits for the exports stuff.
+diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
+deleted file mode 100644
+index 8e41202..0000000
+--- a/include/linux/pipe_fs_i.h
++++ /dev/null
+@@ -1,151 +0,0 @@
+-#ifndef _LINUX_PIPE_FS_I_H
+-#define _LINUX_PIPE_FS_I_H
+-
+-#define PIPEFS_MAGIC 0x50495045
+-
+-#define PIPE_BUFFERS (16)
+-
+-#define PIPE_BUF_FLAG_LRU	0x01	/* page is on the LRU */
+-#define PIPE_BUF_FLAG_ATOMIC	0x02	/* was atomically mapped */
+-#define PIPE_BUF_FLAG_GIFT	0x04	/* page is a gift */
+-
+-/**
+- *	struct pipe_buffer - a linux kernel pipe buffer
+- *	@page: the page containing the data for the pipe buffer
+- *	@offset: offset of data inside the @page
+- *	@len: length of data inside the @page
+- *	@ops: operations associated with this buffer. See @pipe_buf_operations.
+- *	@flags: pipe buffer flags. See above.
+- *	@private: private data owned by the ops.
+- **/
+-struct pipe_buffer {
+-	struct page *page;
+-	unsigned int offset, len;
+-	const struct pipe_buf_operations *ops;
+-	unsigned int flags;
+-	unsigned long private;
+-};
+-
+-/**
+- *	struct pipe_inode_info - a linux kernel pipe
+- *	@wait: reader/writer wait point in case of empty/full pipe
+- *	@nrbufs: the number of non-empty pipe buffers in this pipe
+- *	@curbuf: the current pipe buffer entry
+- *	@tmp_page: cached released page
+- *	@readers: number of current readers of this pipe
+- *	@writers: number of current writers of this pipe
+- *	@waiting_writers: number of writers blocked waiting for room
+- *	@r_counter: reader counter
+- *	@w_counter: writer counter
+- *	@fasync_readers: reader side fasync
+- *	@fasync_writers: writer side fasync
+- *	@inode: inode this pipe is attached to
+- *	@bufs: the circular array of pipe buffers
+- **/
+-struct pipe_inode_info {
+-	wait_queue_head_t wait;
+-	unsigned int nrbufs, curbuf;
+-	struct page *tmp_page;
+-	unsigned int readers;
+-	unsigned int writers;
+-	unsigned int waiting_writers;
+-	unsigned int r_counter;
+-	unsigned int w_counter;
+-	struct fasync_struct *fasync_readers;
+-	struct fasync_struct *fasync_writers;
+-	struct inode *inode;
+-	struct pipe_buffer bufs[PIPE_BUFFERS];
+-};
+-
+-/*
+- * Note on the nesting of these functions:
+- *
+- * ->confirm()
+- *	->steal()
+- *	...
+- *	->map()
+- *	...
+- *	->unmap()
+- *
+- * That is, ->map() must be called on a confirmed buffer,
+- * same goes for ->steal(). See below for the meaning of each
+- * operation. Also see kerneldoc in fs/pipe.c for the pipe
+- * and generic variants of these hooks.
+- */
+-struct pipe_buf_operations {
+-	/*
+-	 * This is set to 1, if the generic pipe read/write may coalesce
+-	 * data into an existing buffer. If this is set to 0, a new pipe
+-	 * page segment is always used for new data.
+-	 */
+-	int can_merge;
+-
+-	/*
+-	 * ->map() returns a virtual address mapping of the pipe buffer.
+-	 * The last integer flag reflects whether this should be an atomic
+-	 * mapping or not. The atomic map is faster, however you can't take
+-	 * page faults before calling ->unmap() again. So if you need to eg
+-	 * access user data through copy_to/from_user(), then you must get
+-	 * a non-atomic map. ->map() uses the KM_USER0 atomic slot for
+-	 * atomic maps, so you can't map more than one pipe_buffer at once
+-	 * and you have to be careful if mapping another page as source
+-	 * or destination for a copy (IOW, it has to use something else
+-	 * than KM_USER0).
+-	 */
+-	void * (*map)(struct pipe_inode_info *, struct pipe_buffer *, int);
+-
+-	/*
+-	 * Undoes ->map(), finishes the virtual mapping of the pipe buffer.
+-	 */
+-	void (*unmap)(struct pipe_inode_info *, struct pipe_buffer *, void *);
+-
+-	/*
+-	 * ->confirm() verifies that the data in the pipe buffer is there
+-	 * and that the contents are good. If the pages in the pipe belong
+-	 * to a file system, we may need to wait for IO completion in this
+-	 * hook. Returns 0 for good, or a negative error value in case of
+-	 * error.
+-	 */
+-	int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *);
+-
+-	/*
+-	 * When the contents of this pipe buffer has been completely
+-	 * consumed by a reader, ->release() is called.
+-	 */
+-	void (*release)(struct pipe_inode_info *, struct pipe_buffer *);
+-
+-	/*
+-	 * Attempt to take ownership of the pipe buffer and its contents.
+-	 * ->steal() returns 0 for success, in which case the contents
+-	 * of the pipe (the buf->page) is locked and now completely owned
+-	 * by the caller. The page may then be transferred to a different
+-	 * mapping, the most often used case is insertion into different
+-	 * file address space cache.
+-	 */
+-	int (*steal)(struct pipe_inode_info *, struct pipe_buffer *);
+-
+-	/*
+-	 * Get a reference to the pipe buffer.
+-	 */
+-	void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
+-};
+-
+-/* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual
+-   memory allocation, whereas PIPE_BUF makes atomicity guarantees.  */
+-#define PIPE_SIZE		PAGE_SIZE
+-
+-/* Drop the inode semaphore and wait for a pipe event, atomically */
+-void pipe_wait(struct pipe_inode_info *pipe);
+-
+-struct pipe_inode_info * alloc_pipe_info(struct inode * inode);
+-void free_pipe_info(struct inode * inode);
+-void __free_pipe_info(struct pipe_inode_info *);
+-
+-/* Generic pipe buffer ops functions */
+-void *generic_pipe_buf_map(struct pipe_inode_info *, struct pipe_buffer *, int);
+-void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void *);
+-void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
+-int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
+-int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+-
+-#endif
+diff --git a/include/linux/splice.h b/include/linux/splice.h
+deleted file mode 100644
+index 528dcb9..0000000
+--- a/include/linux/splice.h
++++ /dev/null
+@@ -1,74 +0,0 @@
+-/*
+- * Function declerations and data structures related to the splice
+- * implementation.
+- *
+- * Copyright (C) 2007 Jens Axboe <jens.axboe at oracle.com>
+- *
+- */
+-#ifndef SPLICE_H
+-#define SPLICE_H
+-
+-#include <linux/pipe_fs_i.h>
+-
+-/*
+- * splice is tied to pipes as a transport (at least for now), so we'll just
+- * add the splice flags here.
+- */
+-#define SPLICE_F_MOVE	(0x01)	/* move pages instead of copying */
+-#define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */
+-				 /* we may still block on the fd we splice */
+-				 /* from/to, of course */
+-#define SPLICE_F_MORE	(0x04)	/* expect more data */
+-#define SPLICE_F_GIFT	(0x08)	/* pages passed in are a gift */
+-
+-/*
+- * Passed to the actors
+- */
+-struct splice_desc {
+-	unsigned int len, total_len;	/* current and remaining length */
+-	unsigned int flags;		/* splice flags */
+-	/*
+-	 * actor() private data
+-	 */
+-	union {
+-		void __user *userptr;	/* memory to write to */
+-		struct file *file;	/* file to read/write */
+-		void *data;		/* cookie */
+-	} u;
+-	loff_t pos;			/* file position */
+-};
+-
+-struct partial_page {
+-	unsigned int offset;
+-	unsigned int len;
+-	unsigned long private;
+-};
+-
+-/*
+- * Passed to splice_to_pipe
+- */
+-struct splice_pipe_desc {
+-	struct page **pages;		/* page map */
+-	struct partial_page *partial;	/* pages[] may not be contig */
+-	int nr_pages;			/* number of pages in map */
+-	unsigned int flags;		/* splice flags */
+-	const struct pipe_buf_operations *ops;/* ops associated with output pipe */
+-	void (*spd_release)(struct splice_pipe_desc *, unsigned int);
+-};
+-
+-typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
+-			   struct splice_desc *);
+-typedef int (splice_direct_actor)(struct pipe_inode_info *,
+-				  struct splice_desc *);
+-
+-extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *,
+-				loff_t *, size_t, unsigned int,
+-				splice_actor *);
+-extern ssize_t __splice_from_pipe(struct pipe_inode_info *,
+-				  struct splice_desc *, splice_actor *);
+-extern ssize_t splice_to_pipe(struct pipe_inode_info *,
+-			      struct splice_pipe_desc *);
+-extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
+-				      splice_direct_actor *);
+-
+-#endif
+diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h
+index 10709cb..9bbadbd 100644
+--- a/include/linux/sunrpc/debug.h
++++ b/include/linux/sunrpc/debug.h
+@@ -88,6 +88,7 @@ enum {
+ 	CTL_SLOTTABLE_TCP,
+ 	CTL_MIN_RESVPORT,
+ 	CTL_MAX_RESVPORT,
++	CTL_TRANSPORT,
+ };
+ 
+ #endif /* _LINUX_SUNRPC_DEBUG_H_ */
+diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
+index dc69068..3a0f48f 100644
+--- a/include/linux/sunrpc/svc.h
++++ b/include/linux/sunrpc/svc.h
+@@ -255,7 +255,7 @@ struct svc_rqst {
+ 						 * determine what device number
+ 						 * to report (real or virtual)
+ 						 */
+-	int			rq_splice_ok;   /* turned off in gss privacy
++	int			rq_sendfile_ok;   /* turned off in gss privacy
+ 						 * to prevent encrypting page
+ 						 * cache pages */
+ 	wait_queue_head_t	rq_wait;	/* synchronization */
+diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
+index 6bfea9e..f0a110d 100644
+--- a/net/sunrpc/auth.c
++++ b/net/sunrpc/auth.c
+@@ -566,19 +566,16 @@ rpcauth_uptodatecred(struct rpc_task *task)
+ 		test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0;
+ }
+ 
+-static struct shrinker rpc_cred_shrinker = {
+-	.shrink = rpcauth_cache_shrinker,
+-	.seeks = DEFAULT_SEEKS,
+-};
++static struct shrinker *rpc_cred_shrinker;
+ 
+ void __init rpcauth_init_module(void)
+ {
+ 	rpc_init_authunix();
+ 	rpc_init_generic_auth();
+-	register_shrinker(&rpc_cred_shrinker);
++	rpc_cred_shrinker = set_shrinker(DEFAULT_SEEKS, rpcauth_cache_shrinker);
+ }
+ 
+ void __exit rpcauth_remove_module(void)
+ {
+-	unregister_shrinker(&rpc_cred_shrinker);
++	remove_shrinker(rpc_cred_shrinker);
+ }
+diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
+index 853a414..71ba862 100644
+--- a/net/sunrpc/auth_gss/auth_gss.c
++++ b/net/sunrpc/auth_gss/auth_gss.c
+@@ -481,7 +481,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
+ 	const void *p, *end;
+ 	void *buf;
+ 	struct gss_upcall_msg *gss_msg;
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 	struct gss_cl_ctx *ctx;
+ 	uid_t uid;
+ 	ssize_t err = -EFBIG;
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index 81ae3d6..acfb1d1 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -859,7 +859,7 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
+ 	u32 priv_len, maj_stat;
+ 	int pad, saved_len, remaining_len, offset;
+ 
+-	rqstp->rq_splice_ok = 0;
++	rqstp->rq_sendfile_ok = 0;
+ 
+ 	priv_len = svc_getnl(&buf->head[0]);
+ 	if (rqstp->rq_deferred) {
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index c996671..58e606e 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -696,7 +696,7 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
+ {
+ 	struct cache_reader *rp = filp->private_data;
+ 	struct cache_request *rq;
+-	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
++	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
+ 	int err;
+ 
+ 	if (count == 0)
+@@ -773,7 +773,7 @@ cache_write(struct file *filp, const char __user *buf, size_t count,
+ 	    loff_t *ppos)
+ {
+ 	int err;
+-	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
++	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
+ 
+ 	if (count == 0)
+ 		return 0;
+@@ -804,7 +804,7 @@ cache_poll(struct file *filp, poll_table *wait)
+ 	unsigned int mask;
+ 	struct cache_reader *rp = filp->private_data;
+ 	struct cache_queue *cq;
+-	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
++	struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
+ 
+ 	poll_wait(filp, &queue_wait, wait);
+ 
+@@ -1239,7 +1239,7 @@ static int c_show(struct seq_file *m, void *p)
+ 	return cd->cache_show(m, cd, cp);
+ }
+ 
+-static const struct seq_operations cache_content_op = {
++static struct seq_operations cache_content_op = {
+ 	.start	= c_start,
+ 	.next	= c_next,
+ 	.stop	= c_stop,
+@@ -1269,7 +1269,7 @@ static const struct file_operations content_file_operations = {
+ static ssize_t read_flush(struct file *file, char __user *buf,
+ 			    size_t count, loff_t *ppos)
+ {
+-	struct cache_detail *cd = PDE(file->f_path.dentry->d_inode)->data;
++	struct cache_detail *cd = PDE(file->f_dentry->d_inode)->data;
+ 	char tbuf[20];
+ 	unsigned long p = *ppos;
+ 	size_t len;
+@@ -1290,7 +1290,7 @@ static ssize_t read_flush(struct file *file, char __user *buf,
+ static ssize_t write_flush(struct file * file, const char __user * buf,
+ 			     size_t count, loff_t *ppos)
+ {
+-	struct cache_detail *cd = PDE(file->f_path.dentry->d_inode)->data;
++	struct cache_detail *cd = PDE(file->f_dentry->d_inode)->data;
+ 	char tbuf[20];
+ 	char *ep;
+ 	long flushtime;
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 76739e9..11bfb52 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -213,10 +213,10 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
+ 	}
+ 
+ 	/* save the nodename */
+-	clnt->cl_nodelen = strlen(utsname()->nodename);
++	clnt->cl_nodelen = strlen(system_utsname.nodename);
+ 	if (clnt->cl_nodelen > UNX_MAXNODENAME)
+ 		clnt->cl_nodelen = UNX_MAXNODENAME;
+-	memcpy(clnt->cl_nodename, utsname()->nodename, clnt->cl_nodelen);
++	memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen);
+ 	rpc_register_client(clnt);
+ 	return clnt;
+ 
+diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
+index 23a2b8f..003a6ec 100644
+--- a/net/sunrpc/rpc_pipe.c
++++ b/net/sunrpc/rpc_pipe.c
+@@ -26,6 +26,7 @@
+ #include <linux/sunrpc/clnt.h>
+ #include <linux/workqueue.h>
+ #include <linux/sunrpc/rpc_pipe_fs.h>
++#include <linux/path.h>
+ 
+ static struct vfsmount *rpc_mount __read_mostly;
+ static int rpc_mount_count;
+@@ -224,7 +225,7 @@ out:
+ static ssize_t
+ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
+ {
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 	struct rpc_inode *rpci = RPC_I(inode);
+ 	struct rpc_pipe_msg *msg;
+ 	int res = 0;
+@@ -267,7 +268,7 @@ out_unlock:
+ static ssize_t
+ rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset)
+ {
+-	struct inode *inode = filp->f_path.dentry->d_inode;
++	struct inode *inode = filp->f_dentry->d_inode;
+ 	struct rpc_inode *rpci = RPC_I(inode);
+ 	int res;
+ 
+@@ -285,7 +286,7 @@ rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
+ 	struct rpc_inode *rpci;
+ 	unsigned int mask = 0;
+ 
+-	rpci = RPC_I(filp->f_path.dentry->d_inode);
++	rpci = RPC_I(filp->f_dentry->d_inode);
+ 	poll_wait(filp, &rpci->waitq, wait);
+ 
+ 	mask = POLLOUT | POLLWRNORM;
+@@ -300,7 +301,7 @@ static int
+ rpc_pipe_ioctl(struct inode *ino, struct file *filp,
+ 		unsigned int cmd, unsigned long arg)
+ {
+-	struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
++	struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode);
+ 	int len;
+ 
+ 	switch (cmd) {
+@@ -495,7 +496,7 @@ rpc_lookup_parent(char *path, struct nameidata *nd)
+ static void
+ rpc_release_path(struct nameidata *nd)
+ {
+-	path_put(&nd->path);
++	backport_path_put(nd);
+ 	rpc_put_mount();
+ }
+ 
+@@ -668,7 +669,7 @@ rpc_lookup_negative(char *path, struct nameidata *nd)
+ 
+ 	if ((error = rpc_lookup_parent(path, nd)) != 0)
+ 		return ERR_PTR(error);
+-	dentry = rpc_lookup_create(nd->path.dentry, nd->last.name, nd->last.len,
++	dentry = rpc_lookup_create(nd->dentry, nd->last.name, nd->last.len,
+ 				   1);
+ 	if (IS_ERR(dentry))
+ 		rpc_release_path(nd);
+@@ -696,7 +697,7 @@ rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
+ 	dentry = rpc_lookup_negative(path, &nd);
+ 	if (IS_ERR(dentry))
+ 		return dentry;
+-	dir = nd.path.dentry->d_inode;
++	dir = nd.dentry->d_inode;
+ 	if ((error = __rpc_mkdir(dir, dentry)) != 0)
+ 		goto err_dput;
+ 	RPC_I(dentry->d_inode)->private = rpc_client;
+@@ -897,7 +898,7 @@ static struct file_system_type rpc_pipe_fs_type = {
+ };
+ 
+ static void
+-init_once(void *foo)
++init_once(void *foo, struct kmem_cache *cachep, unsigned long temp)
+ {
+ 	struct rpc_inode *rpci = (struct rpc_inode *) foo;
+ 
+diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
+index 24db2b4..0f6f1ea 100644
+--- a/net/sunrpc/rpcb_clnt.c
++++ b/net/sunrpc/rpcb_clnt.c
+@@ -117,18 +117,6 @@ static void rpcb_map_release(void *data)
+ 	kfree(map);
+ }
+ 
+-static const struct sockaddr_in rpcb_inaddr_loopback = {
+-	.sin_family		= AF_INET,
+-	.sin_addr.s_addr	= htonl(INADDR_LOOPBACK),
+-	.sin_port		= htons(RPCBIND_PORT),
+-};
+-
+-static const struct sockaddr_in6 rpcb_in6addr_loopback = {
+-	.sin6_family		= AF_INET6,
+-	.sin6_addr		= IN6ADDR_LOOPBACK_INIT,
+-	.sin6_port		= htons(RPCBIND_PORT),
+-};
+-
+ static struct rpc_clnt *rpcb_create_local(struct sockaddr *addr,
+ 					  size_t addrlen, u32 version)
+ {
+@@ -248,6 +236,11 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay)
+ 		.rpc_argp	= &map,
+ 		.rpc_resp	= okay,
+ 	};
++	struct sockaddr_in rpcb_inaddr_loopback = {
++		.sin_family		= AF_INET,
++		.sin_addr.s_addr	= htonl(INADDR_LOOPBACK),
++		.sin_port		= htons(RPCBIND_PORT),
++	};
+ 
+ 	dprintk("RPC:       %sregistering (%u, %u, %d, %u) with local "
+ 			"rpcbind\n", (port ? "" : "un"),
+@@ -272,6 +265,12 @@ static int rpcb_register_netid4(struct sockaddr_in *address_to_register,
+ 	unsigned short port = ntohs(address_to_register->sin_port);
+ 	char buf[32];
+ 
++	struct sockaddr_in rpcb_inaddr_loopback = {
++		.sin_family		= AF_INET,
++		.sin_addr.s_addr	= htonl(INADDR_LOOPBACK),
++		.sin_port		= htons(RPCBIND_PORT),
++	};
++
+ 	/* Construct AF_INET universal address */
+ 	snprintf(buf, sizeof(buf),
+ 			NIPQUAD_FMT".%u.%u",
+@@ -303,6 +302,12 @@ static int rpcb_register_netid6(struct sockaddr_in6 *address_to_register,
+ 	unsigned short port = ntohs(address_to_register->sin6_port);
+ 	char buf[64];
+ 
++	struct sockaddr_in6 rpcb_in6addr_loopback = {
++		.sin6_family		= AF_INET6,
++		.sin6_addr		= IN6ADDR_LOOPBACK_INIT,
++		.sin6_port		= htons(RPCBIND_PORT),
++	};
++
+ 	/* Construct AF_INET6 universal address */
+ 	snprintf(buf, sizeof(buf),
+ 			NIP6_FMT".%u.%u",
+diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
+index 50b049c..5053a5f 100644
+--- a/net/sunrpc/stats.c
++++ b/net/sunrpc/stats.c
+@@ -264,7 +264,7 @@ rpc_proc_init(void)
+ 	dprintk("RPC:       registering /proc/net/rpc\n");
+ 	if (!proc_net_rpc) {
+ 		struct proc_dir_entry *ent;
+-		ent = proc_mkdir("rpc", init_net.proc_net);
++		ent = proc_mkdir("rpc", proc_net);
+ 		if (ent) {
+ 			ent->owner = THIS_MODULE;
+ 			proc_net_rpc = ent;
+@@ -278,7 +278,7 @@ rpc_proc_exit(void)
+ 	dprintk("RPC:       unregistering /proc/net/rpc\n");
+ 	if (proc_net_rpc) {
+ 		proc_net_rpc = NULL;
+-		remove_proc_entry("rpc", init_net.proc_net);
++		remove_proc_entry("rpc", proc_net);
+ 	}
+ }
+ 
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index 5a32cb7..e0e87c6 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -174,7 +174,7 @@ fail:
+ static int
+ svc_pool_map_init_percpu(struct svc_pool_map *m)
+ {
+-	unsigned int maxpools = nr_cpu_ids;
++	unsigned int maxpools = highest_possible_processor_id() + 1;
+ 	unsigned int pidx = 0;
+ 	unsigned int cpu;
+ 	int err;
+@@ -202,7 +202,7 @@ svc_pool_map_init_percpu(struct svc_pool_map *m)
+ static int
+ svc_pool_map_init_pernode(struct svc_pool_map *m)
+ {
+-	unsigned int maxpools = nr_node_ids;
++	unsigned int maxpools = highest_possible_processor_id() + 1;
+ 	unsigned int pidx = 0;
+ 	unsigned int node;
+ 	int err;
+@@ -310,13 +310,12 @@ svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
+ 	switch (m->mode) {
+ 	case SVC_POOL_PERCPU:
+ 	{
+-		set_cpus_allowed_ptr(task, &cpumask_of_cpu(node));
++		set_cpus_allowed(task, cpumask_of_cpu(node));
+ 		break;
+ 	}
+ 	case SVC_POOL_PERNODE:
+ 	{
+-		node_to_cpumask_ptr(nodecpumask, node);
+-		set_cpus_allowed_ptr(task, nodecpumask);
++		set_cpus_allowed(task, node_to_cpumask(node));
+ 		break;
+ 	}
+ 	}
+@@ -831,7 +830,7 @@ svc_process(struct svc_rqst *rqstp)
+ 	rqstp->rq_res.tail[0].iov_base = NULL;
+ 	rqstp->rq_res.tail[0].iov_len = 0;
+ 	/* Will be turned off only in gss privacy case: */
+-	rqstp->rq_splice_ok = 1;
++	rqstp->rq_sendfile_ok = 1;
+ 
+ 	/* Setup reply header */
+ 	rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
+diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
+index f24800f..b30d725 100644
+--- a/net/sunrpc/svcauth_unix.c
++++ b/net/sunrpc/svcauth_unix.c
+@@ -678,7 +678,7 @@ int
+ svcauth_unix_set_client(struct svc_rqst *rqstp)
+ {
+ 	struct sockaddr_in *sin;
+-	struct sockaddr_in6 *sin6, sin6_storage;
++	struct sockaddr_in6 *sin6 = NULL, sin6_storage;
+ 	struct ip_map *ipm;
+ 
+ 	switch (rqstp->rq_addr.ss_family) {
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 3e65719..cbb47a6 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -472,12 +472,16 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
+ 	if (len < 0)
+ 		return len;
+ 	rqstp->rq_addrlen = len;
+-	if (skb->tstamp.tv64 == 0) {
+-		skb->tstamp = ktime_get_real();
++	if (skb->tstamp.off_sec == 0) {
++		struct timeval tv;
++
++		tv.tv_sec = xtime.tv_sec;
++		tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
++		skb_set_timestamp(skb, &tv);
+ 		/* Don't enable netstamp, sunrpc doesn't
+ 		   need that much accuracy */
+ 	}
+-	svsk->sk_sk->sk_stamp = skb->tstamp;
++	skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp);
+ 	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
+ 
+ 	/*
+diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c
+index 5231f7a..1482e34 100644
+--- a/net/sunrpc/sysctl.c
++++ b/net/sunrpc/sysctl.c
+@@ -135,6 +135,7 @@ done:
+ 
+ static ctl_table debug_table[] = {
+ 	{
++		.ctl_name	= CTL_RPCDEBUG, 
+ 		.procname	= "rpc_debug",
+ 		.data		= &rpc_debug,
+ 		.maxlen		= sizeof(int),
+@@ -142,6 +143,7 @@ static ctl_table debug_table[] = {
+ 		.proc_handler	= &proc_dodebug
+ 	},
+ 	{
++		.ctl_name	= CTL_NFSDEBUG,
+ 		.procname	= "nfs_debug",
+ 		.data		= &nfs_debug,
+ 		.maxlen		= sizeof(int),
+@@ -149,6 +151,7 @@ static ctl_table debug_table[] = {
+ 		.proc_handler	= &proc_dodebug
+ 	},
+ 	{
++		.ctl_name	= CTL_NFSDDEBUG,
+ 		.procname	= "nfsd_debug",
+ 		.data		= &nfsd_debug,
+ 		.maxlen		= sizeof(int),
+@@ -156,6 +159,7 @@ static ctl_table debug_table[] = {
+ 		.proc_handler	= &proc_dodebug
+ 	},
+ 	{
++		.ctl_name	= CTL_NLMDEBUG,
+ 		.procname	= "nlm_debug",
+ 		.data		= &nlm_debug,
+ 		.maxlen		= sizeof(int),
+@@ -163,6 +167,7 @@ static ctl_table debug_table[] = {
+ 		.proc_handler	= &proc_dodebug
+ 	},
+ 	{
++		.ctl_name	= CTL_TRANSPORT,
+ 		.procname	= "transports",
+ 		.maxlen		= 256,
+ 		.mode		= 0444,
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+index 6fb493c..761ad29 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -247,10 +247,6 @@ static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
+ 	struct svcxprt_rdma *xprt = cq_context;
+ 	unsigned long flags;
+ 
+-	/* Guard against unconditional flush call for destroyed QP */
+-	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount)==0)
+-		return;
+-
+ 	/*
+ 	 * Set the bit regardless of whether or not it's on the list
+ 	 * because it may be on the list already due to an SQ
+@@ -411,10 +407,6 @@ static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
+ 	struct svcxprt_rdma *xprt = cq_context;
+ 	unsigned long flags;
+ 
+-	/* Guard against unconditional flush call for destroyed QP */
+-	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount)==0)
+-		return;
+-
+ 	/*
+ 	 * Set the bit regardless of whether or not it's on the list
+ 	 * because it may be on the list already due to an RQ
+@@ -1116,9 +1108,6 @@ static void __svc_rdma_free(struct work_struct *work)
+ 		container_of(work, struct svcxprt_rdma, sc_work);
+ 	dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);
+ 
+-	/* We should only be called from kref_put */
+-	BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);
+-
+ 	/*
+ 	 * Destroy queued, but not processed read completions. Note
+ 	 * that this cleanup has to be done before destroying the
+diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
+index 8710117..ce94fa4 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma.c
++++ b/net/sunrpc/xprtrdma/svc_rdma.c
+@@ -116,6 +116,7 @@ static int read_reset_stat(ctl_table *table, int write,
+ static struct ctl_table_header *svcrdma_table_header;
+ static ctl_table svcrdma_parm_table[] = {
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "max_requests",
+ 		.data		= &svcrdma_max_requests,
+ 		.maxlen		= sizeof(unsigned int),
+@@ -126,6 +127,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.extra2		= &max_max_requests
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "max_req_size",
+ 		.data		= &svcrdma_max_req_size,
+ 		.maxlen		= sizeof(unsigned int),
+@@ -136,6 +138,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.extra2		= &max_max_inline
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "max_outbound_read_requests",
+ 		.data		= &svcrdma_ord,
+ 		.maxlen		= sizeof(unsigned int),
+@@ -147,6 +150,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 	},
+ 
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "rdma_stat_read",
+ 		.data		= &rdma_stat_read,
+ 		.maxlen		= sizeof(atomic_t),
+@@ -154,6 +158,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.proc_handler	= &read_reset_stat,
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "rdma_stat_recv",
+ 		.data		= &rdma_stat_recv,
+ 		.maxlen		= sizeof(atomic_t),
+@@ -161,6 +166,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.proc_handler	= &read_reset_stat,
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "rdma_stat_write",
+ 		.data		= &rdma_stat_write,
+ 		.maxlen		= sizeof(atomic_t),
+@@ -168,6 +174,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.proc_handler	= &read_reset_stat,
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "rdma_stat_sq_starve",
+ 		.data		= &rdma_stat_sq_starve,
+ 		.maxlen		= sizeof(atomic_t),
+@@ -175,6 +182,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.proc_handler	= &read_reset_stat,
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "rdma_stat_rq_starve",
+ 		.data		= &rdma_stat_rq_starve,
+ 		.maxlen		= sizeof(atomic_t),
+@@ -182,6 +190,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.proc_handler	= &read_reset_stat,
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "rdma_stat_rq_poll",
+ 		.data		= &rdma_stat_rq_poll,
+ 		.maxlen		= sizeof(atomic_t),
+@@ -189,6 +198,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.proc_handler	= &read_reset_stat,
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "rdma_stat_rq_prod",
+ 		.data		= &rdma_stat_rq_prod,
+ 		.maxlen		= sizeof(atomic_t),
+@@ -196,6 +206,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.proc_handler	= &read_reset_stat,
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "rdma_stat_sq_poll",
+ 		.data		= &rdma_stat_sq_poll,
+ 		.maxlen		= sizeof(atomic_t),
+@@ -203,6 +214,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 		.proc_handler	= &read_reset_stat,
+ 	},
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "rdma_stat_sq_prod",
+ 		.data		= &rdma_stat_sq_prod,
+ 		.maxlen		= sizeof(atomic_t),
+@@ -216,6 +228,7 @@ static ctl_table svcrdma_parm_table[] = {
+ 
+ static ctl_table svcrdma_table[] = {
+ 	{
++		.ctl_name	= CTL_UNNUMBERED,
+ 		.procname	= "svc_rdma",
+ 		.mode		= 0555,
+ 		.child		= svcrdma_parm_table

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/sdp_0090_revert_to_2_6_24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/sdp_0090_revert_to_2_6_24.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/sdp_0090_revert_to_2_6_24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,242 @@
+---
+ drivers/infiniband/ulp/sdp/sdp.h       |   26 --------------------------
+ drivers/infiniband/ulp/sdp/sdp_bcopy.c |   24 ++++++++++++------------
+ drivers/infiniband/ulp/sdp/sdp_cma.c   |    2 --
+ drivers/infiniband/ulp/sdp/sdp_main.c  |   19 +++++++++----------
+ 4 files changed, 21 insertions(+), 50 deletions(-)
+
+Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
+===================================================================
+--- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp.h
++++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
+@@ -317,30 +317,4 @@
+ void sdp_start_keepalive_timer(struct sock *sk);
+ void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ 
+-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+-{
+-	struct sk_buff *skb;
+-
+-	/* The TCP header must be at least 32-bit aligned.  */
+-	size = ALIGN(size, 4);
+-
+-	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+-	if (skb) {
+-		if (sk_wmem_schedule(sk, skb->truesize)) {
+-			/*
+-			 * Make sure that we have exactly size bytes
+-			 * available to the caller, no more, no less.
+-			 */
+-			skb_reserve(skb, skb_tailroom(skb) - size);
+-			return skb;
+-		}
+-		__kfree_skb(skb);
+-	} else {
+-		sk->sk_prot->enter_memory_pressure(sk);
+-		sk_stream_moderate_sndbuf(sk);
+-	}
+-	return NULL;
+-}
+-
+-
+ #endif
+Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+===================================================================
+--- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
++++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+@@ -139,7 +139,7 @@
+ 	}
+ 
+ 
+-	sk_mem_reclaim(sk);
++	sk_stream_mem_reclaim(sk);
+ 
+ 	if (!sock_flag(sk, SOCK_DEAD)) {
+ 		sk->sk_state_change(sk);
+@@ -190,7 +190,7 @@
+ 	struct ib_send_wr *bad_wr;
+ 
+ 	h->mid = mid;
+-	if (unlikely(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_URG))
++	if (unlikely(TCP_SKB_CB(skb)->flags & TCPCB_URG))
+ 		h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
+ 	else
+ 		h->flags = 0;
+@@ -234,7 +234,7 @@
+ 	ssk->tx_wr.num_sge = frags + 1;
+ 	ssk->tx_wr.opcode = IB_WR_SEND;
+ 	ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
+-	if (unlikely(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_URG))
++	if (unlikely(TCP_SKB_CB(skb)->flags & TCPCB_URG))
+ 		ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
+ 	rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
+ 	++ssk->tx_head;
+@@ -304,11 +304,11 @@
+ 	/* TODO: allocate from cache */
+ 
+ 	if (unlikely(ssk->isk.sk.sk_allocation)) {
+-		skb = sdp_stream_alloc_skb(&ssk->isk.sk, SDP_HEAD_SIZE,
++		skb = sk_stream_alloc_skb(&ssk->isk.sk, SDP_HEAD_SIZE,
+ 					  ssk->isk.sk.sk_allocation);
+ 		gfp_page = ssk->isk.sk.sk_allocation | __GFP_HIGHMEM;
+ 	} else {
+-		skb = sdp_stream_alloc_skb(&ssk->isk.sk, SDP_HEAD_SIZE,
++		skb = sk_stream_alloc_skb(&ssk->isk.sk, SDP_HEAD_SIZE,
+ 					  GFP_KERNEL);
+ 		gfp_page = GFP_HIGHUSER;
+ 	}
+@@ -476,7 +476,7 @@
+ 	if (likely(ssk->bufs > 1) &&
+ 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
+ 		struct sk_buff *skb;
+-		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
++		skb = sk_stream_alloc_skb(&ssk->isk.sk,
+ 					  sizeof(struct sdp_bsdh),
+ 					  GFP_KERNEL);
+ 		if (!skb)
+@@ -514,7 +514,7 @@
+ 	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
+ 		struct sdp_chrecvbuf *resp_size;
+ 		ssk->recv_request = 0;
+-		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
++		skb = sk_stream_alloc_skb(&ssk->isk.sk,
+ 					  sizeof(struct sdp_bsdh) +
+ 					  sizeof(*resp_size),
+ 					  gfp_page);
+@@ -539,7 +539,7 @@
+ 	    ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
+ 	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
+ 		struct sdp_chrecvbuf *req_size;
+-		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
++		skb = sk_stream_alloc_skb(&ssk->isk.sk,
+ 					  sizeof(struct sdp_bsdh) +
+ 					  sizeof(*req_size),
+ 					  gfp_page);
+@@ -561,7 +561,7 @@
+ 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
+ 	    likely((1 << ssk->isk.sk.sk_state) &
+ 		    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
+-		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
++		skb = sk_stream_alloc_skb(&ssk->isk.sk,
+ 					  sizeof(struct sdp_bsdh),
+ 					  GFP_KERNEL);
+ 		/* FIXME */
+@@ -573,7 +573,7 @@
+ 		!ssk->isk.sk.sk_send_head &&
+ 		ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
+ 		ssk->sdp_disconnect = 0;
+-		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
++		skb = sk_stream_alloc_skb(&ssk->isk.sk,
+ 					  sizeof(struct sdp_bsdh),
+ 					  gfp_page);
+ 		/* FIXME */
+@@ -778,7 +778,7 @@
+ 	}
+ 
+ out:
+-	sk_wmem_free_skb(&ssk->isk.sk, skb);
++	sk_stream_free_skb(&ssk->isk.sk, skb);
+ 
+ 	return 0;
+ }
+@@ -864,7 +864,7 @@
+ 
+ 	sdp_poll_cq(ssk, cq);
+ 	release_sock(sk);
+-	sk_mem_reclaim(sk);
++	sk_stream_mem_reclaim(sk);
+ 	lock_sock(sk);
+ 	cq = ssk->cq;
+ 	if (unlikely(!cq))
+Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
+===================================================================
+--- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
++++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
+@@ -161,8 +161,6 @@
+ 		goto err_cq;
+ 	}
+ 
+-	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+-
+         qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
+ 
+ 	rc = rdma_create_qp(id, pd, &qp_init_attr);
+Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+===================================================================
+--- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -509,7 +509,7 @@
+ 		__kfree_skb(skb);
+ 	}
+ 
+-	sk_mem_reclaim(sk);
++	sk_stream_mem_reclaim(sk);
+ 
+ 	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
+ 	 * 3.10, we send a RST here because data was lost.  To
+@@ -1200,7 +1200,7 @@
+ {
+ 	if (unlikely(flags & MSG_OOB)) {
+ 		struct sk_buff *skb = sk->sk_write_queue.prev;
+-		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_URG;
++		TCP_SKB_CB(skb)->flags |= TCPCB_URG;
+ 	}
+ }
+ 
+@@ -1217,8 +1217,7 @@
+ {
+         skb_header_release(skb);
+         __skb_queue_tail(&sk->sk_write_queue, skb);
+-	sk->sk_wmem_queued += skb->truesize;
+-        sk_mem_charge(sk, skb->truesize);
++	sk_charge_skb(sk, skb);
+         if (!sk->sk_send_head)
+                 sk->sk_send_head = skb;
+         if (ssk->nonagle & TCP_NAGLE_PUSH)
+@@ -1382,7 +1381,7 @@
+ 		if (copy > PAGE_SIZE - off)
+ 			copy = PAGE_SIZE - off;
+ 
+-		if (!sk_wmem_schedule(sk, copy))
++		if (!sk_stream_wmem_schedule(sk, copy))
+ 			return SDP_DO_WAIT_MEM;
+ 
+ 		if (!page) {
+@@ -1454,7 +1453,7 @@
+ 		if (left <= this_page)
+ 			this_page = left;
+ 
+-		if (!sk_wmem_schedule(sk, copy))
++		if (!sk_stream_wmem_schedule(sk, copy))
+ 			return SDP_DO_WAIT_MEM;
+ 
+ 		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+@@ -1662,8 +1661,8 @@
+ 						goto wait_for_sndbuf;
+ 				}
+ 
+-				skb = sdp_stream_alloc_skb(sk, select_size(sk, ssk),
+-							   sk->sk_allocation);
++				skb = sk_stream_alloc_pskb(sk, select_size(sk, ssk),
++							   0, sk->sk_allocation);
+ 				if (!skb)
+ 					goto wait_for_memory;
+ 
+@@ -1687,7 +1686,7 @@
+ 
+ 			/* OOB data byte should be the last byte of
+ 			   the data payload */
+-			if (unlikely(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_URG) &&
++			if (unlikely(TCP_SKB_CB(skb)->flags & TCPCB_URG) &&
+ 			    !(flags & MSG_OOB)) {
+ 				sdp_mark_push(ssk, skb);
+ 				goto new_segment;
+@@ -1763,7 +1762,7 @@
+ 		if (sk->sk_send_head == skb)
+ 			sk->sk_send_head = NULL;
+ 		__skb_unlink(skb, &sk->sk_write_queue);
+-		sk_wmem_free_skb(sk, skb);
++		sk_stream_free_skb(sk, skb);
+ 	}
+ 
+ do_error:
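
Note: the sdp_0090 revert above is mostly a mechanical rename of the 2.6.25+ socket
memory-accounting helpers back to their older sk_stream_* counterparts (together with
dropping the local sdp_stream_alloc_skb() wrapper and the TCPCB_FLAG_URG spelling).
For the simple one-to-one renames, a compat header in kernel_addons could in principle
absorb the difference instead of touching every call site; the sketch below is only an
illustration of that idea, not part of this commit. The header name and macro set are
assumptions, and the allocation-side changes (sk_stream_alloc_skb/sk_stream_alloc_pskb,
sk_charge_skb) are not one-to-one, so they would still need source patches like this one.

/* backport_sk_mem_compat.h, hypothetical, for illustration only */
#ifndef BACKPORT_SK_MEM_COMPAT_H
#define BACKPORT_SK_MEM_COMPAT_H

#include <net/sock.h>

/* Map the 2.6.25+ names onto the helpers available on 2.6.18-EL5.3. */
#define sk_mem_reclaim(sk)		sk_stream_mem_reclaim(sk)
#define sk_wmem_schedule(sk, size)	sk_stream_wmem_schedule(sk, size)
#define sk_wmem_free_skb(sk, skb)	sk_stream_free_skb(sk, skb)

#endif /* BACKPORT_SK_MEM_COMPAT_H */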

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/sdp_0100_revert_to_2_6_23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/sdp_0100_revert_to_2_6_23.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/sdp_0100_revert_to_2_6_23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,54 @@
+---
+ drivers/infiniband/ulp/sdp/sdp_main.c |   12 ++++--------
+ 1 file changed, 4 insertions(+), 8 deletions(-)
+
+Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+===================================================================
+--- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -2158,16 +2158,13 @@
+ 	.sendpage   = sock_no_sendpage,
+ };
+ 
+-static int sdp_create_socket(struct net *net, struct socket *sock, int protocol)
++static int sdp_create_socket(struct socket *sock, int protocol)
+ {
+ 	struct sock *sk;
+ 	int rc;
+ 
+ 	sdp_dbg(NULL, "%s: type %d protocol %d\n", __func__, sock->type, protocol);
+ 
+-	if (net != &init_net)
+-		return -EAFNOSUPPORT;
+-
+ 	if (sock->type != SOCK_STREAM) {
+ 		sdp_warn(NULL, "SDP: unsupported type %d.\n", sock->type);
+ 		return -ESOCKTNOSUPPORT;
+@@ -2179,7 +2176,7 @@
+ 		return -EPROTONOSUPPORT;
+ 	}
+ 
+-	sk = sk_alloc(net, PF_INET_SDP, GFP_KERNEL, &sdp_proto);
++	sk = sk_alloc(PF_INET_SDP, GFP_KERNEL, &sdp_proto, 1);
+ 	if (!sk) {
+ 		sdp_warn(NULL, "SDP: failed to allocate socket.\n");
+ 		return -ENOMEM;
+@@ -2363,8 +2360,7 @@
+ 	sdp_seq_afinfo.seq_fops->llseek        = seq_lseek;
+ 	sdp_seq_afinfo.seq_fops->release       = seq_release_private;
+ 
+-	p = proc_net_fops_create(&init_net, sdp_seq_afinfo.name, S_IRUGO,
+-				 sdp_seq_afinfo.seq_fops);
++ 	p = proc_net_fops_create(sdp_seq_afinfo.name, S_IRUGO, sdp_seq_afinfo.seq_fops);
+ 	if (p)
+ 		p->data = &sdp_seq_afinfo;
+ 	else
+@@ -2375,7 +2371,7 @@
+ 
+ static void sdp_proc_unregister(void)
+ {
+-	proc_net_remove(&init_net, sdp_seq_afinfo.name);
++	proc_net_remove(sdp_seq_afinfo.name);
+ 	memset(sdp_seq_afinfo.seq_fops, 0, sizeof(*sdp_seq_afinfo.seq_fops));
+ }
+ 

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/sdp_0120_revert_2_6_27_to_2_6_24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/sdp_0120_revert_2_6_27_to_2_6_24.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/sdp_0120_revert_2_6_27_to_2_6_24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,51 @@
+---
+ drivers/infiniband/ulp/sdp/sdp_cma.c  |    1 +
+ drivers/infiniband/ulp/sdp/sdp_main.c |    8 ++++----
+ 2 files changed, 5 insertions(+), 4 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/ulp/sdp/sdp_cma.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
++++ ofed_kernel/drivers/infiniband/ulp/sdp/sdp_cma.c
+@@ -31,6 +31,7 @@
+  *
+  * $Id$
+  */
++#include <asm/semaphore.h>
+ #include <linux/device.h>
+ #include <linux/in.h>
+ #include <linux/err.h>
+Index: ofed_kernel/drivers/infiniband/ulp/sdp/sdp_main.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_kernel/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -523,7 +523,7 @@ static void sdp_close(struct sock *sk, l
+ 	if (data_was_unread ||
+ 		(sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
+ 		/* Unread data was tossed, zap the connection. */
+-		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
++		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
+ 		sdp_exch_state(sk, TCPF_CLOSE_WAIT | TCPF_ESTABLISHED,
+ 			       TCP_TIME_WAIT);
+ 
+@@ -1845,7 +1845,7 @@ static int sdp_recvmsg(struct kiocb *ioc
+ 			if (offset < skb->len)
+ 				goto found_ok_skb;
+ 
+-			WARN_ON(!(flags & MSG_PEEK));
++			BUG_TRAP(flags & MSG_PEEK);
+ 			skb = skb->next;
+ 		} while (skb != (struct sk_buff *)&sk->sk_receive_queue);
+ 
+@@ -2082,9 +2082,9 @@ static unsigned int sdp_poll(struct file
+ 	return mask;
+ }
+ 
+-static void sdp_enter_memory_pressure(struct sock *sk)
++static void sdp_enter_memory_pressure(void)
+ {
+-	sdp_dbg(sk, "%s\n", __func__);
++	sdp_dbg(NULL, "%s\n", __func__);
+ }
+ 
+ void sdp_urg(struct sdp_sock *ssk, struct sk_buff *skb)

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/srp_0100_revert_role_to_2_6_23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/srp_0100_revert_role_to_2_6_23.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/srp_0100_revert_role_to_2_6_23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,30 @@
+commit aebd5e476ecc8ceb53577b20f2a352ff4ceffd8d
+Author: FUJITA Tomonori <tomof at acm.org>
+Date:   Wed Jul 11 15:08:15 2007 +0900
+
+    [SCSI] transport_srp: add rport roles attribute
+    
+    This adds a 'roles' attribute to rport like transport_fc. The role can
+    be initiator or target. That is, the initiator driver creates target
+    remote ports and the target driver creates initiator remote ports.
+    
+    Signed-off-by: FUJITA Tomonori <fujita.tomonori at lab.ntt.co.jp>
+    Signed-off-by: Mike Christie <michaelc at cs.wisc.edu>
+    Signed-off-by: James Bottomley <James.Bottomley at SteelEye.com>
+
+---
+ drivers/infiniband/ulp/srp/ib_srp.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+Index: ofed_kernel/drivers/infiniband/ulp/srp/ib_srp.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/srp/ib_srp.c
++++ ofed_kernel/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -1741,7 +1741,6 @@ static int srp_add_target(struct srp_hos
+ 
+ 	memcpy(ids.port_id, &target->id_ext, 8);
+ 	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
+-	ids.roles = SRP_RPORT_ROLE_TARGET;
+ 	rport = srp_rport_add(target->scsi_host, &ids);
+ 	if (IS_ERR(rport)) {
+ 		scsi_remove_host(target->scsi_host);
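
Note: srp_0100_revert_role_to_2_6_23.patch above simply drops the rport "roles"
assignment, since this kernel's srp transport class predates the roles attribute
described in the quoted commit message. For reference, a minimal sketch of the
unreverted upstream registration path, assembled from the lines visible in the hunk
(the function name is made up and error handling is abbreviated):

#include <linux/err.h>
#include <linux/string.h>
#include <scsi/scsi_transport_srp.h>
#include "ib_srp.h"	/* struct srp_target_port */

static int example_add_rport(struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;	/* the line this backport removes */

	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}
	return 0;
}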

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/srp_0200_revert_srp_transport_to_2.6.23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/srp_0200_revert_srp_transport_to_2.6.23.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/srp_0200_revert_srp_transport_to_2.6.23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,146 @@
+commit 3236822b1c9b67ad10745d965515b528818f1120
+Author: FUJITA Tomonori <fujita.tomonori at lab.ntt.co.jp>
+Date:   Wed Jun 27 16:33:12 2007 +0900
+
+    [SCSI] ib_srp: convert to use the srp transport class
+    
+    This converts ib_srp to use the srp transport class.
+    
+    I don't have ib hardware so I've not tested this patch.
+    
+    Signed-off-by: FUJITA Tomonori <fujita.tomonori at lab.ntt.co.jp>
+    Cc: Roland Dreier <rolandd at cisco.com>
+    Signed-off-by: James Bottomley <James.Bottomley at SteelEye.com>
+
+---
+ drivers/infiniband/ulp/srp/Kconfig  |    1 -
+ drivers/infiniband/ulp/srp/ib_srp.c |   28 ----------------------------
+ 2 files changed, 29 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/ulp/srp/Kconfig
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/srp/Kconfig
++++ ofed_kernel/drivers/infiniband/ulp/srp/Kconfig
+@@ -1,7 +1,6 @@
+ config INFINIBAND_SRP
+ 	tristate "InfiniBand SCSI RDMA Protocol"
+ 	depends on SCSI
+-	select SCSI_SRP_ATTRS
+ 	---help---
+ 	  Support for the SCSI RDMA Protocol over InfiniBand.  This
+ 	  allows you to access storage devices that speak SRP over
+Index: ofed_kernel/drivers/infiniband/ulp/srp/ib_srp.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/srp/ib_srp.c
++++ ofed_kernel/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -45,7 +45,6 @@
+ #include <scsi/scsi_device.h>
+ #include <scsi/scsi_dbg.h>
+ #include <scsi/srp.h>
+-#include <scsi/scsi_transport_srp.h>
+ 
+ #include "ib_srp.h"
+ 
+@@ -90,8 +89,6 @@ static void srp_remove_one(struct ib_dev
+ static void srp_completion(struct ib_cq *cq, void *target_ptr);
+ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
+ 
+-static struct scsi_transport_template *ib_srp_transport_template;
+-
+ static struct ib_client srp_client = {
+ 	.name   = "srp",
+ 	.add    = srp_add_one,
+@@ -447,7 +444,6 @@ static void srp_remove_work(struct work_
+ 	list_del(&target->list);
+ 	spin_unlock(&target->srp_host->target_lock);
+ 
+-	srp_remove_host(target->scsi_host);
+ 	scsi_remove_host(target->scsi_host);
+ 	ib_destroy_cm_id(target->cm_id);
+ 	srp_free_target_ib(target);
+@@ -1730,23 +1726,12 @@ static struct scsi_host_template srp_tem
+ 
+ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
+ {
+-	struct srp_rport_identifiers ids;
+-	struct srp_rport *rport;
+-
+ 	sprintf(target->target_name, "SRP.T10:%016llX",
+ 		 (unsigned long long) be64_to_cpu(target->id_ext));
+ 
+ 	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
+ 		return -ENODEV;
+ 
+-	memcpy(ids.port_id, &target->id_ext, 8);
+-	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
+-	rport = srp_rport_add(target->scsi_host, &ids);
+-	if (IS_ERR(rport)) {
+-		scsi_remove_host(target->scsi_host);
+-		return PTR_ERR(rport);
+-	}
+-
+ 	spin_lock(&host->target_lock);
+ 	list_add_tail(&target->list, &host->target_list);
+ 	spin_unlock(&host->target_lock);
+@@ -1973,7 +1958,6 @@ static ssize_t srp_create_target(struct 
+ 	if (!target_host)
+ 		return -ENOMEM;
+ 
+-	target_host->transportt = ib_srp_transport_template;
+ 	target_host->max_lun     = SRP_MAX_LUN;
+ 	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
+ 
+@@ -2307,7 +2291,6 @@ static void srp_remove_one(struct ib_dev
+ 
+ 		list_for_each_entry_safe(target, tmp_target,
+ 					 &host->target_list, list) {
+-			srp_remove_host(target->scsi_host);
+ 			scsi_remove_host(target->scsi_host);
+ 			srp_disconnect_target(target);
+ 			ib_destroy_cm_id(target->cm_id);
+@@ -2326,9 +2309,6 @@ static void srp_remove_one(struct ib_dev
+ 	kfree(srp_dev);
+ }
+ 
+-static struct srp_function_template ib_srp_transport_functions = {
+-};
+-
+ static int __init srp_init_module(void)
+ {
+ 	int ret;
+@@ -2338,11 +2318,6 @@ static int __init srp_init_module(void)
+ 		srp_sg_tablesize = 255;
+ 	}
+ 
+-	ib_srp_transport_template =
+-		srp_attach_transport(&ib_srp_transport_functions);
+-	if (!ib_srp_transport_template)
+-		return -ENOMEM;
+-
+ 	srp_template.sg_tablesize = srp_sg_tablesize;
+ 	srp_max_iu_len = (sizeof (struct srp_cmd) +
+ 			  sizeof (struct srp_indirect_buf) +
+@@ -2354,7 +2329,6 @@ static int __init srp_init_module(void)
+ 	ret = class_register(&srp_class);
+ 	if (ret) {
+ 		printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
+-		srp_release_transport(ib_srp_transport_template);
+ 		return ret;
+ 	}
+ 
+@@ -2363,7 +2337,6 @@ static int __init srp_init_module(void)
+ 	ret = ib_register_client(&srp_client);
+ 	if (ret) {
+ 		printk(KERN_ERR PFX "couldn't register IB client\n");
+-		srp_release_transport(ib_srp_transport_template);
+ 		ib_sa_unregister_client(&srp_sa_client);
+ 		class_unregister(&srp_class);
+ 		return ret;
+@@ -2377,7 +2350,6 @@ static void __exit srp_cleanup_module(vo
+ 	ib_unregister_client(&srp_client);
+ 	ib_sa_unregister_client(&srp_sa_client);
+ 	class_unregister(&srp_class);
+-	srp_release_transport(ib_srp_transport_template);
+ }
+ 
+ module_init(srp_init_module);
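
Note: the revert above strips the scsi_transport_srp plumbing (the Kconfig hunk drops
the SCSI_SRP_ATTRS select, which is not usable on this kernel). As a reminder of what
is being removed, a condensed sketch of the upstream attach/release pattern, using only
symbols that appear in the hunks, with the module init/exit paths trimmed down to the
transport-class handling:

static struct scsi_transport_template *ib_srp_transport_template;

static struct srp_function_template ib_srp_transport_functions = {
};

static int __init example_srp_init(void)
{
	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		return -ENOMEM;

	/* Each new Scsi_Host is then bound to the template before scsi_add_host():
	 *   target_host->transportt = ib_srp_transport_template;
	 * and each target is torn down with srp_remove_host(target->scsi_host).
	 */
	return 0;
}

static void __exit example_srp_exit(void)
{
	srp_release_transport(ib_srp_transport_template);
}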

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/srp_class_device_if.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/srp_class_device_if.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/srp_class_device_if.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,333 @@
+---
+ drivers/infiniband/ulp/srp/ib_srp.c |  154 ++++++++++++++++--------------------
+ drivers/infiniband/ulp/srp/ib_srp.h |    2 
+ 2 files changed, 72 insertions(+), 84 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/ulp/srp/ib_srp.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/srp/ib_srp.c
++++ ofed_kernel/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -1517,10 +1517,9 @@ static int srp_reset_host(struct scsi_cm
+ 	return ret;
+ }
+ 
+-static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
+-			   char *buf)
++static ssize_t show_id_ext(struct class_device *cdev, char *buf)
+ {
+-	struct srp_target_port *target = host_to_target(class_to_shost(dev));
++	struct srp_target_port *target = host_to_target(class_to_shost(cdev));
+ 
+ 	if (target->state == SRP_TARGET_DEAD ||
+ 	    target->state == SRP_TARGET_REMOVED)
+@@ -1530,10 +1529,9 @@ static ssize_t show_id_ext(struct device
+ 		       (unsigned long long) be64_to_cpu(target->id_ext));
+ }
+ 
+-static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
+-			     char *buf)
++static ssize_t show_ioc_guid(struct class_device *cdev, char *buf)
+ {
+-	struct srp_target_port *target = host_to_target(class_to_shost(dev));
++	struct srp_target_port *target = host_to_target(class_to_shost(cdev));
+ 
+ 	if (target->state == SRP_TARGET_DEAD ||
+ 	    target->state == SRP_TARGET_REMOVED)
+@@ -1543,10 +1541,9 @@ static ssize_t show_ioc_guid(struct devi
+ 		       (unsigned long long) be64_to_cpu(target->ioc_guid));
+ }
+ 
+-static ssize_t show_service_id(struct device *dev,
+-			       struct device_attribute *attr, char *buf)
++static ssize_t show_service_id(struct class_device *cdev, char *buf)
+ {
+-	struct srp_target_port *target = host_to_target(class_to_shost(dev));
++	struct srp_target_port *target = host_to_target(class_to_shost(cdev));
+ 
+ 	if (target->state == SRP_TARGET_DEAD ||
+ 	    target->state == SRP_TARGET_REMOVED)
+@@ -1556,10 +1553,9 @@ static ssize_t show_service_id(struct de
+ 		       (unsigned long long) be64_to_cpu(target->service_id));
+ }
+ 
+-static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
+-			 char *buf)
++static ssize_t show_pkey(struct class_device *cdev, char *buf)
+ {
+-	struct srp_target_port *target = host_to_target(class_to_shost(dev));
++	struct srp_target_port *target = host_to_target(class_to_shost(cdev));
+ 
+ 	if (target->state == SRP_TARGET_DEAD ||
+ 	    target->state == SRP_TARGET_REMOVED)
+@@ -1568,10 +1564,9 @@ static ssize_t show_pkey(struct device *
+ 	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
+ }
+ 
+-static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
+-			 char *buf)
++static ssize_t show_dgid(struct class_device *cdev, char *buf)
+ {
+-	struct srp_target_port *target = host_to_target(class_to_shost(dev));
++	struct srp_target_port *target = host_to_target(class_to_shost(cdev));
+ 
+ 	if (target->state == SRP_TARGET_DEAD ||
+ 	    target->state == SRP_TARGET_REMOVED)
+@@ -1588,10 +1583,9 @@ static ssize_t show_dgid(struct device *
+ 		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[7]));
+ }
+ 
+-static ssize_t show_orig_dgid(struct device *dev,
+-			      struct device_attribute *attr, char *buf)
++static ssize_t show_orig_dgid(struct class_device *cdev, char *buf)
+ {
+-	struct srp_target_port *target = host_to_target(class_to_shost(dev));
++	struct srp_target_port *target = host_to_target(class_to_shost(cdev));
+ 
+ 	if (target->state == SRP_TARGET_DEAD ||
+ 	    target->state == SRP_TARGET_REMOVED)
+@@ -1608,10 +1602,9 @@ static ssize_t show_orig_dgid(struct dev
+ 		       be16_to_cpu(target->orig_dgid[7]));
+ }
+ 
+-static ssize_t show_zero_req_lim(struct device *dev,
+-				 struct device_attribute *attr, char *buf)
++static ssize_t show_zero_req_lim(struct class_device *cdev, char *buf)
+ {
+-	struct srp_target_port *target = host_to_target(class_to_shost(dev));
++	struct srp_target_port *target = host_to_target(class_to_shost(cdev));
+ 
+ 	if (target->state == SRP_TARGET_DEAD ||
+ 	    target->state == SRP_TARGET_REMOVED)
+@@ -1620,27 +1613,24 @@ static ssize_t show_zero_req_lim(struct 
+ 	return sprintf(buf, "%d\n", target->zero_req_lim);
+ }
+ 
+-static ssize_t show_local_ib_port(struct device *dev,
+-				  struct device_attribute *attr, char *buf)
++static ssize_t show_local_ib_port(struct class_device *cdev, char *buf)
+ {
+-	struct srp_target_port *target = host_to_target(class_to_shost(dev));
++ 	struct srp_target_port *target = host_to_target(class_to_shost(cdev));
+ 
+ 	return sprintf(buf, "%d\n", target->srp_host->port);
+ }
+ 
+-static ssize_t show_local_ib_device(struct device *dev,
+-				    struct device_attribute *attr, char *buf)
++static ssize_t show_local_ib_device(struct class_device *cdev, char *buf)
+ {
+-	struct srp_target_port *target = host_to_target(class_to_shost(dev));
++ 	struct srp_target_port *target = host_to_target(class_to_shost(cdev));
+ 
+ 	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
+ }
+ 
+-static ssize_t srp_target_oofabric(struct device *dev,
+-				   struct device_attribute *attr, const char *buf,
+-				   size_t count)
++static ssize_t srp_target_oofabric(struct class_device *cdev,
++ 				   const char *buf, size_t count)
+ {
+-	struct srp_target_port *target = host_to_target(class_to_shost(dev));
++ 	struct srp_target_port *target = host_to_target(class_to_shost(cdev));
+ 
+ 	shost_printk(KERN_DEBUG, target->scsi_host, PFX
+ 		     "Get async_event out-of-fabric at state=%d qp_err=%d\n",
+@@ -1657,11 +1647,10 @@ static ssize_t srp_target_oofabric(struc
+ 	return count;
+ }
+ 
+-static ssize_t srp_target_infabric(struct device *dev,
+-				   struct device_attribute *attr, const char *buf,
+-				   size_t count)
++static ssize_t srp_target_infabric(struct class_device *cdev,
++ 				   const char *buf, size_t count)
+ {
+-	struct srp_target_port *target = host_to_target(class_to_shost(dev));
++ 	struct srp_target_port *target = host_to_target(class_to_shost(cdev));
+ 
+ 	shost_printk(KERN_DEBUG, target->scsi_host, PFX
+ 		     "Get async_event in-fabric at state=%d qp_err=%d\n",
+@@ -1681,30 +1670,30 @@ static ssize_t srp_target_infabric(struc
+ 	return count;
+ }
+ 
+-static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	   NULL);
+-static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	   NULL);
+-static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	   NULL);
+-static DEVICE_ATTR(pkey,	    S_IRUGO, show_pkey,		   NULL);
+-static DEVICE_ATTR(dgid,	    S_IRUGO, show_dgid,		   NULL);
+-static DEVICE_ATTR(orig_dgid,	    S_IRUGO, show_orig_dgid,	   NULL);
+-static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	   NULL);
+-static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
+-static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
+-static DEVICE_ATTR(target_oofabric, S_IWUSR, NULL,  srp_target_oofabric);
+-static DEVICE_ATTR(target_infabric, S_IWUSR, NULL,  srp_target_infabric);
+-
+-static struct device_attribute *srp_host_attrs[] = {
+-	&dev_attr_id_ext,
+-	&dev_attr_ioc_guid,
+-	&dev_attr_service_id,
+-	&dev_attr_pkey,
+-	&dev_attr_dgid,
+-	&dev_attr_orig_dgid,
+-	&dev_attr_zero_req_lim,
+-	&dev_attr_local_ib_port,
+-	&dev_attr_local_ib_device,
+-	&dev_attr_target_oofabric,
+-	&dev_attr_target_infabric,
++static CLASS_DEVICE_ATTR(id_ext,	  S_IRUGO, show_id_ext,		 NULL);
++static CLASS_DEVICE_ATTR(ioc_guid,	  S_IRUGO, show_ioc_guid,	 NULL);
++static CLASS_DEVICE_ATTR(service_id,	  S_IRUGO, show_service_id,	 NULL);
++static CLASS_DEVICE_ATTR(pkey,		  S_IRUGO, show_pkey,		 NULL);
++static CLASS_DEVICE_ATTR(dgid,		  S_IRUGO, show_dgid,		 NULL);
++static CLASS_DEVICE_ATTR(orig_dgid,	  S_IRUGO, show_orig_dgid,	 NULL);
++static CLASS_DEVICE_ATTR(zero_req_lim,	  S_IRUGO, show_zero_req_lim,	 NULL);
++static CLASS_DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,	 NULL);
++static CLASS_DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
++static CLASS_DEVICE_ATTR(target_oofabric, S_IWUSR, NULL,  srp_target_oofabric);
++static CLASS_DEVICE_ATTR(target_infabric, S_IWUSR, NULL,  srp_target_infabric);
++
++static struct class_device_attribute *srp_host_attrs[] = {
++	&class_device_attr_id_ext,
++	&class_device_attr_ioc_guid,
++	&class_device_attr_service_id,
++	&class_device_attr_pkey,
++	&class_device_attr_dgid,
++	&class_device_attr_orig_dgid,
++	&class_device_attr_zero_req_lim,
++	&class_device_attr_local_ib_port,
++	&class_device_attr_local_ib_device,
++ 	&class_device_attr_target_oofabric,
++ 	&class_device_attr_target_infabric,
+ 	NULL
+ };
+ 
+@@ -1744,17 +1733,17 @@ static int srp_add_target(struct srp_hos
+ 	return 0;
+ }
+ 
+-static void srp_release_dev(struct device *dev)
++static void srp_release_class_dev(struct class_device *class_dev)
+ {
+ 	struct srp_host *host =
+-		container_of(dev, struct srp_host, dev);
++		container_of(class_dev, struct srp_host, class_dev);
+ 
+ 	complete(&host->released);
+ }
+ 
+ static struct class srp_class = {
+ 	.name    = "infiniband_srp",
+-	.dev_release = srp_release_dev
++	.release = srp_release_class_dev
+ };
+ 
+ /*
+@@ -1942,12 +1931,11 @@ out:
+ 	return ret;
+ }
+ 
+-static ssize_t srp_create_target(struct device *dev,
+-				 struct device_attribute *attr,
++static ssize_t srp_create_target(struct class_device *class_dev,
+ 				 const char *buf, size_t count)
+ {
+ 	struct srp_host *host =
+-		container_of(dev, struct srp_host, dev);
++		container_of(class_dev, struct srp_host, class_dev);
+ 	struct Scsi_Host *target_host;
+ 	struct srp_target_port *target;
+ 	int ret;
+@@ -2033,27 +2021,27 @@ err:
+ 	return ret;
+ }
+ 
+-static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
++static CLASS_DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
+ 
+-static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
+-			  char *buf)
++static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
+ {
+-	struct srp_host *host = container_of(dev, struct srp_host, dev);
++	struct srp_host *host =
++		container_of(class_dev, struct srp_host, class_dev);
+ 
+ 	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
+ }
+ 
+-static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
++static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
+ 
+-static ssize_t show_port(struct device *dev, struct device_attribute *attr,
+-			 char *buf)
++static ssize_t show_port(struct class_device *class_dev, char *buf)
+ {
+-	struct srp_host *host = container_of(dev, struct srp_host, dev);
++	struct srp_host *host =
++		container_of(class_dev, struct srp_host, class_dev);
+ 
+ 	return sprintf(buf, "%d\n", host->port);
+ }
+ 
+-static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
++static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
+ 
+ static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
+ {
+@@ -2069,24 +2057,24 @@ static struct srp_host *srp_add_port(str
+ 	host->srp_dev = device;
+ 	host->port = port;
+ 
+-	host->dev.class = &srp_class;
+-	host->dev.parent = device->dev->dma_device;
+-	snprintf(host->dev.bus_id, BUS_ID_SIZE, "srp-%s-%d",
++ 	host->class_dev.class = &srp_class;
++ 	host->class_dev.dev   = device->dev->dma_device;
++ 	snprintf(host->class_dev.class_id, BUS_ID_SIZE, "srp-%s-%d",
+ 		 device->dev->name, port);
+ 
+-	if (device_register(&host->dev))
++ 	if (class_device_register(&host->class_dev))
+ 		goto free_host;
+-	if (device_create_file(&host->dev, &dev_attr_add_target))
++ 	if (class_device_create_file(&host->class_dev, &class_device_attr_add_target))
+ 		goto err_class;
+-	if (device_create_file(&host->dev, &dev_attr_ibdev))
++ 	if (class_device_create_file(&host->class_dev, &class_device_attr_ibdev))
+ 		goto err_class;
+-	if (device_create_file(&host->dev, &dev_attr_port))
++ 	if (class_device_create_file(&host->class_dev, &class_device_attr_port))
+ 		goto err_class;
+ 
+ 	return host;
+ 
+ err_class:
+-	device_unregister(&host->dev);
++ 	class_device_unregister(&host->class_dev);
+ 
+ free_host:
+ 	kfree(host);
+@@ -2263,7 +2251,7 @@ static void srp_remove_one(struct ib_dev
+ 	ib_unregister_event_handler(&srp_dev->event_handler);
+ 
+ 	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
+-		device_unregister(&host->dev);
++		class_device_unregister(&host->class_dev);
+ 		/*
+ 		 * Wait for the sysfs entry to go away, so that no new
+ 		 * target ports can be created.
+Index: ofed_kernel/drivers/infiniband/ulp/srp/ib_srp.h
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/srp/ib_srp.h
++++ ofed_kernel/drivers/infiniband/ulp/srp/ib_srp.h
+@@ -98,7 +98,7 @@ struct srp_device {
+ struct srp_host {
+ 	struct srp_device      *srp_dev;
+ 	u8			port;
+-	struct device		dev;
++	struct class_device	class_dev;
+ 	struct list_head	target_list;
+ 	spinlock_t		target_lock;
+ 	struct completion	released;
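
Note: srp_class_device_if.patch above converts the srp_host sysfs interface from the
current struct device / DEVICE_ATTR style back to the older struct class_device /
CLASS_DEVICE_ATTR style. The conversion is mechanical; below is a minimal before/after
illustration with a made-up attribute name (the two variants target different kernels
and are never built together):

/* upstream style (what the backport starts from): */
static ssize_t show_example(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}
static DEVICE_ATTR(example, S_IRUGO, show_example, NULL);

/* 2.6.18-EL5.3 style (what the patch produces): */
static ssize_t show_example_old(struct class_device *cdev, char *buf)
{
	struct srp_host *host =
		container_of(cdev, struct srp_host, class_dev);

	return sprintf(buf, "%d\n", host->port);
}
static CLASS_DEVICE_ATTR(example_old, S_IRUGO, show_example_old, NULL);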

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/srp_cmd_to_2_6_22.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/srp_cmd_to_2_6_22.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/srp_cmd_to_2_6_22.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,79 @@
+---
+ drivers/infiniband/ulp/srp/ib_srp.c |   33 +++++++++++++++++++++++++++++----
+ drivers/infiniband/ulp/srp/ib_srp.h |    5 +++++
+ 2 files changed, 34 insertions(+), 4 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/ulp/srp/ib_srp.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/srp/ib_srp.c
++++ ofed_kernel/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -510,6 +510,9 @@ static void srp_unmap_data(struct scsi_c
+ 			   struct srp_target_port *target,
+ 			   struct srp_request *req)
+ {
++	struct scatterlist *scat;
++	int nents;
++
+ 	if (!scsi_sglist(scmnd) ||
+ 	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
+ 	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
+@@ -520,8 +523,20 @@ static void srp_unmap_data(struct scsi_c
+ 		req->fmr = NULL;
+ 	}
+ 
+-	ib_dma_unmap_sg(target->srp_host->srp_dev->dev, scsi_sglist(scmnd),
+-			scsi_sg_count(scmnd), scmnd->sc_data_direction);
++	/*
++	 * This handling of non-SG commands can be killed when the
++	 * SCSI midlayer no longer generates non-SG commands.
++	 */
++	if (likely(scsi_sg_count(scmnd))) {
++		nents = scsi_sg_count(scmnd);
++		scat  = scsi_sglist(scmnd);
++	} else {
++		nents = 1;
++		scat  = &req->fake_sg;
++	}
++
++	ib_dma_unmap_sg(target->srp_host->srp_dev->dev, scat, nents,
++			scmnd->sc_data_direction);
+ }
+ 
+ static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
+@@ -721,8 +736,18 @@ static int srp_map_data(struct scsi_cmnd
+ 		return -EINVAL;
+ 	}
+ 
+-	nents = scsi_sg_count(scmnd);
+-	scat  = scsi_sglist(scmnd);
++	/*
++	 * This handling of non-SG commands can be killed when the
++	 * SCSI midlayer no longer generates non-SG commands.
++	 */
++	if (likely(scsi_sg_count(scmnd))) {
++		nents = scsi_sg_count(scmnd);
++		scat  = scsi_sglist(scmnd);
++	} else {
++		nents = 1;
++		scat  = &req->fake_sg;
++		sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
++	}
+ 
+ 	dev = target->srp_host->srp_dev;
+ 	ibdev = dev->dev;
+Index: ofed_kernel/drivers/infiniband/ulp/srp/ib_srp.h
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/srp/ib_srp.h
++++ ofed_kernel/drivers/infiniband/ulp/srp/ib_srp.h
+@@ -111,6 +111,11 @@ struct srp_request {
+ 	struct srp_iu	       *cmd;
+ 	struct srp_iu	       *tsk_mgmt;
+ 	struct ib_pool_fmr     *fmr;
++	/*
++	 * Fake scatterlist used when scsi_sg_count(scmnd)==0.  Can be killed
++	 * when the SCSI midlayer no longer generates non-SG commands.
++	 */
++	struct scatterlist	fake_sg;
+ 	struct completion	done;
+ 	short			index;
+ 	u8			cmd_done;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/srpt_class_dev.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/srpt_class_dev.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/srpt_class_dev.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,121 @@
+---
+ drivers/infiniband/ulp/srpt/ib_srpt.c |   42 ++++++++++++++++------------------
+ drivers/infiniband/ulp/srpt/ib_srpt.h |    2 -
+ 2 files changed, 21 insertions(+), 23 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/ulp/srpt/ib_srpt.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ ofed_kernel/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -2297,20 +2297,19 @@ struct scst_tgt_template srpt_template =
+ 	.task_mgmt_fn_done = srpt_tsk_mgmt_done
+ };
+ 
+-static void srpt_release_class_dev(struct device *dev)
++static void srpt_release_class_dev(struct class_device *class_dev)
+ {
+ }
+ 
+ static struct class srpt_class = {
+ 	.name = "infiniband_srpt",
+-	.dev_release = srpt_release_class_dev
++	.release = srpt_release_class_dev
+ };
+ 
+-static ssize_t show_login_info(struct device *dev,
+-			       struct device_attribute *attr, char *buf)
++static ssize_t show_login_info(struct class_device *class_dev, char *buf)
+ {
+ 	struct srpt_device *sdev =
+-		container_of(dev, struct srpt_device, dev);
++		container_of(class_dev, struct srpt_device, class_dev);
+ 	struct srpt_port *sport;
+ 	int i;
+ 	int len = 0;
+@@ -2338,16 +2337,15 @@ static ssize_t show_login_info(struct de
+ 	return len;
+ }
+ 
+-static DEVICE_ATTR(login_info, S_IRUGO, show_login_info, NULL);
++static CLASS_DEVICE_ATTR(login_info, S_IRUGO, show_login_info, NULL);
+ 
+-static ssize_t show_mem_info(struct device *dev, struct device_attribute *attr,
+-			     char *buf)
++static ssize_t show_mem_info(struct class_device *class_dev, char *buf)
+ {
+ 	return sprintf(buf, "mem_avail= %d mem_elements= %d mem_size= %d\n",
+ 		       mem_avail, mem_elements, mem_size);
+ }
+ 
+-static DEVICE_ATTR(mem_info, S_IRUGO, show_mem_info, NULL);
++static CLASS_DEVICE_ATTR(mem_info, S_IRUGO, show_mem_info, NULL);
+ 
+ static void srpt_add_one(struct ib_device *device)
+ {
+@@ -2362,23 +2360,23 @@ static void srpt_add_one(struct ib_devic
+ 	sdev->device = device;
+ 	init_completion(&sdev->scst_released);
+ 
+-	sdev->dev.class = &srpt_class;
+-	sdev->dev.parent = device->dma_device;
+-	snprintf(sdev->dev.bus_id, BUS_ID_SIZE, "srpt-%s", device->name);
++	sdev->class_dev.class = &srpt_class;
++	sdev->class_dev.dev = device->dma_device;
++	snprintf(sdev->class_dev.class_id, BUS_ID_SIZE, "srpt-%s", device->name);
+ 
+-	if (device_register(&sdev->dev))
++	if (class_device_register(&sdev->class_dev))
+ 		goto free_dev;
+-	if (device_create_file(&sdev->dev, &dev_attr_login_info))
+-		goto err_dev;
+-	if (device_create_file(&sdev->dev, &dev_attr_mem_info))
+-		goto err_dev;
++	if (class_device_create_file(&sdev->class_dev, &class_device_attr_login_info))
++		goto err_class;
++	if (class_device_create_file(&sdev->class_dev, &class_device_attr_mem_info))
++		goto err_class;
+ 
+ 	if (ib_query_device(device, &sdev->dev_attr))
+-		goto err_dev;
++		goto err_class;
+ 
+ 	sdev->pd = ib_alloc_pd(device);
+ 	if (IS_ERR(sdev->pd))
+-		goto err_dev;
++		goto err_class;
+ 
+ 	sdev->mr = ib_get_dma_mr(sdev->pd, IB_ACCESS_LOCAL_WRITE);
+ 	if (IS_ERR(sdev->mr))
+@@ -2450,8 +2448,8 @@ static void srpt_add_one(struct ib_devic
+ 	ib_dereg_mr(sdev->mr);
+       err_pd:
+ 	ib_dealloc_pd(sdev->pd);
+-      err_dev:
+-	device_unregister(&sdev->dev);
++      err_class:
++	class_device_unregister(&sdev->class_dev);
+       free_dev:
+ 	kfree(sdev);
+ }
+@@ -2472,7 +2470,7 @@ static void srpt_remove_one(struct ib_de
+ 	ib_destroy_srq(sdev->srq);
+ 	ib_dereg_mr(sdev->mr);
+ 	ib_dealloc_pd(sdev->pd);
+-	device_unregister(&sdev->dev);
++	class_device_unregister(&sdev->class_dev);
+ 
+ 	for (i = 0; i < SRPT_SRQ_SIZE; ++i)
+ 		srpt_free_ioctx(sdev, sdev->ioctx_ring[i]);
+Index: ofed_kernel/drivers/infiniband/ulp/srpt/ib_srpt.h
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/srpt/ib_srpt.h
++++ ofed_kernel/drivers/infiniband/ulp/srpt/ib_srpt.h
+@@ -171,7 +171,7 @@ struct srpt_device {
+ 	struct srpt_port port[2];
+ 	struct ib_event_handler event_handler;
+ 	struct completion scst_released;
+-	struct device dev;
++	struct class_device class_dev;
+ 
+ 	struct scst_tgt *scst_tgt;
+ };

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/uverbs_main_1_to_2_6_24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/uverbs_main_1_to_2_6_24.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/uverbs_main_1_to_2_6_24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,39 @@
+---
+ drivers/infiniband/core/uverbs_main.c |   19 ++++++++++++-------
+ 1 file changed, 12 insertions(+), 7 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/core/uverbs_main.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/uverbs_main.c
++++ ofed_kernel/drivers/infiniband/core/uverbs_main.c
+@@ -553,18 +553,23 @@ struct file *ib_uverbs_alloc_event_file(
+ 		goto err;
+ 	}
+ 
+-	/*
+-	 * fops_get() can't fail here, because we're coming from a
+-	 * system call on a uverbs file, which will already have a
+-	 * module reference.
+-	 */
+-	filp = alloc_file(uverbs_event_mnt, dget(uverbs_event_mnt->mnt_root),
+-			  FMODE_READ, fops_get(&uverbs_event_fops));
++	filp = get_empty_filp();
+ 	if (!filp) {
+ 		ret = -ENFILE;
+ 		goto err_fd;
+ 	}
+ 
++	/*
++	 * fops_get() can't fail here, because we're coming from a
++	 * system call on a uverbs file, which will already have a
++	 * module reference.
++	 */
++	filp->f_op 	   = fops_get(&uverbs_event_fops);
++	filp->f_path.mnt 	   = mntget(uverbs_event_mnt);
++	filp->f_path.dentry 	   = dget(uverbs_event_mnt->mnt_root);
++	filp->f_mapping    = filp->f_path.dentry->d_inode->i_mapping;
++	filp->f_flags      = O_RDONLY;
++	filp->f_mode       = FMODE_READ;
+ 	filp->private_data = ev_file;
+ 
+ 	return filp;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/uverbs_main_2_to_2_6_19.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/uverbs_main_2_to_2_6_19.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18-EL5.3/uverbs_main_2_to_2_6_19.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,70 @@
+---
+ drivers/infiniband/core/uverbs_main.c        |    6 +++---
+ drivers/infiniband/hw/ipath/ipath_file_ops.c |    4 ++--
+ drivers/infiniband/hw/ipath/ipath_fs.c       |    6 +++---
+ 3 files changed, 8 insertions(+), 8 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/core/uverbs_main.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/uverbs_main.c
++++ ofed_kernel/drivers/infiniband/core/uverbs_main.c
+@@ -565,9 +565,9 @@ struct file *ib_uverbs_alloc_event_file(
+ 	 * module reference.
+ 	 */
+ 	filp->f_op 	   = fops_get(&uverbs_event_fops);
+-	filp->f_path.mnt 	   = mntget(uverbs_event_mnt);
+-	filp->f_path.dentry 	   = dget(uverbs_event_mnt->mnt_root);
+-	filp->f_mapping    = filp->f_path.dentry->d_inode->i_mapping;
++	filp->f_vfsmnt 	   = mntget(uverbs_event_mnt);
++	filp->f_dentry 	   = dget(uverbs_event_mnt->mnt_root);
++	filp->f_mapping    = filp->f_dentry->d_inode->i_mapping;
+ 	filp->f_flags      = O_RDONLY;
+ 	filp->f_mode       = FMODE_READ;
+ 	filp->private_data = ev_file;
+Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_file_ops.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_file_ops.c
++++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_file_ops.c
+@@ -1868,9 +1868,9 @@ static int ipath_assign_port(struct file
+ 		goto done_chk_sdma;
+ 	}
+ 
+-	i_minor = iminor(fp->f_path.dentry->d_inode) - IPATH_USER_MINOR_BASE;
++	i_minor = iminor(fp->f_dentry->d_inode) - IPATH_USER_MINOR_BASE;
+ 	ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n",
+-		   (long)fp->f_path.dentry->d_inode->i_rdev, i_minor);
++		   (long)fp->f_dentry->d_inode->i_rdev, i_minor);
+ 
+ 	if (i_minor)
+ 		ret = find_free_port(i_minor - 1, fp, uinfo);
+Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_fs.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_fs.c
++++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_fs.c
+@@ -113,7 +113,7 @@ static ssize_t atomic_counters_read(stru
+ 	struct infinipath_counters counters;
+ 	struct ipath_devdata *dd;
+ 
+-	dd = file->f_path.dentry->d_inode->i_private;
++	dd = file->f_dentry->d_inode->i_private;
+ 	dd->ipath_f_read_counters(dd, &counters);
+ 
+ 	return simple_read_from_buffer(buf, count, ppos, &counters,
+@@ -153,7 +153,7 @@ static ssize_t flash_read(struct file *f
+ 		goto bail;
+ 	}
+ 
+-	dd = file->f_path.dentry->d_inode->i_private;
++	dd = file->f_dentry->d_inode->i_private;
+ 	if (ipath_eeprom_read(dd, pos, tmp, count)) {
+ 		ipath_dev_err(dd, "failed to read from flash\n");
+ 		ret = -ENXIO;
+@@ -206,7 +206,7 @@ static ssize_t flash_write(struct file *
+ 		goto bail_tmp;
+ 	}
+ 
+-	dd = file->f_path.dentry->d_inode->i_private;
++	dd = file->f_dentry->d_inode->i_private;
+ 	if (ipath_eeprom_write(dd, pos, tmp, count)) {
+ 		ret = -ENXIO;
+ 		ipath_dev_err(dd, "failed to write to flash\n");

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/core_2_z010_sysfs_to_2.6.18.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/core_2_z010_sysfs_to_2.6.18.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/core_2_z010_sysfs_to_2.6.18.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,32 @@
+--- a/drivers/infiniband/core/device.c	2009-02-24 14:00:50.387541000 +0200
++++ b/drivers/infiniband/core/device.c	2009-02-24 14:00:54.891362000 +0200
+@@ -199,7 +199,7 @@ void ib_dealloc_device(struct ib_device 
+ 
+ 	BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);
+ 
+-	kobject_put(&device->dev.kobj);
++	ib_device_unregister_sysfs(device);
+ }
+ EXPORT_SYMBOL(ib_dealloc_device);
+ 
+@@ -356,8 +356,6 @@ void ib_unregister_device(struct ib_devi
+ 
+ 	mutex_unlock(&device_mutex);
+ 
+-	ib_device_unregister_sysfs(device);
+-
+ 	spin_lock_irqsave(&device->client_data_lock, flags);
+ 	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ 		kfree(context);
+--- a/drivers/infiniband/core/sysfs.c	2009-02-24 13:58:42.509754000 +0200
++++ b/drivers/infiniband/core/sysfs.c	2009-02-24 14:00:54.894368000 +0200
+@@ -863,9 +863,6 @@ void ib_device_unregister_sysfs(struct i
+ 	struct kobject *p, *t;
+ 	struct ib_port *port;
+ 
+-	/* Hold kobject until ib_dealloc_device() */
+-	kobject_get(&device->dev.kobj);
+-
+ 	list_for_each_entry_safe(p, t, &device->port_list, entry) {
+ 		list_del(&p->entry);
+ 		port = container_of(p, struct ib_port, kobj);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/core_z0010_sysfs_race.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/core_z0010_sysfs_race.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/core_z0010_sysfs_race.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,496 @@
+ib_core: avoid race condition between sysfs access and low-level module unload. (V2)
+
+In newer kernels, although a low-level module will not be unloaded (code)
+while its sysfs interface is being accessed, it is possible for the module to
+free all its resources (data) during such access.  This almost always causes
+a kernel Oops.
+
+To avoid this, we protect the device reg_state with a mutex, and perform
+all sysfs operations (show, store) atomically within this mutex.
+
+V2: fix thinko bug in sysfs_state_show changes (found by Ralph Campbell).
+
+Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
+Signed-off-by: Ralph Campbell <ralph.campbell at qlogic.com>
+
+Index: ofed_kernel/drivers/infiniband/core/device.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/device.c	2009-02-22 15:36:34.531430000 +0200
++++ ofed_kernel/drivers/infiniband/core/device.c	2009-02-22 15:38:28.650322000 +0200
+@@ -178,9 +178,14 @@ static int end_port(struct ib_device *de
+  */
+ struct ib_device *ib_alloc_device(size_t size)
+ {
++	struct ib_device *ibdev;
++
+ 	BUG_ON(size < sizeof (struct ib_device));
+ 
+-	return kzalloc(size, GFP_KERNEL);
++	ibdev = kzalloc(size, GFP_KERNEL);
++	if (ibdev)
++		mutex_init(&ibdev->sysfs_mutex);
++	return ibdev;
+ }
+ EXPORT_SYMBOL(ib_alloc_device);
+ 
+@@ -313,9 +318,10 @@ int ib_register_device(struct ib_device 
+ 		goto out;
+ 	}
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	list_add_tail(&device->core_list, &device_list);
+-
+ 	device->reg_state = IB_DEV_REGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ 
+ 	{
+ 		struct ib_client *client;
+@@ -361,7 +367,9 @@ void ib_unregister_device(struct ib_devi
+ 		kfree(context);
+ 	spin_unlock_irqrestore(&device->client_data_lock, flags);
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	device->reg_state = IB_DEV_UNREGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ }
+ EXPORT_SYMBOL(ib_unregister_device);
+ 
+Index: ofed_kernel/drivers/infiniband/core/sysfs.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/sysfs.c	2009-02-22 15:36:35.914354000 +0200
++++ ofed_kernel/drivers/infiniband/core/sysfs.c	2009-02-22 16:12:37.485629000 +0200
+@@ -94,7 +94,7 @@ static ssize_t state_show(struct ib_port
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+ 	static const char *state_name[] = {
+ 		[IB_PORT_NOP]		= "NOP",
+@@ -105,26 +105,33 @@ static ssize_t state_show(struct ib_port
+ 		[IB_PORT_ACTIVE_DEFER]	= "ACTIVE_DEFER"
+ 	};
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%d: %s\n", attr.state,
+-		       attr.state >= 0 && attr.state < ARRAY_SIZE(state_name) ?
+-		       state_name[attr.state] : "UNKNOWN");
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d: %s\n", attr.state,
++				      attr.state >= 0 &&
++				      attr.state < ARRAY_SIZE(state_name) ?
++				      state_name[attr.state] : "UNKNOWN");
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "0x%x\n", attr.lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_mask_count_show(struct ib_port *p,
+@@ -132,52 +139,64 @@ static ssize_t lid_mask_count_show(struc
+ 				   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.lmc);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.lmc);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%x\n", attr.sm_lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.sm_lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused,
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.sm_sl);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.sm_sl);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused,
+ 			     char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
+@@ -186,24 +205,33 @@ static ssize_t rate_show(struct ib_port 
+ 	struct ib_port_attr attr;
+ 	char *speed = "";
+ 	int rate;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	switch (attr.active_speed) {
+-	case 2: speed = " DDR"; break;
+-	case 4: speed = " QDR"; break;
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.active_speed) {
++			case 2: speed = " DDR"; break;
++			case 4: speed = " QDR"; break;
++			}
++
++			rate = 25 * ib_width_enum_to_int(attr.active_width) *
++				attr.active_speed;
++			if (rate < 0) {
++				ret = -EINVAL;
++				goto out;
++			}
++
++			ret = sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
++				      rate / 10, rate % 10 ? ".5" : "",
++				      ib_width_enum_to_int(attr.active_width),
++				      speed);
++		}
+ 	}
+-
+-	rate = 25 * ib_width_enum_to_int(attr.active_width) * attr.active_speed;
+-	if (rate < 0)
+-		return -EINVAL;
+-
+-	return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
+-		       rate / 10, rate % 10 ? ".5" : "",
+-		       ib_width_enum_to_int(attr.active_width), speed);
++out:
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
+@@ -211,22 +239,42 @@ static ssize_t phys_state_show(struct ib
+ {
+ 	struct ib_port_attr attr;
+ 
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	switch (attr.phys_state) {
+-	case 1:  return sprintf(buf, "1: Sleep\n");
+-	case 2:  return sprintf(buf, "2: Polling\n");
+-	case 3:  return sprintf(buf, "3: Disabled\n");
+-	case 4:  return sprintf(buf, "4: PortConfigurationTraining\n");
+-	case 5:  return sprintf(buf, "5: LinkUp\n");
+-	case 6:  return sprintf(buf, "6: LinkErrorRecovery\n");
+-	case 7:  return sprintf(buf, "7: Phy Test\n");
+-	default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.phys_state) {
++			case 1:
++				ret = sprintf(buf, "1: Sleep\n");
++				break;
++			case 2:
++				ret = sprintf(buf, "2: Polling\n");
++				break;
++			case 3:
++				ret = sprintf(buf, "3: Disabled\n");
++				break;
++			case 4:
++				ret = sprintf(buf, "4: PortConfigurationTraining\n");
++				break;
++			case 5:
++				ret = sprintf(buf, "5: LinkUp\n");
++				break;
++			case 6:
++				ret = sprintf(buf, "6: LinkErrorRecovery\n");
++				break;
++			case 7:
++				ret = sprintf(buf, "7: Phy Test\n");
++				break;
++			default:
++				ret = sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++				break;
++			}
++		}
+ 	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static PORT_ATTR_RO(state);
+@@ -256,21 +304,24 @@ static ssize_t show_port_gid(struct ib_p
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	union ib_gid gid;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) gid.raw)[0]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[1]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[2]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[3]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[4]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[5]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[6]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[7]));
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) gid.raw)[0]),
++				      be16_to_cpu(((__be16 *) gid.raw)[1]),
++				      be16_to_cpu(((__be16 *) gid.raw)[2]),
++				      be16_to_cpu(((__be16 *) gid.raw)[3]),
++				      be16_to_cpu(((__be16 *) gid.raw)[4]),
++				      be16_to_cpu(((__be16 *) gid.raw)[5]),
++				      be16_to_cpu(((__be16 *) gid.raw)[6]),
++				      be16_to_cpu(((__be16 *) gid.raw)[7]));
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
+@@ -279,13 +330,16 @@ static ssize_t show_port_pkey(struct ib_
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	u16 pkey;
+-	ssize_t ret;
+-
+-	ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%04x\n", pkey);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
++		if (!ret)
++			ret = sprintf(buf, "0x%04x\n", pkey);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ #define PORT_PMA_ATTR(_name, _counter, _width, _offset)			\
+@@ -308,6 +346,12 @@ static ssize_t show_pma_counter(struct i
+ 	if (!p->ibdev->process_mad)
+ 		return sprintf(buf, "N/A (no PMA)\n");
+ 
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (!ibdev_is_alive(p->ibdev)) {
++		ret = -ENODEV;
++		goto out;
++	}
++
+ 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
+ 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ 	if (!in_mad || !out_mad) {
+@@ -354,7 +414,7 @@ static ssize_t show_pma_counter(struct i
+ out:
+ 	kfree(in_mad);
+ 	kfree(out_mad);
+-
++	mutex_unlock(&p->ibdev->sysfs_mutex);
+ 	return ret;
+ }
+ 
+@@ -594,20 +654,20 @@ static ssize_t show_sys_image_guid(struc
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_attr attr;
+-	ssize_t ret;
+-
+-	if (!ibdev_is_alive(dev))
+-		return -ENODEV;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_device(dev, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_query_device(dev, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_node_guid(struct device *device,
+@@ -639,17 +699,20 @@ static ssize_t set_node_desc(struct devi
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_modify desc = {};
+-	int ret;
++	int ret = -ENODEV;
+ 
+ 	if (!dev->modify_device)
+ 		return -EIO;
+ 
+ 	memcpy(desc.node_desc, buf, min_t(int, count, 64));
+-	ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
+-	if (ret)
+-		return ret;
+-
+-	return count;
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
++		if (!ret)
++			ret = count;
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
+@@ -677,14 +740,18 @@ static ssize_t show_protocol_stat(const 
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	union rdma_protocol_stats stats;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = dev->get_protocol_stats(dev, &stats);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%llu\n",
+-		       (unsigned long long) ((u64 *) &stats)[offset]);
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = dev->get_protocol_stats(dev, &stats);
++		if (!ret)
++			ret = sprintf(buf, "%llu\n",
++				      (unsigned long long)
++				      ((u64 *) &stats)[offset]);
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ /* generate a read-only iwarp statistics attribute */
+Index: ofed_kernel/include/rdma/ib_verbs.h
+===================================================================
+--- ofed_kernel.orig/include/rdma/ib_verbs.h	2009-02-22 15:36:40.252210000 +0200
++++ ofed_kernel/include/rdma/ib_verbs.h	2009-02-22 15:38:28.678322000 +0200
+@@ -1205,6 +1205,7 @@ struct ib_device {
+ 		IB_DEV_REGISTERED,
+ 		IB_DEV_UNREGISTERED
+ 	}                            reg_state;
++	struct mutex		     sysfs_mutex;
+ 
+ 	u64			     uverbs_cmd_mask;
+ 	int			     uverbs_abi_ver;
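
The pattern applied throughout sysfs.c in the patch above is always the same:
initialize ret to -ENODEV, take the per-device sysfs_mutex, and perform the
query only if the device is still alive; ib_unregister_device() moves
reg_state to IB_DEV_UNREGISTERED under the same mutex, so a show/store handler
can never observe a half-torn-down device.  Stripped of the specific
attribute, a handler following the pattern looks like this (a sketch based on
the hunks above; the attribute shown is illustrative):

    static ssize_t example_show(struct ib_port *p, struct port_attribute *unused,
                                char *buf)
    {
            struct ib_port_attr attr;
            ssize_t ret = -ENODEV;

            mutex_lock(&p->ibdev->sysfs_mutex);
            if (ibdev_is_alive(p->ibdev)) {
                    /* Low-level driver data stays valid while we hold
                     * sysfs_mutex, because unregistration serializes on it. */
                    ret = ib_query_port(p->ibdev, p->port_num, &attr);
                    if (!ret)
                            ret = sprintf(buf, "0x%x\n", attr.lid);
            }
            mutex_unlock(&p->ibdev->sysfs_mutex);
            return ret;
    }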

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/cxgb3_0002_undo_250.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/cxgb3_0002_undo_250.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/cxgb3_0002_undo_250.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,8 @@
-commit 204e2f98c2d13f869b8541f3c57c7314f75cab11
-Author: Divy Le Ray <divy at chelsio.com>
-Date:   Tue May 6 19:26:01 2008 -0700
-
-    cxgb3 - fix EEH
-    
-    Reset the chip when the PCI link goes down.
-    Preserve the napi structure when a sge qset's resources are freed.
-    Replay only HW initialization when the chip comes out of reset.
-    
-    Signed-off-by: Divy Le ray <divy at chelsio.com>
-    Signed-off-by: Jeff Garzik <jgarzik at redhat.com>
-
----
- drivers/net/cxgb3/common.h     |    1 -
- drivers/net/cxgb3/cxgb3_main.c |   10 ++++------
- drivers/net/cxgb3/regs.h       |    8 --------
- drivers/net/cxgb3/sge.c        |   31 ++-----------------------------
- drivers/net/cxgb3/t3_hw.c      |   28 ----------------------------
- 5 files changed, 6 insertions(+), 72 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/common.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/common.h
-+++ ofed_kernel/drivers/net/cxgb3/common.h
-@@ -700,7 +700,6 @@ void mac_prep(struct cmac *mac, struct a
+diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
+index d5e9bf7..dc2c79d 100644
+--- a/drivers/net/cxgb3/common.h
++++ b/drivers/net/cxgb3/common.h
+@@ -726,7 +726,6 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
  void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
  int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
  		    int reset);
@@ -31,11 +10,11 @@
  void t3_led_ready(struct adapter *adapter);
  void t3_fatal_err(struct adapter *adapter);
  void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -2449,6 +2449,9 @@ static pci_ers_result_t t3_io_error_dete
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 8de820e..48fbda6 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -2485,6 +2485,9 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
  	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
  		offload_close(&adapter->tdev);
  
@@ -45,7 +24,7 @@
  	adapter->flags &= ~FULL_INIT_DONE;
  
  	pci_disable_device(pdev);
-@@ -2473,12 +2476,8 @@ static pci_ers_result_t t3_io_slot_reset
+@@ -2509,12 +2512,8 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
  		goto err;
  	}
  	pci_set_master(pdev);
@@ -59,7 +38,7 @@
  		goto err;
  
  	return PCI_ERS_RESULT_RECOVERED;
-@@ -2630,7 +2629,6 @@ static int __devinit init_one(struct pci
+@@ -2666,7 +2665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
  	}
  
  	pci_set_master(pdev);
@@ -67,10 +46,10 @@
  
  	mmio_start = pci_resource_start(pdev, 0);
  	mmio_len = pci_resource_len(pdev, 0);
-Index: ofed_kernel/drivers/net/cxgb3/regs.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/regs.h
-+++ ofed_kernel/drivers/net/cxgb3/regs.h
+diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
+index a035d5c..086cd02 100644
+--- a/drivers/net/cxgb3/regs.h
++++ b/drivers/net/cxgb3/regs.h
 @@ -444,14 +444,6 @@
  
  #define A_PCIE_CFG 0x88
@@ -86,11 +65,11 @@
  #define S_PCIE_CLIDECEN    16
  #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
  #define F_PCIE_CLIDECEN    V_PCIE_CLIDECEN(1U)
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index 1b0861d..912f816 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
  }
  
  /**
@@ -124,7 +103,7 @@
   *	free_qset - free the resources of an SGE queue set
   *	@adapter: the adapter owning the queue set
   *	@q: the queue set
-@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter 
+@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -133,20 +112,11 @@
  }
  
  /**
-@@ -1429,7 +1402,7 @@ static void restart_ctrlq(unsigned long 
-  */
- int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
- {
--	int ret;
-+	int ret;
- 	local_bh_disable();
- 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
- 	local_bh_enable();
-Index: ofed_kernel/drivers/net/cxgb3/t3_hw.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/t3_hw.c
-+++ ofed_kernel/drivers/net/cxgb3/t3_hw.c
-@@ -3265,7 +3265,6 @@ static void config_pcie(struct adapter *
+diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
+index bf5c076..533fc74 100644
+--- a/drivers/net/cxgb3/t3_hw.c
++++ b/drivers/net/cxgb3/t3_hw.c
+@@ -3303,7 +3303,6 @@ static void config_pcie(struct adapter *adap)
  
  	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
  	t3_set_reg_field(adap, A_PCIE_CFG, 0,
@@ -154,15 +124,14 @@
  			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
  }
  
-@@ -3657,30 +3656,3 @@ void t3_led_ready(struct adapter *adapte
- 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+@@ -3706,31 +3705,3 @@ void t3_led_ready(struct adapter *adapter)
  			 F_GPIO0_OUT_VAL);
  }
--
+ 
 -int t3_replay_prep_adapter(struct adapter *adapter)
 -{
 -	const struct adapter_info *ai = adapter->params.info;
--	unsigned int i, j = 0;
+-	unsigned int i, j = -1;
 -	int ret;
 -
 -	early_hw_init(adapter, ai);
@@ -171,15 +140,17 @@
 -		return ret;
 -
 -	for_each_port(adapter, i) {
+-		const struct port_type_info *pti;
 -		struct port_info *p = adap2pinfo(adapter, i);
--		while (!adapter->params.vpd.port_type[j])
--			++j;
 -
--		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
--					ai->mdio_ops);
+-		while (!adapter->params.vpd.port_type[++j])
+-			;
 -
+-		pti = &port_types[adapter->params.vpd.port_type[j]];
+-		ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
+-		if (ret)
+-			return ret;
 -		p->phy.ops->power_down(&p->phy, 1);
--		++j;
 -	}
 -
 -return 0;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/cxgb3_0010_napi.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/cxgb3_0010_napi.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/cxgb3_0010_napi.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,21 @@
----
- drivers/net/cxgb3/adapter.h    |   22 +++--
- drivers/net/cxgb3/cxgb3_main.c |   94 ++++++++++++++++------
- drivers/net/cxgb3/sge.c        |  170 ++++++++++++++++++++++++-----------------
- 3 files changed, 185 insertions(+), 101 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/adapter.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/adapter.h
-+++ ofed_kernel/drivers/net/cxgb3/adapter.h
-@@ -47,13 +47,11 @@
- #include <asm/io.h>
+diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
+index 3b33ecb..21dad82 100644
+--- a/drivers/net/cxgb3/adapter.h
++++ b/drivers/net/cxgb3/adapter.h
+@@ -48,12 +48,10 @@
  
  struct vlan_group;
--struct adapter;
+ struct adapter;
 -struct sge_qset;
  
-+struct adapter;
  struct port_info {
  	struct adapter *adapter;
  	struct vlan_group *vlan_grp;
 -	struct sge_qset *qs;
- 	const struct port_type_info *port_type;
  	u8 port_id;
  	u8 rx_csum_offload;
-@@ -184,8 +182,6 @@ enum {				/* per port SGE statistics */
+ 	u8 nqsets;
+@@ -183,8 +181,6 @@ enum {				/* per port SGE statistics */
  #define T3_MAX_LRO_MAX_PKTS 64
  
  struct sge_qset {		/* an SGE queue set */
@@ -32,7 +24,7 @@
  	struct sge_rspq rspq;
  	struct sge_fl fl[SGE_RXQ_PER_SET];
  	struct sge_txq txq[SGE_TXQ_PER_SET];
-@@ -196,7 +192,7 @@ struct sge_qset {		/* an SGE queue set *
+@@ -195,7 +191,7 @@ struct sge_qset {		/* an SGE queue set */
  	int lro_enabled;
  	int lro_frag_len;
  	void *lro_va;
@@ -41,7 +33,7 @@
  	unsigned long txq_stopped;	/* which Tx queues are stopped */
  	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
  	unsigned long port_stats[SGE_PSTAT_MAX];
-@@ -241,6 +237,12 @@ struct adapter {
+@@ -240,6 +236,12 @@ struct adapter {
  	struct delayed_work adap_check_task;
  	struct work_struct ext_intr_handler_task;
  
@@ -54,7 +46,7 @@
  	struct dentry *debugfs_root;
  
  	struct mutex mdio_lock;
-@@ -267,6 +269,12 @@ static inline struct port_info *adap2pin
+@@ -266,6 +268,12 @@ static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
  	return netdev_priv(adap->port[idx]);
  }
  
@@ -67,7 +59,7 @@
  #define OFFLOAD_DEVMAP_BIT 15
  
  #define tdev2adap(d) container_of(d, struct adapter, tdev)
-@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, str
+@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -76,11 +68,11 @@
  int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
  		unsigned char *data);
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -410,17 +410,49 @@ static void setup_rss(struct adapter *ad
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 0f4c694..342d441 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -435,17 +435,49 @@ static void setup_rss(struct adapter *adap)
  		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
  }
  
@@ -98,9 +90,7 @@
 -	int i;
 +	int i, j, dummy_idx = 0;
 +	struct net_device *nd;
- 
--	for (i = 0; i < SGE_QSETS; i++) {
--		struct sge_qset *qs = &adap->sge.qs[i];
++
 +	for_each_port(adap, i) {
 +		struct net_device *dev = adap->port[i];
 +		const struct port_info *pi = netdev_priv(dev);
@@ -112,7 +102,9 @@
 +				nd = alloc_netdev(sizeof(*p), "", ether_setup);
 +				if (!nd)
 +					goto free_all;
-+
+ 
+-	for (i = 0; i < SGE_QSETS; i++) {
+-		struct sge_qset *qs = &adap->sge.qs[i];
 +				p = netdev_priv(nd);
 +				p->adapter = adap;
 +				nd->weight = 64;
@@ -137,7 +129,7 @@
  }
  
  /*
-@@ -431,18 +463,20 @@ static void init_napi(struct adapter *ad
+@@ -456,18 +488,20 @@ static void init_napi(struct adapter *adap)
  static void quiesce_rx(struct adapter *adap)
  {
  	int i;
@@ -168,7 +160,7 @@
  }
  
  /**
-@@ -455,7 +489,7 @@ static void enable_all_napi(struct adapt
+@@ -480,7 +514,7 @@ static void enable_all_napi(struct adapter *adap)
   */
  static int setup_sge_qsets(struct adapter *adap)
  {
@@ -177,7 +169,7 @@
  	unsigned int ntxq = SGE_TXQ_PER_SET;
  
  	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
-@@ -463,14 +497,15 @@ static int setup_sge_qsets(struct adapte
+@@ -488,14 +522,15 @@ static int setup_sge_qsets(struct adapter *adap)
  
  	for_each_port(adap, i) {
  		struct net_device *dev = adap->port[i];
@@ -196,7 +188,7 @@
  			if (err) {
  				t3_free_sge_resources(adap);
  				return err;
-@@ -884,6 +919,10 @@ static int cxgb_up(struct adapter *adap)
+@@ -909,6 +944,10 @@ static int cxgb_up(struct adapter *adap)
  				goto out;
  		}
  
@@ -207,7 +199,7 @@
  		err = t3_init_hw(adap, 0);
  		if (err)
  			goto out;
-@@ -896,7 +935,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -921,7 +960,6 @@ static int cxgb_up(struct adapter *adap)
  			goto out;
  
  		setup_rss(adap);
@@ -215,7 +207,7 @@
  		adap->flags |= FULL_INIT_DONE;
  	}
  
-@@ -924,7 +962,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -949,7 +987,6 @@ static int cxgb_up(struct adapter *adap)
  				      adap->name, adap)))
  		goto irq_err;
  
@@ -223,7 +215,7 @@
  	t3_sge_start(adap);
  	t3_intr_enable(adap);
  
-@@ -1061,10 +1098,8 @@ static int cxgb_open(struct net_device *
+@@ -1086,10 +1123,8 @@ static int cxgb_open(struct net_device *dev)
  	int other_ports = adapter->open_device_map & PORT_MASK;
  	int err;
  
@@ -235,7 +227,7 @@
  
  	set_bit(pi->port_id, &adapter->open_device_map);
  	if (is_offload(adapter) && !ofld_disable) {
-@@ -2700,6 +2735,7 @@ static int __devinit init_one(struct pci
+@@ -2736,6 +2771,7 @@ static int __devinit init_one(struct pci_dev *pdev,
  #ifdef CONFIG_NET_POLL_CONTROLLER
  		netdev->poll_controller = cxgb_netpoll;
  #endif
@@ -243,7 +235,7 @@
  
  		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
  	}
-@@ -2800,6 +2836,12 @@ static void __devexit remove_one(struct 
+@@ -2836,6 +2872,12 @@ static void __devexit remove_one(struct pci_dev *pdev)
  		t3_free_sge_resources(adapter);
  		cxgb_disable_msi(adapter);
  
@@ -256,11 +248,11 @@
  		for_each_port(adapter, i)
  			if (adapter->port[i])
  				free_netdev(adapter->port[i]);
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter 
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index f6bc6fe..3bbf626 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -270,7 +262,7 @@
  	memset(q, 0, sizeof(*q));
  }
  
-@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, str
+@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
  	unsigned int ndesc, pidx, credits, gen, compl;
  	const struct port_info *pi = netdev_priv(dev);
  	struct adapter *adap = pi->adapter;
@@ -279,7 +271,7 @@
  	struct sge_txq *q = &qs->txq[TXQ_ETH];
  
  	/*
-@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long 
+@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long data)
  	struct sk_buff *skb;
  	struct sge_qset *qs = (struct sge_qset *)data;
  	struct sge_txq *q = &qs->txq[TXQ_CTRL];
@@ -295,7 +287,7 @@
  
  		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
  
-@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long 
+@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long data)
  
  	spin_unlock(&q->lock);
  	wmb();
@@ -304,7 +296,7 @@
  		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
  }
  
-@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struc
+@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
  	else {
  		struct sge_qset *qs = rspq_to_qset(q);
  
@@ -314,7 +306,7 @@
  		q->rx_head = skb;
  	}
  	q->rx_tail = skb;
-@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundl
+@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev,
   *	receive handler.  Batches need to be of modest size as we do prefetches
   *	on the packets in each.
   */
@@ -357,7 +349,7 @@
  			prefetch(head->data);
  			skbs[ngathered] = head;
  			head = head->next;
-@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct 
+@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct *napi, int budget)
  		}
  		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
  	}
@@ -370,7 +362,7 @@
  }
  
  /**
-@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const
+@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const struct rsp_desc *r)
  
  /**
   *	napi_rx_handler - the NAPI handler for Rx processing
@@ -449,7 +441,7 @@
  }
  
  /**
-@@ -2448,7 +2462,8 @@ static inline int handle_responses(struc
+@@ -2448,7 +2462,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
  		return 0;
  	}
@@ -459,7 +451,7 @@
  	return 1;
  }
  
-@@ -2459,7 +2474,8 @@ static inline int handle_responses(struc
+@@ -2459,7 +2474,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -469,7 +461,7 @@
  	struct sge_rspq *q = &qs->rspq;
  
  	spin_lock(&q->lock);
-@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, vo
+@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -484,7 +476,7 @@
  		q->unhandled_irqs++;
  	spin_unlock(&q->lock);
  	return IRQ_HANDLED;
-@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, 
+@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, void *cookie)
  	return IRQ_HANDLED;
  }
  
@@ -502,7 +494,7 @@
  		return 1;
  	}
  	return 0;
-@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int 
+@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
  
  	spin_lock(&q->lock);
  
@@ -515,7 +507,7 @@
  	if (!new_packets && t3_slow_intr_handler(adap) == 0)
  		q->unhandled_irqs++;
  
-@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, voi
+@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, void *cookie)
  static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  {
  	u32 map;
@@ -527,7 +519,7 @@
  
  	t3_write_reg(adap, A_PL_CLI, 0);
  	map = t3_read_reg(adap, A_SG_DATA_INTR);
-@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq
+@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  	if (unlikely(map & F_ERRINTR))
  		t3_slow_intr_handler(adap);
  
@@ -550,7 +542,7 @@
  
  	spin_unlock(&q0->lock);
  	return IRQ_HANDLED;
-@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long d
+@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long data)
  {
  	spinlock_t *lock;
  	struct sge_qset *qs = (struct sge_qset *)data;
@@ -560,7 +552,7 @@
  
  	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
  		reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
-@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long d
+@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long data)
  		spin_unlock(&qs->txq[TXQ_OFLD].lock);
  	}
  	lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
@@ -572,7 +564,7 @@
  			u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
  
  			if (qs->fl[0].credits < qs->fl[0].size)
-@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long d
+@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long data)
   */
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
  {
@@ -586,7 +578,7 @@
  }
  
  /**
-@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_
+@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
   */
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -595,7 +587,7 @@
  {
  	int i, avail, ret = -ENOMEM;
  	struct sge_qset *q = &adapter->sge.qs[id];
-@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *ad
+@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  	}
  
  	spin_unlock_irq(&adapter->sge.reg_lock);
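
What the cxgb3_0010_napi changes above boil down to is translating between the
2.6.24+ NAPI interface, where each queue set owns a struct napi_struct, and
the older model this backport needs, where every polling context is a
(possibly dummy) struct net_device with its own ->poll and ->weight.  The two
callback shapes being bridged look roughly like this (a sketch; the function
names and the stub are illustrative, not taken from the driver):

    #include <linux/netdevice.h>

    /* Stand-in for the driver's real response processing. */
    static int process_responses(void *ctx, int budget)
    {
            (void) ctx;
            return budget / 2;      /* pretend half the budget was used */
    }

    /* 2.6.24+ style: the napi_struct carries the polling state. */
    static int qset_poll_new(struct napi_struct *napi, int budget)
    {
            int work = process_responses(napi, budget);

            if (work < budget)
                    napi_complete(napi);
            return work;
    }

    /* Pre-2.6.24 style, as restored by this patch: the net_device carries
     * the state and *budget is decremented in place. */
    static int qset_poll_old(struct net_device *dev, int *budget)
    {
            int limit = min(*budget, dev->quota);
            int work = process_responses(dev, limit);

            *budget -= work;
            dev->quota -= work;
            if (work < limit) {
                    netif_rx_complete(dev);
                    return 0;       /* polling finished */
            }
            return 1;               /* more work pending */
    }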

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,23 +3,32 @@
  drivers/infiniband/hw/ipath/ipath_user_sdma.c |    6 +++---
  2 files changed, 4 insertions(+), 4 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:37:35.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:41:22.000000000 -0800
 @@ -698,7 +698,7 @@ retry:
  
  	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
  			      tx->map_len, DMA_TO_DEVICE);
--	if (dma_mapping_error(&dd->pcidev->dev, addr)) {
-+	if (dma_mapping_error(addr)) {
- 		ret = -EIO;
- 		goto unlock;
- 	}
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+-	if (dma_mapping_error(&dd->pcidev->dev, addr))
++	if (dma_mapping_error(addr))
+ 		goto ioerr;
+ 
+ 	dwoffset = tx->map_len >> 2;
+@@ -739,7 +739,7 @@ retry:
+ 		dw = (len + 3) >> 2;
+ 		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
+ 				      DMA_TO_DEVICE);
+-		if (dma_mapping_error(&dd->pcidev->dev, addr))
++		if (dma_mapping_error(addr))
+ 			goto unmap;
+ 		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
+ 		/* SDmaUseLargeBuf has to be set in every descriptor */
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:18.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:54.000000000 -0800
 @@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(cons
  
  	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
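
The change above is needed because dma_mapping_error() only gained its
struct device argument in 2.6.27; kernels up to 2.6.26 take just the
dma_addr_t.  A backport header can hide the difference behind a wrapper along
these lines (a sketch; OFED's real backport headers may spell it differently):

    #include <linux/version.h>
    #include <linux/dma-mapping.h>

    #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
    /* Older kernels ignore the device: accept and drop it so callers can
     * keep using the modern two-argument form. */
    #define compat_dma_mapping_error(dev, addr) dma_mapping_error(addr)
    #else
    #define compat_dma_mapping_error(dev, addr) dma_mapping_error(dev, addr)
    #endif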

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/ipoib_0100_to_2.6.21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/ipoib_0100_to_2.6.21.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/ipoib_0100_to_2.6.21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -12,11 +12,11 @@
  drivers/infiniband/ulp/ipoib/ipoib_multicast.c |   10 ++--
  5 files changed, 61 insertions(+), 64 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:56:08.000000000 +0200
-@@ -279,8 +279,6 @@ struct ipoib_dev_priv {
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:46:20.000000000 +0300
+@@ -277,8 +277,6 @@ struct ipoib_dev_priv {
  
  	struct net_device *dev;
  
@@ -25,7 +25,7 @@
  	unsigned long flags;
  
  	struct mutex vlan_mutex;
-@@ -338,6 +336,8 @@ struct ipoib_dev_priv {
+@@ -336,6 +334,8 @@ struct ipoib_dev_priv {
  
  	struct ib_event_handler event_handler;
  
@@ -34,7 +34,7 @@
  	struct net_device *parent;
  	struct list_head child_intfs;
  	struct list_head list;
-@@ -425,7 +425,7 @@ extern struct workqueue_struct *ipoib_wo
+@@ -423,7 +423,7 @@ extern struct workqueue_struct *ipoib_wo
  
  /* functions */
  
@@ -43,10 +43,10 @@
  void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
  void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:46:20.000000000 +0300
 @@ -593,7 +593,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		ipoib_dbg(priv, "cm recv error "
  			   "(status=%d, wrid=%d vend_err %x)\n",
@@ -56,7 +56,7 @@
  		if (has_srq)
  			goto repost;
  		else {
-@@ -658,7 +658,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
+@@ -646,7 +646,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		 * this packet and reuse the old buffer.
  		 */
  		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
@@ -65,7 +65,7 @@
  		goto repost;
  	}
  
-@@ -676,8 +676,8 @@ copied:
+@@ -664,8 +664,8 @@ copied:
  	skb_pull(skb, IPOIB_ENCAP_LEN);
  
  	dev->last_rx = jiffies;
@@ -76,7 +76,7 @@
  
  	skb->dev = dev;
  	/* XXX get correct PACKET_ type here */
-@@ -726,8 +726,8 @@ void ipoib_cm_send(struct net_device *de
+@@ -714,8 +714,8 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(skb->len > tx->mtu)) {
  		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
  			   skb->len, tx->mtu);
@@ -87,7 +87,7 @@
  		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
  		return;
  	}
-@@ -746,7 +746,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -734,7 +734,7 @@ void ipoib_cm_send(struct net_device *de
  	tx_req->skb = skb;
  	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
  	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
@@ -96,7 +96,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -756,7 +756,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -744,7 +744,7 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
  			       addr, skb->len))) {
  		ipoib_warn(priv, "post_send failed\n");
@@ -105,7 +105,7 @@
  		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
  		dev_kfree_skb_any(skb);
  	} else {
-@@ -793,8 +793,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
+@@ -781,8 +781,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
  	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
  
  	/* FIXME: is this right? Shouldn't we only increment on success? */
@@ -116,10 +116,10 @@
  
  	dev_kfree_skb_any(tx_req->skb);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:43:16.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:49:04.000000000 +0300
 @@ -261,7 +261,7 @@ static void ipoib_ib_handle_rx_wc(struct
  	 * this packet and reuse the old buffer.
  	 */
@@ -263,60 +263,41 @@
  		--priv->tx_outstanding;
  		ipoib_dma_unmap_tx(priv->ca, tx_req);
  		dev_kfree_skb_any(skb);
-@@ -809,6 +812,7 @@ int ipoib_ib_dev_stop(struct net_device 
+@@ -708,8 +711,7 @@ int ipoib_ib_dev_open(struct net_device 
+ 	priv->poll_timer.function = ipoib_ib_tx_timer_func;
+ 	priv->poll_timer.data = (unsigned long)dev;
+ 
+-	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_enable(&priv->napi);
++	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+ 
+ 	return 0;
+ }
+@@ -828,8 +830,8 @@ int ipoib_ib_dev_stop(struct net_device 
+ 	struct ipoib_tx_buf *tx_req;
  	int i;
  
- 	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+-	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_disable(&priv->napi);
++	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 +	netif_poll_disable(dev);
  
  	ipoib_cm_dev_stop(dev);
  
-@@ -893,6 +897,7 @@ timeout:
- 		msleep(1);
- 	}
+@@ -903,6 +905,7 @@ timeout:
  
+ 	ipoib_ah_dev_cleanup(dev);
+ 
 +	netif_poll_enable(dev);
  	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
  
  	return 0;
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:55:35.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:58:18.000000000 +0200
-@@ -106,20 +106,16 @@ int ipoib_open(struct net_device *dev)
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:44:02.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:46:20.000000000 +0300
+@@ -585,7 +585,7 @@ static void neigh_add_path(struct sk_buf
  
- 	ipoib_dbg(priv, "bringing up interface\n");
- 
--	if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
--		napi_enable(&priv->napi);
-+	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- 
- 	if (ipoib_pkey_dev_delay_open(dev))
- 		return 0;
- 
--	if (ipoib_ib_dev_open(dev)) {
--		napi_disable(&priv->napi);
--		return -EINVAL;
--	}
-+	if (ipoib_ib_dev_open(dev))
-+  		return -EINVAL;
- 
- 	if (ipoib_ib_dev_up(dev)) {
- 		ipoib_ib_dev_stop(dev, 1);
--		napi_disable(&priv->napi);
- 		return -EINVAL;
- 	}
- 
-@@ -152,7 +148,6 @@ static int ipoib_stop(struct net_device 
- 	ipoib_dbg(priv, "stopping interface\n");
- 
- 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
--	napi_disable(&priv->napi);
- 
- 	netif_stop_queue(dev);
- 
-@@ -604,7 +599,7 @@ static void neigh_add_path(struct sk_buf
- 
  	neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
  	if (!neigh) {
 -		++dev->stats.tx_dropped;
@@ -324,7 +305,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -667,7 +662,7 @@ err_list:
+@@ -646,7 +646,7 @@ err_list:
  err_path:
  	ipoib_neigh_free(dev, neigh);
  err_drop:
@@ -333,7 +314,7 @@
  	dev_kfree_skb_any(skb);
  
  	spin_unlock_irqrestore(&priv->lock, flags);
-@@ -714,7 +709,7 @@ static void unicast_arp_send(struct sk_b
+@@ -697,7 +697,7 @@ static void unicast_arp_send(struct sk_b
  			} else
  				__path_add(dev, path);
  		} else {
@@ -342,7 +323,7 @@
  			dev_kfree_skb_any(skb);
  		}
  
-@@ -733,7 +728,7 @@ static void unicast_arp_send(struct sk_b
+@@ -716,7 +716,7 @@ static void unicast_arp_send(struct sk_b
  		skb_push(skb, sizeof *phdr);
  		__skb_queue_tail(&path->queue, skb);
  	} else {
@@ -351,7 +332,7 @@
  		dev_kfree_skb_any(skb);
  	}
  
-@@ -790,7 +785,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -773,7 +773,7 @@ static int ipoib_start_xmit(struct sk_bu
  			__skb_queue_tail(&neigh->queue, skb);
  			spin_unlock_irqrestore(&priv->lock, flags);
  		} else {
@@ -360,7 +341,7 @@
  			dev_kfree_skb_any(skb);
  		}
  	} else {
-@@ -816,7 +811,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -799,7 +799,7 @@ static int ipoib_start_xmit(struct sk_bu
  					   IPOIB_QPN(phdr->hwaddr),
  					   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
  				dev_kfree_skb_any(skb);
@@ -369,7 +350,7 @@
  				return NETDEV_TX_OK;
  			}
  
-@@ -842,7 +837,7 @@ static void ipoib_timeout(struct net_dev
+@@ -825,7 +825,7 @@ static void ipoib_timeout(struct net_dev
  static int ipoib_hard_header(struct sk_buff *skb,
  			     struct net_device *dev,
  			     unsigned short type,
@@ -378,7 +359,7 @@
  {
  	struct ipoib_header *header;
  
-@@ -931,9 +926,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
+@@ -914,9 +914,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
  void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
  {
  	struct sk_buff *skb;
@@ -390,7 +371,7 @@
  		dev_kfree_skb_any(skb);
  	}
  	if (ipoib_cm_get(neigh))
-@@ -1008,10 +1004,6 @@ void ipoib_dev_cleanup(struct net_device
+@@ -991,10 +992,6 @@ void ipoib_dev_cleanup(struct net_device
  	priv->tx_ring = NULL;
  }
  
@@ -401,7 +382,7 @@
  static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
  		       void **tcph, u64 *hdr_flags, void *priv)
  {
-@@ -1069,13 +1061,13 @@ static void ipoib_setup(struct net_devic
+@@ -1052,13 +1049,13 @@ static void ipoib_setup(struct net_devic
  	dev->change_mtu		 = ipoib_change_mtu;
  	dev->hard_start_xmit	 = ipoib_start_xmit;
  	dev->tx_timeout		 = ipoib_timeout;
@@ -418,10 +399,10 @@
  
  	dev->watchdog_timeo	 = HZ;
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:46:20.000000000 +0300
 @@ -100,7 +100,7 @@ static void ipoib_mcast_free(struct ipoi
  	}
  
@@ -448,7 +429,7 @@
  			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
  		}
  		netif_tx_unlock_bh(dev);
-@@ -667,7 +668,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -677,7 +678,7 @@ void ipoib_mcast_send(struct net_device 
  	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)		||
  	    !priv->broadcast					||
  	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
@@ -457,7 +438,7 @@
  		dev_kfree_skb_any(skb);
  		goto unlock;
  	}
-@@ -682,7 +683,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -692,7 +693,7 @@ void ipoib_mcast_send(struct net_device 
  		if (!mcast) {
  			ipoib_warn(priv, "unable to allocate memory for "
  				   "multicast structure\n");
@@ -466,7 +447,7 @@
  			dev_kfree_skb_any(skb);
  			goto out;
  		}
-@@ -697,7 +698,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -707,7 +708,7 @@ void ipoib_mcast_send(struct net_device 
  		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
  			skb_queue_tail(&mcast->pkt_queue, skb);
  		else {

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,26 @@
+---
+ drivers/infiniband/ulp/ipoib/ipoib_fs.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+===================================================================
+--- ofa_kernel-1.4.orig/drivers/infiniband/ulp/ipoib/ipoib_fs.c
++++ ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+@@ -134,7 +134,7 @@ static int ipoib_mcg_open(struct inode *
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_mcg_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_mcg_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -240,7 +240,7 @@ static int ipoib_path_open(struct inode 
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_path_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_path_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 
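
The new ipoib_0500 patch exists only to silence a const-correctness warning:
older kernels declare seq_open() as taking a plain struct seq_operations *, so
passing IPoIB's const-qualified ops tables triggers a "discards qualifiers"
warning, and the backport simply casts the qualifier away at the call site.
In isolation the idiom looks like this (a sketch; the ops table and open
routine are illustrative):

    #include <linux/fs.h>
    #include <linux/seq_file.h>

    static const struct seq_operations example_seq_ops = {
            /* .start, .next, .stop and .show filled in by the driver */
    };

    static int example_seq_open(struct inode *inode, struct file *file)
    {
            /* Cast away const for pre-const seq_open() prototypes. */
            return seq_open(file, (struct seq_operations *) &example_seq_ops);
    }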

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -5,13 +5,13 @@
 
 Signed-off-by: Doron Shoham <dorons at voltaire.com>
 ---
- drivers/scsi/scsi_transport_iscsi.c |   95 ++++++++++++++++++++----------------
- 1 file changed, 55 insertions(+), 40 deletions(-)
+ drivers/scsi/scsi_transport_iscsi.c |   97 +++++++++++++++++++++---------------
+ 1 file changed, 57 insertions(+), 40 deletions(-)
 
-Index: ofed_kernel/drivers/scsi/scsi_transport_iscsi.c
+Index: ofa_kernel-1.4/drivers/scsi/scsi_transport_iscsi.c
 ===================================================================
---- ofed_kernel.orig/drivers/scsi/scsi_transport_iscsi.c
-+++ ofed_kernel/drivers/scsi/scsi_transport_iscsi.c
+--- ofa_kernel-1.4.orig/drivers/scsi/scsi_transport_iscsi.c
++++ ofa_kernel-1.4/drivers/scsi/scsi_transport_iscsi.c
 @@ -20,6 +20,8 @@
   * along with this program; if not, write to the Free Software
   * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
@@ -21,7 +21,18 @@
  #include <linux/module.h>
  #include <linux/mutex.h>
  #include <net/tcp.h>
-@@ -397,10 +399,12 @@ static void __iscsi_unblock_session(stru
+@@ -378,8 +380,10 @@ static void __iscsi_unblock_session(stru
+ 	struct iscsi_cls_session *session =
+ 			container_of(work, struct iscsi_cls_session,
+ 				     unblock_work);
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
+ 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
+ 	struct iscsi_host *ihost = shost->shost_data;
++#endif
+ 	unsigned long flags;
+ 
+ 	/*
+@@ -397,10 +401,12 @@ static void __iscsi_unblock_session(stru
  	 * the async scanning code (drivers like iscsi_tcp do login and
  	 * scanning from userspace).
  	 */
@@ -38,7 +49,7 @@
  }
  
  /**
-@@ -1294,45 +1298,56 @@ iscsi_if_recv_msg(struct sk_buff *skb, s
+@@ -1294,45 +1300,56 @@ iscsi_if_recv_msg(struct sk_buff *skb, s
   * Malformed skbs with wrong lengths or invalid creds are not processed.
   */
  static void
@@ -129,7 +140,7 @@
  	}
  	mutex_unlock(&rx_queue_mutex);
  }
-@@ -1738,7 +1753,7 @@ static __init int iscsi_transport_init(v
+@@ -1738,7 +1755,7 @@ static __init int iscsi_transport_init(v
  	return 0;
  
  release_nls:
@@ -138,7 +149,7 @@
  unregister_session_class:
  	transport_class_unregister(&iscsi_session_class);
  unregister_conn_class:
-@@ -1753,7 +1768,7 @@ unregister_transport_class:
+@@ -1753,7 +1770,7 @@ unregister_transport_class:
  static void __exit iscsi_transport_exit(void)
  {
  	destroy_workqueue(iscsi_eh_timer_workq);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/iw_nes_050_to_2_6_24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/iw_nes_050_to_2_6_24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/iw_nes_050_to_2_6_24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:23:23.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -1118,19 +1118,19 @@ static ssize_t nes_show_wqm_quanta(struc
  static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
  					const char *buf, size_t count)

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/iw_nes_100_to_2_6_23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/iw_nes_100_to_2_6_23.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/iw_nes_100_to_2_6_23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/Kconfig nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig
---- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:38:45.000000000 -0500
 @@ -2,7 +2,6 @@ config INFINIBAND_NES
  	tristate "NetEffect RNIC Driver"
  	depends on PCI && INET && INFINIBAND
@@ -10,8 +10,8 @@
  	  This is a low-level driver for NetEffect RDMA enabled
  	  Network Interface Cards (RNIC).
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -360,10 +360,11 @@ struct ib_qp *nes_get_qp(struct ib_devic
   */
  static void nes_print_macaddr(struct net_device *netdev)
@@ -29,9 +29,9 @@
  
  /**
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_cm.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2008-11-07 15:26:56.000000000 -0600
-@@ -998,7 +998,6 @@ static int nes_addr_resolve_neigh(struct
+--- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1086,7 +1086,6 @@ static int nes_addr_resolve_neigh(struct
  	struct flowi fl;
  	struct neighbour *neigh;
  	int rc = -1;
@@ -39,7 +39,7 @@
  
  	memset(&fl, 0, sizeof fl);
  	fl.nl_u.ip4_u.daddr = htonl(dst_ip);
-@@ -1012,8 +1011,11 @@ static int nes_addr_resolve_neigh(struct
+@@ -1100,8 +1099,11 @@ static int nes_addr_resolve_neigh(struct
  	if (neigh) {
  		if (neigh->nud_state & NUD_VALID) {
  			nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -53,7 +53,7 @@
  			nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
  					     dst_ip, NES_ARP_ADD);
  			rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
-@@ -1042,7 +1044,6 @@ static struct nes_cm_node *make_cm_node(
+@@ -1130,7 +1132,6 @@ static struct nes_cm_node *make_cm_node(
  	int arpindex = 0;
  	struct nes_device *nesdev;
  	struct nes_adapter *nesadapter;
@@ -61,7 +61,7 @@
  
  	/* create an hte and cm_node for this instance */
  	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
-@@ -1111,8 +1112,11 @@ static struct nes_cm_node *make_cm_node(
+@@ -1197,8 +1198,11 @@ static struct nes_cm_node *make_cm_node(
  
  	/* copy the mac addr to node context */
  	memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
@@ -76,8 +76,8 @@
  	add_hte_node(cm_core, cm_node);
  	atomic_inc(&cm_nodes_created);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2008-11-07 15:38:18.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:38:45.000000000 -0500
 @@ -38,14 +38,9 @@
  #include <linux/ip.h>
  #include <linux/tcp.h>
@@ -90,10 +90,10 @@
 -module_param(nes_lro_max_aggr, uint, 0444);
 -MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
 -
- static u32 crit_err_count;
- u32 int_mod_timer_init;
- u32 int_mod_cq_depth_256;
-@@ -1540,25 +1535,6 @@ static void nes_rq_wqes_timeout(unsigned
+ static int wide_ppm_offset;
+ module_param(wide_ppm_offset, int, 0644);
+ MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm");
+@@ -1531,25 +1526,6 @@ static void nes_rq_wqes_timeout(unsigned
  }
  
  
@@ -119,7 +119,7 @@
  /**
   * nes_init_nic_qp
   */
-@@ -1783,14 +1759,6 @@ int nes_init_nic_qp(struct nes_device *n
+@@ -1773,14 +1749,6 @@ int nes_init_nic_qp(struct nes_device *n
  			jumbomode = 1;
  		nes_nic_init_timer_defaults(nesdev, jumbomode);
  	}
@@ -134,7 +134,7 @@
  	return 0;
  }
  
-@@ -2541,7 +2509,7 @@ static void nes_nic_napi_ce_handler(stru
+@@ -2556,7 +2524,7 @@ static void nes_nic_napi_ce_handler(stru
  {
  	struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
  
@@ -143,7 +143,7 @@
  }
  
  
-@@ -2576,13 +2544,10 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2591,13 +2559,10 @@ void nes_nic_ce_handler(struct nes_devic
  	u16 pkt_type;
  	u16 rqes_processed = 0;
  	u8 sq_cqes = 0;
@@ -157,7 +157,7 @@
  	do {
  		if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
  				NES_NIC_CQE_VALID) {
-@@ -2714,17 +2679,9 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2729,17 +2694,9 @@ void nes_nic_ce_handler(struct nes_devic
  							>> 16);
  					nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
  							nesvnic->netdev->name, vlan_tag);
@@ -178,7 +178,7 @@
  
  skip_rx_indicate0:
  				nesvnic->netdev->last_rx = jiffies;
-@@ -2755,8 +2712,6 @@ skip_rx_indicate0:
+@@ -2770,8 +2727,6 @@ skip_rx_indicate0:
  
  	} while (1);
  
@@ -188,18 +188,18 @@
  		barrier();
  		/* restart the queue if it had been stopped */
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.h nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:38:45.000000000 -0500
 @@ -33,8 +33,6 @@
  #ifndef __NES_HW_H
  #define __NES_HW_H
  
 -#include <linux/inet_lro.h>
 -
+ #define NES_PHY_TYPE_CX4       1
  #define NES_PHY_TYPE_1G        2
  #define NES_PHY_TYPE_IRIS      3
- #define NES_PHY_TYPE_ARGUS     4
-@@ -991,8 +989,6 @@ struct nes_hw_tune_timer {
+@@ -1005,8 +1003,6 @@ struct nes_hw_tune_timer {
  #define NES_TIMER_ENABLE_LIMIT      4
  #define NES_MAX_LINK_INTERRUPTS     128
  #define NES_MAX_LINK_CHECK          200
@@ -208,7 +208,7 @@
  
  struct nes_adapter {
  	u64              fw_ver;
-@@ -1168,7 +1164,6 @@ struct nes_vnic {
+@@ -1194,7 +1190,6 @@ struct nes_vnic {
  	u32               msg_enable;
  	/* u32 tx_avail; */
  	__be32            local_ipaddr;
@@ -216,7 +216,7 @@
  	spinlock_t           tx_lock;	/* could use netdev tx lock? */
  	struct timer_list    rq_wqes_timer;
  	u32                  nic_mem_size;
-@@ -1196,9 +1191,6 @@ struct nes_vnic {
+@@ -1222,9 +1217,6 @@ struct nes_vnic {
  	u8  of_device_registered;
  	u8  rdma_enabled;
  	u8  rx_checksum_disabled;
@@ -227,8 +227,8 @@
  
  struct nes_ib_device {
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
 @@ -96,35 +96,38 @@ static int nics_per_function = 1;
  /**
   * nes_netdev_poll
@@ -292,7 +292,7 @@
  	netif_stop_queue(netdev);
  	list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
  		first_nesvnic = container_of(list_pos, struct nes_vnic, list);
-@@ -797,14 +798,16 @@ static int nes_netdev_set_mac_address(st
+@@ -775,14 +776,16 @@ static int nes_netdev_set_mac_address(st
  	int i;
  	u32 macaddr_low;
  	u16 macaddr_high;
@@ -312,7 +312,7 @@
  	macaddr_high  = ((u16)netdev->dev_addr[0]) << 8;
  	macaddr_high += (u16)netdev->dev_addr[1];
  	macaddr_low   = ((u32)netdev->dev_addr[2]) << 24;
-@@ -909,11 +912,11 @@ static void nes_netdev_set_multicast_lis
+@@ -887,11 +890,11 @@ static void nes_netdev_set_multicast_lis
  			if (mc_index >= max_pft_entries_avaiable)
  				break;
  			if (multicast_addr) {
@@ -329,7 +329,7 @@
  				macaddr_high  = ((u16)multicast_addr->dmi_addr[0]) << 8;
  				macaddr_high += (u16)multicast_addr->dmi_addr[1];
  				macaddr_low   = ((u32)multicast_addr->dmi_addr[2]) << 24;
-@@ -1063,9 +1066,6 @@ static const char nes_ethtool_stringset[
+@@ -1040,9 +1043,6 @@ static const char nes_ethtool_stringset[
  	"CQ Depth 32",
  	"CQ Depth 128",
  	"CQ Depth 256",
@@ -339,17 +339,17 @@
  };
  
  #define NES_ETHTOOL_STAT_COUNT  ARRAY_SIZE(nes_ethtool_stringset)
-@@ -1263,9 +1263,6 @@ static void nes_netdev_get_ethtool_stats
- 	target_stat_values[52] = int_mod_cq_depth_32;
- 	target_stat_values[53] = int_mod_cq_depth_128;
- 	target_stat_values[54] = int_mod_cq_depth_256;
--	target_stat_values[55] = nesvnic->lro_mgr.stats.aggregated;
--	target_stat_values[56] = nesvnic->lro_mgr.stats.flushed;
--	target_stat_values[57] = nesvnic->lro_mgr.stats.no_desc;
+@@ -1240,9 +1240,6 @@ static void nes_netdev_get_ethtool_stats
+ 	target_stat_values[++index] = int_mod_cq_depth_32;
+ 	target_stat_values[++index] = int_mod_cq_depth_128;
+ 	target_stat_values[++index] = int_mod_cq_depth_256;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
  
  }
  
-@@ -1542,8 +1539,6 @@ static struct ethtool_ops nes_ethtool_op
+@@ -1525,8 +1522,6 @@ static struct ethtool_ops nes_ethtool_op
  	.set_sg = ethtool_op_set_sg,
  	.get_tso = ethtool_op_get_tso,
  	.set_tso = ethtool_op_set_tso,
@@ -358,7 +358,7 @@
  };
  
  
-@@ -1615,7 +1610,8 @@ struct net_device *nes_netdev_init(struc
+@@ -1598,7 +1593,8 @@ struct net_device *nes_netdev_init(struc
  	netdev->type = ARPHRD_ETHER;
  	netdev->features = NETIF_F_HIGHDMA;
  	netdev->ethtool_ops = &nes_ethtool_ops;
@@ -368,7 +368,7 @@
  	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
  	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
  	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
-@@ -1648,7 +1644,6 @@ struct net_device *nes_netdev_init(struc
+@@ -1630,7 +1626,6 @@ struct net_device *nes_netdev_init(struc
  
  	if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
  		netdev->features |= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/iw_nes_200_to_2_6_22.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/iw_nes_200_to_2_6_22.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/iw_nes_200_to_2_6_22.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:52:27.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -593,16 +593,22 @@ static int __devinit nes_probe(struct pc
  						nesdev->nesadapter->port_count;
  	}
@@ -35,9 +35,9 @@
  
  	tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.h nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h
---- linux-2.6/drivers/infiniband/hw/nes/nes.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2008-11-07 15:52:27.000000000 -0600
-@@ -276,14 +276,7 @@ static inline __le32 get_crc_value(struc
+--- linux-2.6/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:38:45.000000000 -0500
+@@ -274,14 +274,7 @@ static inline __le32 get_crc_value(struc
  	u32 crc_value;
  	crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad));
  

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/iw_nes_210_to_2_6_21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/iw_nes_210_to_2_6_21.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/iw_nes_210_to_2_6_21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,23 @@
+diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1549,6 +1549,11 @@ static void nes_netdev_vlan_rx_register(
+ 	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ }
+ 
++static void nes_netdev_vlan_rx_kill_vid(struct net_device *netdev, 
++					unsigned short vid)
++{
++}
++
+ 
+ /**
+  * nes_netdev_init - initialize network device
+@@ -1598,6 +1603,7 @@ struct net_device *nes_netdev_init(struc
+ 	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
+ 	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ 	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
++	netdev->vlan_rx_kill_vid = nes_netdev_vlan_rx_kill_vid;
+ 
+ 	/* Fill in the port structure */
+ 	nesvnic->netdev = netdev;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/mlx4_en_0099_no_multiqueue.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/mlx4_en_0099_no_multiqueue.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/mlx4_en_0099_no_multiqueue.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,73 @@
+diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
+index bce700a..a9ee0d7 100644
+--- a/drivers/net/mlx4/en_netdev.c
++++ b/drivers/net/mlx4/en_netdev.c
+@@ -963,7 +963,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	int i;
+ 	int err;
+ 
+-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
++	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+ 	if (dev == NULL) {
+ 		mlx4_err(mdev, "Net device allocation failed\n");
+ 		return -ENOMEM;
+@@ -1036,7 +1036,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	dev->open = &mlx4_en_open;
+ 	dev->stop = &mlx4_en_close;
+ 	dev->hard_start_xmit = &mlx4_en_xmit;
+-	dev->select_queue = &mlx4_en_select_queue;
+ 	dev->get_stats = &mlx4_en_get_stats;
+ 	dev->set_multicast_list = &mlx4_en_set_multicast;
+ 	dev->set_mac_address = &mlx4_en_set_mac;
+diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
+index 3d8246f..72e166f 100644
+--- a/drivers/net/mlx4/en_tx.c
++++ b/drivers/net/mlx4/en_tx.c
+@@ -392,7 +392,7 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+ 			 *   transmission on that ring would stop the queue.
+ 			 */
+ 			ring->blocked = 0;
+-			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
++			netif_wake_queue(dev);
+ 			priv->port_stats.wake_queue++;
+ 		}
+ 	}
+@@ -612,7 +612,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
+ 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ }
+ 
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
++static int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	u16 vlan_tag = 0;
+@@ -703,7 +703,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+-	tx_ind = skb->queue_mapping;
++	tx_ind = mlx4_en_select_queue(dev, skb);
+ 	ring = &priv->tx_ring[tx_ind];
+ 	if (priv->vlgrp && vlan_tx_tag_present(skb))
+ 		vlan_tag = vlan_tx_tag_get(skb);
+@@ -713,7 +713,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ 		/* every full Tx ring stops queue.
+ 		 * TODO: implement multi-queue support (per-queue stop) */
+-		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
++		netif_stop_queue(dev);
+ 		ring->blocked = 1;
+ 		priv->port_stats.queue_stopped++;
+ 
+diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
+index adef17c..995e318 100644
+--- a/drivers/net/mlx4/mlx4_en.h
++++ b/drivers/net/mlx4/mlx4_en.h
+@@ -521,7 +521,6 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq);
+ void mlx4_en_poll_tx_cq(unsigned long data);
+ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+ 
+ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+ 			   u32 size, u16 stride);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/mlx4_en_0100_to_2.6.24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/mlx4_en_0100_to_2.6.24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/mlx4_en_0100_to_2.6.24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,33 +1,5 @@
-From 6d0a6cc6981404e95297924c9a8f169b3447a98e Mon Sep 17 00:00:00 2001
-From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
-Date: Thu, 2 Oct 2008 11:26:30 +0300
-Subject: [PATCH] mlx4_en: LRO backport patch
-
-Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
----
- drivers/net/mlx4/Makefile    |    2 +-
- drivers/net/mlx4/en_cq.c     |   23 ++-
- drivers/net/mlx4/en_lro.c    |  529 ++++++++++++++++++++++++++++++++++++++++++
- drivers/net/mlx4/en_netdev.c |    5 +-
- drivers/net/mlx4/en_params.c |   19 +--
- drivers/net/mlx4/en_rx.c     |  121 +++-------
- drivers/net/mlx4/en_tx.c     |    1 +
- drivers/net/mlx4/mlx4_en.h   |   60 +++++-
- 8 files changed, 646 insertions(+), 114 deletions(-)
- create mode 100644 drivers/net/mlx4/en_lro.c
-
-diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
-index acb1b91..e6bfdec 100644
---- a/drivers/net/mlx4/Makefile
-+++ b/drivers/net/mlx4/Makefile
-@@ -5,4 +5,4 @@ mlx4_core-y :=  alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-
- obj-$(CONFIG_MLX4_EN)                  += mlx4_en.o
-
--mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o
-+mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o en_lro.o
 diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
-index cf9c5df..2348bfc 100644
+index a4d6e46..440633a 100644
 --- a/drivers/net/mlx4/en_cq.c
 +++ b/drivers/net/mlx4/en_cq.c
 @@ -34,6 +34,7 @@
@@ -38,7 +10,7 @@
  
  #include "mlx4_en.h"
  
-@@ -100,8 +101,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -106,8 +107,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  		cq->timer.function = mlx4_en_poll_tx_cq;
  		cq->timer.data = (unsigned long) cq;
  	} else {
@@ -58,7 +30,7 @@
  	}
  
  	return 0;
-@@ -123,8 +133,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -129,8 +139,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  
  	if (cq->is_tx)
  		del_timer(&cq->timer);
@@ -74,548 +46,8 @@
  
  	mlx4_cq_free(mdev->dev, &cq->mcq);
  }
-diff --git a/drivers/net/mlx4/en_lro.c b/drivers/net/mlx4/en_lro.c
-new file mode 100644
-index 0000000..9d17376
---- /dev/null
-+++ b/drivers/net/mlx4/en_lro.c
-@@ -0,0 +1,534 @@
-+/*
-+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses.  You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ *     Redistribution and use in source and binary forms, with or
-+ *     without modification, are permitted provided that the following
-+ *     conditions are met:
-+ *
-+ *      - Redistributions of source code must retain the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer.
-+ *
-+ *      - Redistributions in binary form must reproduce the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer in the documentation and/or other materials
-+ *        provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ */
-+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <net/tcp.h>
-+#include <linux/if_vlan.h>
-+#include <linux/delay.h>
-+
-+#include "mlx4_en.h"
-+
-+/* LRO hash function - using sum of source and destination port LSBs is
-+ * good enough */
-+#define LRO_INDEX(th, size) \
-+	((*((u8*) &th->source + 1) + *((u8*) &th->dest + 1)) & (size - 1))
-+
-+/* #define CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+static void mlx4_en_lro_validate(struct mlx4_en_priv* priv, struct mlx4_en_lro *lro)
-+{
-+	int i;
-+	int size, size2;
-+	struct sk_buff *skb = lro->skb;
-+	skb_frag_t *frags;
-+	int len, len2;
-+	int cur_skb = 0;
-+
-+	/* Sum fragment sizes of first skb */
-+	len = skb->len;
-+	size = skb_headlen(skb);
-+	frags = skb_shinfo(skb)->frags;
-+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+		size += frags[i].size;
-+	}
-+
-+	/* Add in fragments of linked skb's */
-+	skb = skb_shinfo(skb)->frag_list;
-+	while (skb) {
-+		cur_skb++;
-+		len2 = skb->len;
-+		if (skb_headlen(skb)) {
-+			mlx4_err(priv->mdev, "Bad LRO format: non-zero headlen "
-+				  "in fraglist (skb:%d)\n", cur_skb);
-+			return;
-+		}
-+
-+		size2 = 0;
-+		frags = skb_shinfo(skb)->frags;
-+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+			size2 += frags[i].size;
-+		}
-+
-+		if (size2 != len2) {
-+			mlx4_err(priv->mdev, "Bad skb size:%d in LRO fraglist. "
-+			          "Expected:%d (skb:%d)\n", size2, len2, cur_skb);
-+			return;
-+		}
-+		size += size2;
-+		skb = skb->next;
-+	}
-+
-+	if (size != len)
-+		mlx4_err(priv->mdev, "Bad LRO size:%d expected:%d\n", size, len);
-+}
-+#endif /* MLX4_EN_DEBUG_LRO */
-+
-+static void mlx4_en_lro_flush_single(struct mlx4_en_priv* priv,
-+		   struct mlx4_en_rx_ring* ring, struct mlx4_en_lro *lro)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct sk_buff *skb = lro->skb;
-+	struct iphdr *iph = (struct iphdr *) skb->data;
-+	struct tcphdr *th = (struct tcphdr *)(iph + 1);
-+	unsigned int headlen = skb_headlen(skb);
-+	__wsum tcp_hdr_csum;
-+	u32 *ts;
-+
-+	/* Update IP length and checksum */
-+	iph->tot_len = htons(lro->tot_len);
-+	iph->check = 0;
-+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-+
-+	/* Update latest TCP ack, window, psh, and timestamp */
-+	th->ack_seq = lro->ack_seq;
-+	th->window = lro->window;
-+	th->psh = !!lro->psh;
-+	if (lro->has_timestamp) {
-+		ts = (u32 *) (th + 1);
-+		ts[1] = htonl(lro->tsval);
-+		ts[2] = lro->tsecr;
-+	}
-+	th->check = 0;
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, 0);
-+	lro->data_csum = csum_add(lro->data_csum, tcp_hdr_csum);
-+	th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
-+				      lro->tot_len - (iph->ihl << 2),
-+				      IPPROTO_TCP, lro->data_csum);
-+
-+	/* Update skb */
-+	skb->len = lro->tot_len;
-+	skb->data_len = lro->tot_len - headlen;
-+	skb->truesize = skb->len + sizeof(struct sk_buff);
-+	skb_shinfo(skb)->gso_size = lro->mss;
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+	mlx4_en_lro_validate(priv, lro);
-+#endif /* CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+	/* Push it up the stack */
-+	if (priv->vlgrp && lro->has_vlan)
-+		vlan_hwaccel_receive_skb(skb, priv->vlgrp,
-+					be16_to_cpu(lro->vlan_prio));
-+	else
-+		netif_receive_skb(skb);
-+	priv->dev->last_rx = jiffies;
-+
-+	/* Increment stats */
-+	priv->port_stats.lro_flushed++;
-+
-+	/* Move session back to the free list */
-+	hlist_del(&lro->node);
-+	hlist_del(&lro->flush_node);
-+	hlist_add_head(&lro->node, &ring->lro_free);
-+}
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_flush, flush_node) {
-+		if (all || time_after(jiffies, lro->expires))
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+	}
-+}
-+
-+static inline int mlx4_en_lro_append(struct mlx4_en_priv *priv,
-+				   struct mlx4_en_lro *lro,
-+				   struct mlx4_en_rx_desc *rx_desc,
-+				   struct skb_frag_struct *skb_frags,
-+				   struct mlx4_en_rx_alloc *page_alloc,
-+				   unsigned int data_len,
-+				   int hlen)
-+{
-+	struct sk_buff *skb = lro->skb_last;
-+	struct skb_shared_info *info;
-+	struct skb_frag_struct *frags_copy;
-+	int nr_frags;
-+
-+	if (skb_shinfo(skb)->nr_frags + priv->num_frags > MAX_SKB_FRAGS)
-+		return -ENOMEM;
-+
-+	info = skb_shinfo(skb);
-+
-+	/* Copy fragments from descriptor ring to skb */
-+	frags_copy = info->frags + info->nr_frags;
-+	nr_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
-+						frags_copy,
-+						page_alloc,
-+						data_len + hlen);
-+	if (!nr_frags) {
-+		mlx4_dbg(DRV, priv, "Failed completing rx desc during LRO append\n");
-+		return -ENOMEM;
-+	}
-+
-+	/* Skip over headers */
-+	frags_copy[0].page_offset += hlen;
-+
-+	if (nr_frags == 1)
-+		frags_copy[0].size = data_len;
-+	else {
-+		/* Adjust size of last fragment to match packet length.
-+		 * Note: if this fragment is also the first one, the
-+		 *       operation is completed in the next line */
-+		frags_copy[nr_frags - 1].size = hlen + data_len -
-+				priv->frag_info[nr_frags - 1].frag_prefix_size;
-+
-+		/* Adjust size of first fragment */
-+		frags_copy[0].size -= hlen;
-+	}
-+
-+	/* Update skb bookkeeping */
-+	skb->len += data_len;
-+	skb->data_len += data_len;
-+	info->nr_frags += nr_frags;
-+	return 0;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_find_session(struct mlx4_en_dev *mdev,
-+						       struct mlx4_en_rx_ring *ring,
-+						       struct iphdr *iph,
-+						       struct tcphdr *th)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node;
-+	int index = LRO_INDEX(th, mdev->profile.num_lro);
-+	struct hlist_head *list = &ring->lro_hash[index];
-+
-+	hlist_for_each_entry(lro, node, list, node) {
-+		if (lro->sport_dport == *((u32*) &th->source) &&
-+		    lro->saddr == iph->saddr &&
-+		    lro->daddr == iph->daddr)
-+			return lro;
-+	}
-+	return NULL;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_alloc_session(struct mlx4_en_priv *priv,
-+							struct mlx4_en_rx_ring *ring)
-+{
-+	return hlist_empty(&ring->lro_free) ? NULL :
-+		hlist_entry(ring->lro_free.first, struct mlx4_en_lro, node);
-+}
-+
-+static __wsum mlx4_en_lro_tcp_data_csum(struct iphdr *iph,
-+					struct tcphdr *th, int len)
-+{
-+	__wsum tcp_csum;
-+	__wsum tcp_hdr_csum;
-+	__wsum tcp_ps_hdr_csum;
-+
-+	tcp_csum = ~csum_unfold(th->check);
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, tcp_csum);
-+
-+	tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-+					     len + (th->doff << 2),
-+					     IPPROTO_TCP, 0);
-+
-+	return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
-+			tcp_ps_hdr_csum);
-+}
-+
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+					  struct mlx4_en_rx_desc *rx_desc,
-+					  struct skb_frag_struct *skb_frags,
-+					  unsigned int length,
-+					  struct mlx4_cqe *cqe)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct mlx4_en_lro *lro;
-+	struct sk_buff *skb;
-+	struct iphdr *iph;
-+	struct tcphdr *th;
-+	dma_addr_t dma;
-+	int tcp_hlen;
-+	int tcp_data_len;
-+	int hlen;
-+	u16 ip_len;
-+	void *va;
-+	u32 *ts;
-+	u32 seq;
-+	u32 tsval = (u32) ~0UL;
-+	u32 tsecr = 0;
-+	u32 ack_seq;
-+	u16 window;
-+
-+	/* This packet is eligible for LRO if it is:
-+	 * - DIX Ethernet (type interpretation)
-+	 * - TCP/IP (v4)
-+	 * - without IP options
-+	 * - not an IP fragment */
-+	if (!mlx4_en_can_lro(cqe->status))
-+			return -1;
-+
-+	/* Get pointer to TCP header. We already know that the packet is DIX Ethernet/IPv4/TCP
-+	 * with no VLAN (HW stripped it) and no IP options */
-+	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
-+	iph = va + ETH_HLEN;
-+	th = (struct tcphdr *)(iph + 1);
-+
-+	/* Synchronsize headers for processing */
-+	dma = be64_to_cpu(rx_desc->data[0].addr);
-+#define MAX_LRO_HEADER		(ETH_HLEN + \
-+				 sizeof(*iph) + \
-+				 sizeof(*th) + \
-+				 TCPOLEN_TSTAMP_ALIGNED)
-+	dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
-+				      MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+
-+	/* We only handle aligned timestamp options */
-+	tcp_hlen = (th->doff << 2);
-+	if (tcp_hlen == sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) {
-+		ts = (u32*) (th + 1);
-+		if (unlikely(*ts != htonl((TCPOPT_NOP << 24) |
-+					  (TCPOPT_NOP << 16) |
-+					  (TCPOPT_TIMESTAMP << 8) |
-+					  TCPOLEN_TIMESTAMP)))
-+			goto sync_device;
-+		tsval = ntohl(ts[1]);
-+		tsecr = ts[2];
-+	} else if (tcp_hlen != sizeof(*th))
-+		goto sync_device;
-+	
-+
-+	/* At this point we know we have a TCP packet that is likely to be
-+	 * eligible for LRO. Therefore, see now if we have an oustanding
-+	 * session that corresponds to this packet so we could flush it if
-+	 * something still prevents LRO */
-+	lro = mlx4_en_lro_find_session(mdev, ring, iph, th);
-+
-+	/* ensure no bits set besides ack or psh */
-+	if (th->fin || th->syn || th->rst || th->urg || th->ece ||
-+	    th->cwr || !th->ack) {
-+		if (lro) {
-+			/* First flush session to keep packets in-order */
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		}
-+		goto sync_device;
-+	}
-+
-+	/* Get ip length and verify that the frame is big enough */
-+	ip_len = ntohs(iph->tot_len);
-+	if (unlikely(length < ETH_HLEN + ip_len)) {
-+		mlx4_warn(mdev, "Cannot LRO - ip payload exceeds frame!\n");
-+		goto sync_device;
-+	}
-+
-+	/* Get TCP payload length */
-+	tcp_data_len = ip_len - tcp_hlen - sizeof(struct iphdr);
-+	seq = ntohl(th->seq);
-+	if (!tcp_data_len)
-+		goto flush_session;
-+
-+	if (lro) {
-+		/* Check VLAN tag */
-+		if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+			if (cqe->sl_vid != lro->vlan_prio || !lro->has_vlan) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else if (lro->has_vlan) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* Check sequence number */
-+		if (unlikely(seq != lro->next_seq)) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* If the cummulative IP length is over 64K, flush and start
-+		 * a new session */
-+		if (lro->tot_len + tcp_data_len > 0xffff) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto new_session;
-+		}
-+
-+		/* Check timestamps */
-+		if (tcp_hlen != sizeof(*th)) {
-+			if (unlikely(lro->tsval > tsval || !tsecr))
-+				goto sync_device;
-+		}
-+
-+		window = th->window;
-+		ack_seq = th->ack_seq;
-+		if (likely(tcp_data_len)) {
-+			/* Append the data! */
-+			hlen = ETH_HLEN + sizeof(struct iphdr) + tcp_hlen;
-+			if (mlx4_en_lro_append(priv, lro, rx_desc, skb_frags,
-+							ring->page_alloc,
-+							tcp_data_len, hlen)) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else {
-+			/* No data */
-+			dma_sync_single_range_for_device(&mdev->dev->pdev->dev, dma,
-+							 0, MAX_LRO_HEADER,
-+							 DMA_FROM_DEVICE);
-+		}
-+
-+		/* Update session */
-+		lro->psh |= th->psh;
-+		lro->next_seq += tcp_data_len;
-+		lro->data_csum = csum_block_add(lro->data_csum,
-+					mlx4_en_lro_tcp_data_csum(iph, th,
-+								  tcp_data_len),
-+					lro->tot_len);
-+		lro->tot_len += tcp_data_len;
-+		lro->tsval = tsval;
-+		lro->tsecr = tsecr;
-+		lro->ack_seq = ack_seq;
-+		lro->window = window;
-+		if (tcp_data_len > lro->mss)
-+			lro->mss = tcp_data_len;
-+		priv->port_stats.lro_aggregated++;
-+		if (th->psh)
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		return 0;
-+	}
-+
-+new_session:
-+	if (th->psh)
-+		goto sync_device;
-+	lro = mlx4_en_lro_alloc_session(priv, ring);
-+	if (lro) {
-+		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags, ring->page_alloc,
-+							     ETH_HLEN + ip_len);
-+		if (skb) {
-+			int index;
-+
-+			/* Add in the skb */
-+			lro->skb = skb;
-+			lro->skb_last = skb;
-+			skb->protocol = eth_type_trans(skb, priv->dev);
-+			skb->ip_summed = CHECKSUM_UNNECESSARY;
-+
-+			/* Initialize session */
-+			lro->saddr = iph->saddr;
-+			lro->daddr = iph->daddr;
-+			lro->sport_dport = *((u32*) &th->source);
-+
-+			lro->next_seq = seq + tcp_data_len;
-+			lro->tot_len = ip_len;
-+			lro->psh = th->psh;
-+			lro->ack_seq = th->ack_seq;
-+			lro->window = th->window;
-+			lro->mss = tcp_data_len;
-+			lro->data_csum = mlx4_en_lro_tcp_data_csum(iph, th,
-+						tcp_data_len);
-+
-+			/* Handle vlans */
-+			if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+				lro->vlan_prio = cqe->sl_vid;
-+				lro->has_vlan = 1;
-+			} else
-+				lro->has_vlan = 0;
-+
-+			/* Handle timestamps */
-+			if (tcp_hlen != sizeof(*th)) {
-+				lro->tsval = tsval;
-+				lro->tsecr = tsecr;
-+				lro->has_timestamp = 1;
-+			} else {
-+				lro->tsval = (u32) ~0UL;
-+				lro->has_timestamp = 0;
-+			}
-+
-+			/* Activate this session */
-+			lro->expires = jiffies + HZ / 25;
-+			hlist_del(&lro->node);
-+			index = LRO_INDEX(th, mdev->profile.num_lro);
-+
-+			hlist_add_head(&lro->node, &ring->lro_hash[index]);
-+			hlist_add_head(&lro->flush_node, &ring->lro_flush);
-+			priv->port_stats.lro_aggregated++;
-+			return 0;
-+		}
-+	} else {
-+		priv->port_stats.lro_no_desc++;
-+	}
-+
-+flush_session:
-+	if (lro)
-+		mlx4_en_lro_flush_single(priv, ring, lro);
-+sync_device:
-+	dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
-+					 MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+	return -1;
-+}
-+
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_free, node) {
-+		hlist_del(&lro->node);
-+		kfree(lro);
-+	}
-+	kfree(ring->lro_hash);
-+}
-+
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro)
-+{
-+	struct mlx4_en_lro *lro;
-+	int i;
-+
-+	INIT_HLIST_HEAD(&ring->lro_free);
-+	INIT_HLIST_HEAD(&ring->lro_flush);
-+	ring->lro_hash = kmalloc(sizeof(struct hlist_head) * num_lro,
-+				 GFP_KERNEL);
-+	if (!ring->lro_hash)
-+		return -ENOMEM;
-+
-+	for (i = 0; i < num_lro; i++) {
-+		INIT_HLIST_HEAD(&ring->lro_hash[i]);
-+		lro = kzalloc(sizeof(struct mlx4_en_lro), GFP_KERNEL);
-+		if (!lro) {
-+			mlx4_en_lro_destroy(ring);
-+			return -ENOMEM;
-+		}
-+		INIT_HLIST_NODE(&lro->node);
-+		INIT_HLIST_NODE(&lro->flush_node);
-+		hlist_add_head(&lro->node, &ring->lro_free);
-+	}
-+	return 0;
-+}
-+
-+
 diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
-index 6199f4c..92f91d2 100644
+index 2d57ae6..3b394d7 100644
 --- a/drivers/net/mlx4/en_netdev.c
 +++ b/drivers/net/mlx4/en_netdev.c
 @@ -333,7 +333,8 @@ void mlx4_en_netpoll(struct net_device *dev)
@@ -625,10 +57,10 @@
 -		napi_synchronize(&cq->napi);
 +		while (test_bit(__LINK_STATE_RX_SCHED, &cq->poll_dev->state))
 +			msleep(1);
- 		mlx4_en_process_rx_cq(dev, cq, 0);
- 		spin_unlock_irqrestore(&cq->lock, flags);
- 	}
-@@ -757,8 +758,6 @@ void mlx4_en_stop_port(struct net_device *dev)
+ 		if (priv->rx_ring[i].use_frags)
+ 			mlx4_en_process_rx_cq(dev, cq, 0);
+ 		else
+@@ -755,8 +756,6 @@ void mlx4_en_stop_port(struct net_device *dev)
  	/* Free RX Rings */
  	for (i = 0; i < priv->rx_ring_num; i++) {
  		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -637,226 +69,11 @@
  		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
  	}
  }
-diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
-index 6e96d0a..2b0ca84 100644
---- a/drivers/net/mlx4/en_params.c
-+++ b/drivers/net/mlx4/en_params.c
-@@ -142,21 +142,6 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
-  * Ethtool support
-  */
- 
--static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
--{
--	int i;
--
--	priv->port_stats.lro_aggregated = 0;
--	priv->port_stats.lro_flushed = 0;
--	priv->port_stats.lro_no_desc = 0;
--
--	for (i = 0; i < priv->rx_ring_num; i++) {
--		priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
--		priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
--		priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
--	}
--}
--
- static void
- mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
- {
-@@ -266,8 +251,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
- 
- 	spin_lock_bh(&priv->stats_lock);
- 
--	mlx4_en_update_lro_stats(priv);
--
- 	for (i = 0; i < NUM_MAIN_STATS; i++)
- 		data[index++] = ((unsigned long *) &priv->stats)[i];
- 	for (i = 0; i < NUM_PORT_STATS; i++)
-@@ -457,7 +440,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
- 	.get_rx_csum = mlx4_en_get_rx_csum,
- 	.set_rx_csum = mlx4_en_set_rx_csum,
- 	.get_tx_csum = ethtool_op_get_tx_csum,
--	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
-+	.set_tx_csum = ethtool_op_set_tx_csum,
- 	.get_strings = mlx4_en_get_strings,
- 	.get_sset_count = mlx4_en_get_sset_count,
- 	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
 diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
-index 5a95d74..16f07bc 100644
+index 9d144cd..14b5faf 100644
 --- a/drivers/net/mlx4/en_rx.c
 +++ b/drivers/net/mlx4/en_rx.c
-@@ -37,6 +37,7 @@
- #include <linux/if_ether.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/etherdevice.h>
- 
- #include "mlx4_en.h"
- 
-@@ -51,18 +52,6 @@ static void mlx4_en_srq_event(struct mlx
- 	return;
- }
- 
--static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
--				   void **ip_hdr, void **tcpudp_hdr,
--				   u64 *hdr_flags, void *priv)
--{
--	*mac_hdr = page_address(frags->page) + frags->page_offset;
--	*ip_hdr = *mac_hdr + ETH_HLEN;
--	*tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
--	*hdr_flags = LRO_IPV4 | LRO_TCP;
--
--	return 0;
--}
--
- static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
- 			      struct mlx4_en_rx_desc *rx_desc,
- 			      struct skb_frag_struct *skb_frags,
-@@ -372,23 +361,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- 	}
- 	ring->buf = ring->wqres.buf.direct.buf;
- 
--	/* Configure lro mngr */
--	memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
--	ring->lro.dev = priv->dev;
--	ring->lro.features = LRO_F_NAPI;
--	ring->lro.frag_align_pad = NET_IP_ALIGN;
--	ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
--	ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
--	ring->lro.max_desc = mdev->profile.num_lro;
--	ring->lro.max_aggr = MAX_SKB_FRAGS;
--	ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
--				    sizeof(struct net_lro_desc),
--				    GFP_KERNEL);
--	if (!ring->lro.lro_arr) {
--		mlx4_err(mdev, "Failed to allocate lro array\n");
-+	/* Allocate LRO sessions */
-+	if (mlx4_en_lro_init(ring, mdev->profile.num_lro)) {
-+		mlx4_err(mdev, "Failed allocating lro sessions\n");
- 		goto err_map;
- 	}
--	ring->lro.get_frag_header = mlx4_en_get_frag_header;
- 
- 	return 0;
- 
-@@ -501,7 +478,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 
--	kfree(ring->lro.lro_arr);
-+	mlx4_en_lro_destroy(ring);
- 	mlx4_en_unmap_buffer(&ring->wqres.buf);
- 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
- 	vfree(ring->rx_info);
-@@ -522,12 +499,12 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- 
- 
- /* Unmap a completed descriptor and free unused pages */
--static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
--				    struct mlx4_en_rx_desc *rx_desc,
--				    struct skb_frag_struct *skb_frags,
--				    struct skb_frag_struct *skb_frags_rx,
--				    struct mlx4_en_rx_alloc *page_alloc,
--				    int length)
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct mlx4_en_frag_info *frag_info;
-@@ -570,11 +547,11 @@ fail:
- }
- 
- 
--static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
--				      struct mlx4_en_rx_desc *rx_desc,
--				      struct skb_frag_struct *skb_frags,
--				      struct mlx4_en_rx_alloc *page_alloc,
--				      unsigned int length)
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct sk_buff *skb;
-@@ -663,11 +640,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 	struct mlx4_cqe *cqe;
- 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
- 	struct skb_frag_struct *skb_frags;
--	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
- 	struct mlx4_en_rx_desc *rx_desc;
- 	struct sk_buff *skb;
- 	int index;
--	int nr;
- 	unsigned int length;
- 	int polled = 0;
- 	int ip_summed;
-@@ -718,38 +693,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 			if ((cqe->status & MLX4_CQE_STATUS_IPOK) &&
- 			    (cqe->checksum == 0xffff)) {
- 				priv->port_stats.rx_chksum_good++;
--				/* This packet is eligible for LRO if it is:
--				 * - DIX Ethernet (type interpretation)
--				 * - TCP/IP (v4)
--				 * - without IP options
--				 * - not an IP fragment */
--				if (mlx4_en_can_lro(cqe->status) &&
--				    dev->features & NETIF_F_LRO) {
--
--					nr = mlx4_en_complete_rx_desc(
--						priv, rx_desc,
--						skb_frags, lro_frags,
--						ring->page_alloc, length);
--					if (!nr)
--						goto next;
--
--					if (priv->vlgrp && (cqe->vlan_my_qpn &
--						MLX4_CQE_VLAN_PRESENT_MASK)) {
--						lro_vlan_hwaccel_receive_frags(
--						       &ring->lro, lro_frags,
--						       length, length,
--						       priv->vlgrp,
--						       be16_to_cpu(cqe->sl_vid),
--						       NULL, 0);
--					} else
--						lro_receive_frags(&ring->lro,
--								  lro_frags,
--								  length,
--								  length,
--								  NULL, 0);
--
-+				if (mdev->profile.num_lro &&
-+				    !mlx4_en_lro_rx(priv, ring, rx_desc,
-+						    skb_frags, length, cqe))
- 					goto next;
--				}
- 
- 				/* LRO not possible, complete processing here */
- 				ip_summed = CHECKSUM_UNNECESSARY;
-@@ -790,13 +737,15 @@ next:
- 		if (++polled == budget) {
- 			/* We are here because we reached the NAPI budget -
- 			 * flush only pending LRO sessions */
--			lro_flush_all(&ring->lro);
-+			if (mdev->profile.num_lro)
-+				mlx4_en_lro_flush(priv, ring, 0);
- 			goto out;
- 		}
- 	}
- 
- 	/* If CQ is empty flush all LRO sessions unconditionally */
--	lro_flush_all(&ring->lro);
-+	if (mdev->profile.num_lro)
-+		mlx4_en_lro_flush(priv, ring, 1);
- 
- out:
- 	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
-@@ -820,30 +769,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
+@@ -1041,30 +1041,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
  	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
  
  	if (priv->port_up)
@@ -877,8 +94,8 @@
  	int done;
 +	int work = min(*budget, poll_dev->quota);
  
--	done = mlx4_en_process_rx_cq(dev, cq, budget);
-+	done = mlx4_en_process_rx_cq(dev, cq, work);
+-	done = cq->process_cq(dev, cq, budget);
++	done = cq->process_cq(dev, cq, work);
 +	dev->quota -= done;
 +	*budget -= done;
  
@@ -901,76 +118,11 @@
  }
  
  
-diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
-index 2297fec..59f8772 100644
---- a/drivers/net/mlx4/en_tx.c
-+++ b/drivers/net/mlx4/en_tx.c
-@@ -37,6 +37,7 @@
- #include <linux/skbuff.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/tcp.h>
- 
- #include "mlx4_en.h"
- 
 diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
-index 1011803..7c8c489 100644
+index ebaed67..456cd91 100644
 --- a/drivers/net/mlx4/mlx4_en.h
 +++ b/drivers/net/mlx4/mlx4_en.h
-@@ -38,7 +38,7 @@
- #include <linux/list.h>
- #include <linux/mutex.h>
- #include <linux/netdevice.h>
--#include <linux/inet_lro.h>
-+#include <net/checksum.h>
- 
- #include <linux/mlx4/device.h>
- #include <linux/mlx4/qp.h>
-@@ -268,11 +268,42 @@ struct mlx4_en_rx_desc {
- 	struct mlx4_wqe_data_seg data[0];
- };
- 
-+struct mlx4_en_lro {
-+	struct hlist_node node;
-+	struct hlist_node flush_node;
-+
-+	/* Id fields come first: */
-+	u32 saddr;
-+	u32 daddr;
-+	u32 sport_dport;
-+	u32 next_seq;
-+	u16 tot_len;
-+	u8 psh;
-+
-+	u32 tsval;
-+	u32 tsecr;
-+	u32 ack_seq;
-+	u16 window;
-+	__be16 vlan_prio;
-+	u16 has_vlan;
-+	u16 has_timestamp;
-+	u16 mss;
-+	__wsum  data_csum;
-+
-+	unsigned long expires;
-+	struct sk_buff *skb;
-+	struct sk_buff *skb_last;
-+};
-+
-+
- struct mlx4_en_rx_ring {
- 	struct mlx4_srq srq;
- 	struct mlx4_hwq_resources wqres;
- 	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
--	struct net_lro_mgr lro;
-+	struct mlx4_en_lro lro;
-+	struct hlist_head *lro_hash;
-+	struct hlist_head lro_free;
-+	struct hlist_head lro_flush;
- 	u32 size ;	/* number of Rx descs*/
- 	u32 actual_size;
- 	u32 size_mask;
-@@ -311,7 +342,7 @@ struct mlx4_en_cq {
+@@ -328,7 +328,7 @@ struct mlx4_en_cq {
  	int                     ring;
  	spinlock_t              lock;
  	struct net_device      *dev;
@@ -979,43 +131,12 @@
  	/* Per-core Tx cq processing support */
  	struct timer_list timer;
  	int size;
-@@ -541,7 +572,7 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- int mlx4_en_process_rx_cq(struct net_device *dev,
- 			  struct mlx4_en_cq *cq,
- 			  int budget);
+@@ -571,7 +571,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
+ int mlx4_en_process_rx_cq_skb(struct net_device *dev,
+ 			      struct mlx4_en_cq *cq,
+ 			      int budget);
 -int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
 +int mlx4_en_poll_rx_cq(struct net_device *poll_dev, int *budget);;
  void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
  			     int is_tx, int rss, int qpn, int cqn, int srqn,
  			     struct mlx4_qp_context *context);
-@@ -552,6 +583,27 @@ void mlx4_en_calc_rx_buf(struct net_device *dev);
- void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
- 				 struct mlx4_en_rss_map *rss_map,
- 				 int num_entries, int num_rings);
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all);
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+		   struct mlx4_en_rx_desc *rx_desc,
-+		   struct skb_frag_struct *skb_frags,
-+		   unsigned int length, struct mlx4_cqe *cqe);
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring);
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro);
-+
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv* priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length);
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv* priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length);
-+
- void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
- int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
- void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
--- 
-1.5.4
-

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/mlx4_en_0110_no_set_flags.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/mlx4_en_0110_no_set_flags.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_FC6/mlx4_en_0110_no_set_flags.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -7,9 +7,9 @@
 --- ofed_kernel.orig/drivers/net/mlx4/en_params.c
 +++ ofed_kernel/drivers/net/mlx4/en_params.c
 @@ -593,8 +593,6 @@ const struct ethtool_ops mlx4_en_ethtool
- 	.get_pauseparam = mlx4_en_get_pauseparam,
  	.set_pauseparam = mlx4_en_set_pauseparam,
  	.get_ringparam = mlx4_en_get_ringparam,
+ 	.set_ringparam = mlx4_en_set_ringparam,
 -	.get_flags = ethtool_op_get_flags,
 -	.set_flags = ethtool_op_set_flags,
  };

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/core_2_z010_sysfs_to_2.6.18.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/core_2_z010_sysfs_to_2.6.18.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/core_2_z010_sysfs_to_2.6.18.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,32 @@
+--- a/drivers/infiniband/core/device.c	2009-02-24 14:00:50.387541000 +0200
++++ b/drivers/infiniband/core/device.c	2009-02-24 14:00:54.891362000 +0200
+@@ -199,7 +199,7 @@ void ib_dealloc_device(struct ib_device 
+ 
+ 	BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);
+ 
+-	kobject_put(&device->dev.kobj);
++	ib_device_unregister_sysfs(device);
+ }
+ EXPORT_SYMBOL(ib_dealloc_device);
+ 
+@@ -356,8 +356,6 @@ void ib_unregister_device(struct ib_devi
+ 
+ 	mutex_unlock(&device_mutex);
+ 
+-	ib_device_unregister_sysfs(device);
+-
+ 	spin_lock_irqsave(&device->client_data_lock, flags);
+ 	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ 		kfree(context);
+--- a/drivers/infiniband/core/sysfs.c	2009-02-24 13:58:42.509754000 +0200
++++ b/drivers/infiniband/core/sysfs.c	2009-02-24 14:00:54.894368000 +0200
+@@ -863,9 +863,6 @@ void ib_device_unregister_sysfs(struct i
+ 	struct kobject *p, *t;
+ 	struct ib_port *port;
+ 
+-	/* Hold kobject until ib_dealloc_device() */
+-	kobject_get(&device->dev.kobj);
+-
+ 	list_for_each_entry_safe(p, t, &device->port_list, entry) {
+ 		list_del(&p->entry);
+ 		port = container_of(p, struct ib_port, kobj);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/core_z0010_sysfs_race.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/core_z0010_sysfs_race.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/core_z0010_sysfs_race.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,496 @@
+ib_core: avoid race condition between sysfs access and low-level module unload. (V2)
+
+In newer kernels, although a low-level module will not be unloaded (code)
+while its sysfs interface is being accessed, it is possible for the module to
+free all its resources (data) during such access.  This almost always causes
+a kernel Oops.
+
+To avoid this, we protect the device reg_state with a mutex, and perform
+all sysfs operations (show, store) atomically within this mutex.
+
+V2: fix thinko bug in sysfs_state_show changes(found by Ralph Campbell).
+
+Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
+Signed-off-by: Ralph Campbell <ralph.campbell at qlogic.com>
+
+Index: ofed_kernel/drivers/infiniband/core/device.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/device.c	2009-02-22 15:36:34.531430000 +0200
++++ ofed_kernel/drivers/infiniband/core/device.c	2009-02-22 15:38:28.650322000 +0200
+@@ -178,9 +178,14 @@ static int end_port(struct ib_device *de
+  */
+ struct ib_device *ib_alloc_device(size_t size)
+ {
++	struct ib_device *ibdev;
++
+ 	BUG_ON(size < sizeof (struct ib_device));
+ 
+-	return kzalloc(size, GFP_KERNEL);
++	ibdev = kzalloc(size, GFP_KERNEL);
++	if (ibdev)
++		mutex_init(&ibdev->sysfs_mutex);
++	return ibdev;
+ }
+ EXPORT_SYMBOL(ib_alloc_device);
+ 
+@@ -313,9 +318,10 @@ int ib_register_device(struct ib_device 
+ 		goto out;
+ 	}
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	list_add_tail(&device->core_list, &device_list);
+-
+ 	device->reg_state = IB_DEV_REGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ 
+ 	{
+ 		struct ib_client *client;
+@@ -361,7 +367,9 @@ void ib_unregister_device(struct ib_devi
+ 		kfree(context);
+ 	spin_unlock_irqrestore(&device->client_data_lock, flags);
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	device->reg_state = IB_DEV_UNREGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ }
+ EXPORT_SYMBOL(ib_unregister_device);
+ 
+Index: ofed_kernel/drivers/infiniband/core/sysfs.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/sysfs.c	2009-02-22 15:36:35.914354000 +0200
++++ ofed_kernel/drivers/infiniband/core/sysfs.c	2009-02-22 16:12:37.485629000 +0200
+@@ -94,7 +94,7 @@ static ssize_t state_show(struct ib_port
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+ 	static const char *state_name[] = {
+ 		[IB_PORT_NOP]		= "NOP",
+@@ -105,26 +105,33 @@ static ssize_t state_show(struct ib_port
+ 		[IB_PORT_ACTIVE_DEFER]	= "ACTIVE_DEFER"
+ 	};
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%d: %s\n", attr.state,
+-		       attr.state >= 0 && attr.state < ARRAY_SIZE(state_name) ?
+-		       state_name[attr.state] : "UNKNOWN");
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d: %s\n", attr.state,
++				      attr.state >= 0 &&
++				      attr.state < ARRAY_SIZE(state_name) ?
++				      state_name[attr.state] : "UNKNOWN");
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "0x%x\n", attr.lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_mask_count_show(struct ib_port *p,
+@@ -132,52 +139,64 @@ static ssize_t lid_mask_count_show(struc
+ 				   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.lmc);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.lmc);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%x\n", attr.sm_lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.sm_lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused,
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.sm_sl);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.sm_sl);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused,
+ 			     char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
+@@ -186,24 +205,33 @@ static ssize_t rate_show(struct ib_port 
+ 	struct ib_port_attr attr;
+ 	char *speed = "";
+ 	int rate;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	switch (attr.active_speed) {
+-	case 2: speed = " DDR"; break;
+-	case 4: speed = " QDR"; break;
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.active_speed) {
++			case 2: speed = " DDR"; break;
++			case 4: speed = " QDR"; break;
++			}
++
++			rate = 25 * ib_width_enum_to_int(attr.active_width) *
++				attr.active_speed;
++			if (rate < 0) {
++				ret = -EINVAL;
++				goto out;
++			}
++
++			ret = sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
++				      rate / 10, rate % 10 ? ".5" : "",
++				      ib_width_enum_to_int(attr.active_width),
++				      speed);
++		}
+ 	}
+-
+-	rate = 25 * ib_width_enum_to_int(attr.active_width) * attr.active_speed;
+-	if (rate < 0)
+-		return -EINVAL;
+-
+-	return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
+-		       rate / 10, rate % 10 ? ".5" : "",
+-		       ib_width_enum_to_int(attr.active_width), speed);
++out:
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
+@@ -211,22 +239,42 @@ static ssize_t phys_state_show(struct ib
+ {
+ 	struct ib_port_attr attr;
+ 
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	switch (attr.phys_state) {
+-	case 1:  return sprintf(buf, "1: Sleep\n");
+-	case 2:  return sprintf(buf, "2: Polling\n");
+-	case 3:  return sprintf(buf, "3: Disabled\n");
+-	case 4:  return sprintf(buf, "4: PortConfigurationTraining\n");
+-	case 5:  return sprintf(buf, "5: LinkUp\n");
+-	case 6:  return sprintf(buf, "6: LinkErrorRecovery\n");
+-	case 7:  return sprintf(buf, "7: Phy Test\n");
+-	default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.phys_state) {
++			case 1:
++				ret = sprintf(buf, "1: Sleep\n");
++				break;
++			case 2:
++				ret = sprintf(buf, "2: Polling\n");
++				break;
++			case 3:
++				ret = sprintf(buf, "3: Disabled\n");
++				break;
++			case 4:
++				ret = sprintf(buf, "4: PortConfigurationTraining\n");
++				break;
++			case 5:
++				ret = sprintf(buf, "5: LinkUp\n");
++				break;
++			case 6:
++				ret = sprintf(buf, "6: LinkErrorRecovery\n");
++				break;
++			case 7:
++				ret = sprintf(buf, "7: Phy Test\n");
++				break;
++			default:
++				ret = sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++				break;
++			}
++		}
+ 	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static PORT_ATTR_RO(state);
+@@ -256,21 +304,24 @@ static ssize_t show_port_gid(struct ib_p
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	union ib_gid gid;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) gid.raw)[0]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[1]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[2]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[3]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[4]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[5]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[6]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[7]));
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) gid.raw)[0]),
++				      be16_to_cpu(((__be16 *) gid.raw)[1]),
++				      be16_to_cpu(((__be16 *) gid.raw)[2]),
++				      be16_to_cpu(((__be16 *) gid.raw)[3]),
++				      be16_to_cpu(((__be16 *) gid.raw)[4]),
++				      be16_to_cpu(((__be16 *) gid.raw)[5]),
++				      be16_to_cpu(((__be16 *) gid.raw)[6]),
++				      be16_to_cpu(((__be16 *) gid.raw)[7]));
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
+@@ -279,13 +330,16 @@ static ssize_t show_port_pkey(struct ib_
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	u16 pkey;
+-	ssize_t ret;
+-
+-	ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%04x\n", pkey);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
++		if (!ret)
++			ret = sprintf(buf, "0x%04x\n", pkey);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ #define PORT_PMA_ATTR(_name, _counter, _width, _offset)			\
+@@ -308,6 +346,12 @@ static ssize_t show_pma_counter(struct i
+ 	if (!p->ibdev->process_mad)
+ 		return sprintf(buf, "N/A (no PMA)\n");
+ 
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (!ibdev_is_alive(p->ibdev)) {
++		ret = -ENODEV;
++		goto out;
++	}
++
+ 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
+ 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ 	if (!in_mad || !out_mad) {
+@@ -354,7 +414,7 @@ static ssize_t show_pma_counter(struct i
+ out:
+ 	kfree(in_mad);
+ 	kfree(out_mad);
+-
++	mutex_unlock(&p->ibdev->sysfs_mutex);
+ 	return ret;
+ }
+ 
+@@ -594,20 +654,20 @@ static ssize_t show_sys_image_guid(struc
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_attr attr;
+-	ssize_t ret;
+-
+-	if (!ibdev_is_alive(dev))
+-		return -ENODEV;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_device(dev, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_query_device(dev, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_node_guid(struct device *device,
+@@ -639,17 +699,20 @@ static ssize_t set_node_desc(struct devi
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_modify desc = {};
+-	int ret;
++	int ret = -ENODEV;
+ 
+ 	if (!dev->modify_device)
+ 		return -EIO;
+ 
+ 	memcpy(desc.node_desc, buf, min_t(int, count, 64));
+-	ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
+-	if (ret)
+-		return ret;
+-
+-	return count;
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
++		if (!ret)
++			ret = count;
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
+@@ -677,14 +740,18 @@ static ssize_t show_protocol_stat(const 
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	union rdma_protocol_stats stats;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = dev->get_protocol_stats(dev, &stats);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%llu\n",
+-		       (unsigned long long) ((u64 *) &stats)[offset]);
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = dev->get_protocol_stats(dev, &stats);
++		if (!ret)
++			ret = sprintf(buf, "%llu\n",
++				      (unsigned long long)
++				      ((u64 *) &stats)[offset]);
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ /* generate a read-only iwarp statistics attribute */
+Index: ofed_kernel/include/rdma/ib_verbs.h
+===================================================================
+--- ofed_kernel.orig/include/rdma/ib_verbs.h	2009-02-22 15:36:40.252210000 +0200
++++ ofed_kernel/include/rdma/ib_verbs.h	2009-02-22 15:38:28.678322000 +0200
+@@ -1205,6 +1205,7 @@ struct ib_device {
+ 		IB_DEV_REGISTERED,
+ 		IB_DEV_UNREGISTERED
+ 	}                            reg_state;
++	struct mutex		     sysfs_mutex;
+ 
+ 	u64			     uverbs_cmd_mask;
+ 	int			     uverbs_abi_ver;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/cxgb3_0002_undo_250.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/cxgb3_0002_undo_250.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/cxgb3_0002_undo_250.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,8 @@
-commit 204e2f98c2d13f869b8541f3c57c7314f75cab11
-Author: Divy Le Ray <divy at chelsio.com>
-Date:   Tue May 6 19:26:01 2008 -0700
-
-    cxgb3 - fix EEH
-    
-    Reset the chip when the PCI link goes down.
-    Preserve the napi structure when a sge qset's resources are freed.
-    Replay only HW initialization when the chip comes out of reset.
-    
-    Signed-off-by: Divy Le ray <divy at chelsio.com>
-    Signed-off-by: Jeff Garzik <jgarzik at redhat.com>
-
----
- drivers/net/cxgb3/common.h     |    1 -
- drivers/net/cxgb3/cxgb3_main.c |   10 ++++------
- drivers/net/cxgb3/regs.h       |    8 --------
- drivers/net/cxgb3/sge.c        |   31 ++-----------------------------
- drivers/net/cxgb3/t3_hw.c      |   28 ----------------------------
- 5 files changed, 6 insertions(+), 72 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/common.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/common.h
-+++ ofed_kernel/drivers/net/cxgb3/common.h
-@@ -700,7 +700,6 @@ void mac_prep(struct cmac *mac, struct a
+diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
+index d5e9bf7..dc2c79d 100644
+--- a/drivers/net/cxgb3/common.h
++++ b/drivers/net/cxgb3/common.h
+@@ -726,7 +726,6 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
  void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
  int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
  		    int reset);
@@ -31,11 +10,11 @@
  void t3_led_ready(struct adapter *adapter);
  void t3_fatal_err(struct adapter *adapter);
  void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -2449,6 +2449,9 @@ static pci_ers_result_t t3_io_error_dete
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 8de820e..48fbda6 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -2485,6 +2485,9 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
  	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
  		offload_close(&adapter->tdev);
  
@@ -45,7 +24,7 @@
  	adapter->flags &= ~FULL_INIT_DONE;
  
  	pci_disable_device(pdev);
-@@ -2473,12 +2476,8 @@ static pci_ers_result_t t3_io_slot_reset
+@@ -2509,12 +2512,8 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
  		goto err;
  	}
  	pci_set_master(pdev);
@@ -59,7 +38,7 @@
  		goto err;
  
  	return PCI_ERS_RESULT_RECOVERED;
-@@ -2630,7 +2629,6 @@ static int __devinit init_one(struct pci
+@@ -2666,7 +2665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
  	}
  
  	pci_set_master(pdev);
@@ -67,10 +46,10 @@
  
  	mmio_start = pci_resource_start(pdev, 0);
  	mmio_len = pci_resource_len(pdev, 0);
-Index: ofed_kernel/drivers/net/cxgb3/regs.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/regs.h
-+++ ofed_kernel/drivers/net/cxgb3/regs.h
+diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
+index a035d5c..086cd02 100644
+--- a/drivers/net/cxgb3/regs.h
++++ b/drivers/net/cxgb3/regs.h
 @@ -444,14 +444,6 @@
  
  #define A_PCIE_CFG 0x88
@@ -86,11 +65,11 @@
  #define S_PCIE_CLIDECEN    16
  #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
  #define F_PCIE_CLIDECEN    V_PCIE_CLIDECEN(1U)
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index 1b0861d..912f816 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
  }
  
  /**
@@ -124,7 +103,7 @@
   *	free_qset - free the resources of an SGE queue set
   *	@adapter: the adapter owning the queue set
   *	@q: the queue set
-@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter 
+@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -133,20 +112,11 @@
  }
  
  /**
-@@ -1429,7 +1402,7 @@ static void restart_ctrlq(unsigned long 
-  */
- int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
- {
--	int ret;
-+	int ret;
- 	local_bh_disable();
- 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
- 	local_bh_enable();
-Index: ofed_kernel/drivers/net/cxgb3/t3_hw.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/t3_hw.c
-+++ ofed_kernel/drivers/net/cxgb3/t3_hw.c
-@@ -3265,7 +3265,6 @@ static void config_pcie(struct adapter *
+diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
+index bf5c076..533fc74 100644
+--- a/drivers/net/cxgb3/t3_hw.c
++++ b/drivers/net/cxgb3/t3_hw.c
+@@ -3303,7 +3303,6 @@ static void config_pcie(struct adapter *adap)
  
  	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
  	t3_set_reg_field(adap, A_PCIE_CFG, 0,
@@ -154,15 +124,14 @@
  			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
  }
  
-@@ -3657,30 +3656,3 @@ void t3_led_ready(struct adapter *adapte
- 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+@@ -3706,31 +3705,3 @@ void t3_led_ready(struct adapter *adapter)
  			 F_GPIO0_OUT_VAL);
  }
--
+ 
 -int t3_replay_prep_adapter(struct adapter *adapter)
 -{
 -	const struct adapter_info *ai = adapter->params.info;
--	unsigned int i, j = 0;
+-	unsigned int i, j = -1;
 -	int ret;
 -
 -	early_hw_init(adapter, ai);
@@ -171,15 +140,17 @@
 -		return ret;
 -
 -	for_each_port(adapter, i) {
+-		const struct port_type_info *pti;
 -		struct port_info *p = adap2pinfo(adapter, i);
--		while (!adapter->params.vpd.port_type[j])
--			++j;
 -
--		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
--					ai->mdio_ops);
+-		while (!adapter->params.vpd.port_type[++j])
+-			;
 -
+-		pti = &port_types[adapter->params.vpd.port_type[j]];
+-		ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
+-		if (ret)
+-			return ret;
 -		p->phy.ops->power_down(&p->phy, 1);
--		++j;
 -	}
 -
 -return 0;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/cxgb3_0010_napi.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/cxgb3_0010_napi.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/cxgb3_0010_napi.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,21 @@
----
- drivers/net/cxgb3/adapter.h    |   22 +++--
- drivers/net/cxgb3/cxgb3_main.c |   94 ++++++++++++++++------
- drivers/net/cxgb3/sge.c        |  170 ++++++++++++++++++++++++-----------------
- 3 files changed, 185 insertions(+), 101 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/adapter.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/adapter.h
-+++ ofed_kernel/drivers/net/cxgb3/adapter.h
-@@ -47,13 +47,11 @@
- #include <asm/io.h>
+diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
+index 3b33ecb..21dad82 100644
+--- a/drivers/net/cxgb3/adapter.h
++++ b/drivers/net/cxgb3/adapter.h
+@@ -48,12 +48,10 @@
  
  struct vlan_group;
--struct adapter;
+ struct adapter;
 -struct sge_qset;
  
-+struct adapter;
  struct port_info {
  	struct adapter *adapter;
  	struct vlan_group *vlan_grp;
 -	struct sge_qset *qs;
- 	const struct port_type_info *port_type;
  	u8 port_id;
  	u8 rx_csum_offload;
-@@ -184,8 +182,6 @@ enum {				/* per port SGE statistics */
+ 	u8 nqsets;
+@@ -183,8 +181,6 @@ enum {				/* per port SGE statistics */
  #define T3_MAX_LRO_MAX_PKTS 64
  
  struct sge_qset {		/* an SGE queue set */
@@ -32,7 +24,7 @@
  	struct sge_rspq rspq;
  	struct sge_fl fl[SGE_RXQ_PER_SET];
  	struct sge_txq txq[SGE_TXQ_PER_SET];
-@@ -196,7 +192,7 @@ struct sge_qset {		/* an SGE queue set *
+@@ -195,7 +191,7 @@ struct sge_qset {		/* an SGE queue set */
  	int lro_enabled;
  	int lro_frag_len;
  	void *lro_va;
@@ -41,7 +33,7 @@
  	unsigned long txq_stopped;	/* which Tx queues are stopped */
  	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
  	unsigned long port_stats[SGE_PSTAT_MAX];
-@@ -241,6 +237,12 @@ struct adapter {
+@@ -240,6 +236,12 @@ struct adapter {
  	struct delayed_work adap_check_task;
  	struct work_struct ext_intr_handler_task;
  
@@ -54,7 +46,7 @@
  	struct dentry *debugfs_root;
  
  	struct mutex mdio_lock;
-@@ -267,6 +269,12 @@ static inline struct port_info *adap2pin
+@@ -266,6 +268,12 @@ static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
  	return netdev_priv(adap->port[idx]);
  }
  
@@ -67,7 +59,7 @@
  #define OFFLOAD_DEVMAP_BIT 15
  
  #define tdev2adap(d) container_of(d, struct adapter, tdev)
-@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, str
+@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -76,11 +68,11 @@
  int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
  		unsigned char *data);
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -410,17 +410,49 @@ static void setup_rss(struct adapter *ad
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 0f4c694..342d441 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -435,17 +435,49 @@ static void setup_rss(struct adapter *adap)
  		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
  }
  
@@ -98,9 +90,7 @@
 -	int i;
 +	int i, j, dummy_idx = 0;
 +	struct net_device *nd;
- 
--	for (i = 0; i < SGE_QSETS; i++) {
--		struct sge_qset *qs = &adap->sge.qs[i];
++
 +	for_each_port(adap, i) {
 +		struct net_device *dev = adap->port[i];
 +		const struct port_info *pi = netdev_priv(dev);
@@ -112,7 +102,9 @@
 +				nd = alloc_netdev(sizeof(*p), "", ether_setup);
 +				if (!nd)
 +					goto free_all;
-+
+ 
+-	for (i = 0; i < SGE_QSETS; i++) {
+-		struct sge_qset *qs = &adap->sge.qs[i];
 +				p = netdev_priv(nd);
 +				p->adapter = adap;
 +				nd->weight = 64;
@@ -137,7 +129,7 @@
  }
  
  /*
-@@ -431,18 +463,20 @@ static void init_napi(struct adapter *ad
+@@ -456,18 +488,20 @@ static void init_napi(struct adapter *adap)
  static void quiesce_rx(struct adapter *adap)
  {
  	int i;
@@ -168,7 +160,7 @@
  }
  
  /**
-@@ -455,7 +489,7 @@ static void enable_all_napi(struct adapt
+@@ -480,7 +514,7 @@ static void enable_all_napi(struct adapter *adap)
   */
  static int setup_sge_qsets(struct adapter *adap)
  {
@@ -177,7 +169,7 @@
  	unsigned int ntxq = SGE_TXQ_PER_SET;
  
  	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
-@@ -463,14 +497,15 @@ static int setup_sge_qsets(struct adapte
+@@ -488,14 +522,15 @@ static int setup_sge_qsets(struct adapter *adap)
  
  	for_each_port(adap, i) {
  		struct net_device *dev = adap->port[i];
@@ -196,7 +188,7 @@
  			if (err) {
  				t3_free_sge_resources(adap);
  				return err;
-@@ -884,6 +919,10 @@ static int cxgb_up(struct adapter *adap)
+@@ -909,6 +944,10 @@ static int cxgb_up(struct adapter *adap)
  				goto out;
  		}
  
@@ -207,7 +199,7 @@
  		err = t3_init_hw(adap, 0);
  		if (err)
  			goto out;
-@@ -896,7 +935,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -921,7 +960,6 @@ static int cxgb_up(struct adapter *adap)
  			goto out;
  
  		setup_rss(adap);
@@ -215,7 +207,7 @@
  		adap->flags |= FULL_INIT_DONE;
  	}
  
-@@ -924,7 +962,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -949,7 +987,6 @@ static int cxgb_up(struct adapter *adap)
  				      adap->name, adap)))
  		goto irq_err;
  
@@ -223,7 +215,7 @@
  	t3_sge_start(adap);
  	t3_intr_enable(adap);
  
-@@ -1061,10 +1098,8 @@ static int cxgb_open(struct net_device *
+@@ -1086,10 +1123,8 @@ static int cxgb_open(struct net_device *dev)
  	int other_ports = adapter->open_device_map & PORT_MASK;
  	int err;
  
@@ -235,7 +227,7 @@
  
  	set_bit(pi->port_id, &adapter->open_device_map);
  	if (is_offload(adapter) && !ofld_disable) {
-@@ -2700,6 +2735,7 @@ static int __devinit init_one(struct pci
+@@ -2736,6 +2771,7 @@ static int __devinit init_one(struct pci_dev *pdev,
  #ifdef CONFIG_NET_POLL_CONTROLLER
  		netdev->poll_controller = cxgb_netpoll;
  #endif
@@ -243,7 +235,7 @@
  
  		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
  	}
-@@ -2800,6 +2836,12 @@ static void __devexit remove_one(struct 
+@@ -2836,6 +2872,12 @@ static void __devexit remove_one(struct pci_dev *pdev)
  		t3_free_sge_resources(adapter);
  		cxgb_disable_msi(adapter);
  
@@ -256,11 +248,11 @@
  		for_each_port(adapter, i)
  			if (adapter->port[i])
  				free_netdev(adapter->port[i]);
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter 
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index f6bc6fe..3bbf626 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -270,7 +262,7 @@
  	memset(q, 0, sizeof(*q));
  }
  
-@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, str
+@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
  	unsigned int ndesc, pidx, credits, gen, compl;
  	const struct port_info *pi = netdev_priv(dev);
  	struct adapter *adap = pi->adapter;
@@ -279,7 +271,7 @@
  	struct sge_txq *q = &qs->txq[TXQ_ETH];
  
  	/*
-@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long 
+@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long data)
  	struct sk_buff *skb;
  	struct sge_qset *qs = (struct sge_qset *)data;
  	struct sge_txq *q = &qs->txq[TXQ_CTRL];
@@ -295,7 +287,7 @@
  
  		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
  
-@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long 
+@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long data)
  
  	spin_unlock(&q->lock);
  	wmb();
@@ -304,7 +296,7 @@
  		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
  }
  
-@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struc
+@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
  	else {
  		struct sge_qset *qs = rspq_to_qset(q);
  
@@ -314,7 +306,7 @@
  		q->rx_head = skb;
  	}
  	q->rx_tail = skb;
-@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundl
+@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev,
   *	receive handler.  Batches need to be of modest size as we do prefetches
   *	on the packets in each.
   */
@@ -357,7 +349,7 @@
  			prefetch(head->data);
  			skbs[ngathered] = head;
  			head = head->next;
-@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct 
+@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct *napi, int budget)
  		}
  		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
  	}
@@ -370,7 +362,7 @@
  }
  
  /**
-@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const
+@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const struct rsp_desc *r)
  
  /**
   *	napi_rx_handler - the NAPI handler for Rx processing
@@ -449,7 +441,7 @@
  }
  
  /**
-@@ -2448,7 +2462,8 @@ static inline int handle_responses(struc
+@@ -2448,7 +2462,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
  		return 0;
  	}
@@ -459,7 +451,7 @@
  	return 1;
  }
  
-@@ -2459,7 +2474,8 @@ static inline int handle_responses(struc
+@@ -2459,7 +2474,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -469,7 +461,7 @@
  	struct sge_rspq *q = &qs->rspq;
  
  	spin_lock(&q->lock);
-@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, vo
+@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -484,7 +476,7 @@
  		q->unhandled_irqs++;
  	spin_unlock(&q->lock);
  	return IRQ_HANDLED;
-@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, 
+@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, void *cookie)
  	return IRQ_HANDLED;
  }
  
@@ -502,7 +494,7 @@
  		return 1;
  	}
  	return 0;
-@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int 
+@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
  
  	spin_lock(&q->lock);
  
@@ -515,7 +507,7 @@
  	if (!new_packets && t3_slow_intr_handler(adap) == 0)
  		q->unhandled_irqs++;
  
-@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, voi
+@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, void *cookie)
  static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  {
  	u32 map;
@@ -527,7 +519,7 @@
  
  	t3_write_reg(adap, A_PL_CLI, 0);
  	map = t3_read_reg(adap, A_SG_DATA_INTR);
-@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq
+@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  	if (unlikely(map & F_ERRINTR))
  		t3_slow_intr_handler(adap);
  
@@ -550,7 +542,7 @@
  
  	spin_unlock(&q0->lock);
  	return IRQ_HANDLED;
-@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long d
+@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long data)
  {
  	spinlock_t *lock;
  	struct sge_qset *qs = (struct sge_qset *)data;
@@ -560,7 +552,7 @@
  
  	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
  		reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
-@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long d
+@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long data)
  		spin_unlock(&qs->txq[TXQ_OFLD].lock);
  	}
  	lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
@@ -572,7 +564,7 @@
  			u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
  
  			if (qs->fl[0].credits < qs->fl[0].size)
-@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long d
+@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long data)
   */
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
  {
@@ -586,7 +578,7 @@
  }
  
  /**
-@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_
+@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
   */
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -595,7 +587,7 @@
  {
  	int i, avail, ret = -ENOMEM;
  	struct sge_qset *q = &adapter->sge.qs[id];
-@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *ad
+@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  	}
  
  	spin_unlock_irq(&adapter->sge.reg_lock);
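
For reference, the napi backport above maps the upstream struct napi_struct usage back onto the pre-2.6.24 interface: each queue set gets a dummy net_device whose ->poll and ->weight stand in for the napi context. A rough sketch of that older poll convention as this backport uses it (dev_to_qset() and the function name are illustrative, not taken verbatim from the patch; process_responses() is the existing cxgb3 helper):

static int example_poll(struct net_device *dev, int *budget)
{
	struct port_info *pi = netdev_priv(dev);	/* dummy per-qset netdev */
	struct adapter *adap = pi->adapter;
	int limit = min(*budget, dev->quota);
	int work_done;

	work_done = process_responses(adap, dev_to_qset(dev), limit);
	*budget -= work_done;
	dev->quota -= work_done;

	if (work_done < limit) {
		netif_rx_complete(dev);	/* one-argument form on 2.6.18 */
		return 0;		/* done; leave the poll list */
	}
	return 1;			/* more work pending; stay scheduled */
}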

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,23 +3,32 @@
  drivers/infiniband/hw/ipath/ipath_user_sdma.c |    6 +++---
  2 files changed, 4 insertions(+), 4 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:37:35.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:41:22.000000000 -0800
 @@ -698,7 +698,7 @@ retry:
  
  	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
  			      tx->map_len, DMA_TO_DEVICE);
--	if (dma_mapping_error(&dd->pcidev->dev, addr)) {
-+	if (dma_mapping_error(addr)) {
- 		ret = -EIO;
- 		goto unlock;
- 	}
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+-	if (dma_mapping_error(&dd->pcidev->dev, addr))
++	if (dma_mapping_error(addr))
+ 		goto ioerr;
+ 
+ 	dwoffset = tx->map_len >> 2;
+@@ -739,7 +739,7 @@ retry:
+ 		dw = (len + 3) >> 2;
+ 		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
+ 				      DMA_TO_DEVICE);
+-		if (dma_mapping_error(&dd->pcidev->dev, addr))
++		if (dma_mapping_error(addr))
+ 			goto unmap;
+ 		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
+ 		/* SDmaUseLargeBuf has to be set in every descriptor */
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:18.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:54.000000000 -0800
 @@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(cons
  
  	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
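
For reference, the ipath change above exists because dma_mapping_error() grew a struct device argument in 2.6.27; on the 2.6.18-based target kernel it still takes only the dma_addr_t, so the patch drops the first argument at each call site. An equivalent compat shim would look roughly like this (the macro name is illustrative, not something the patch introduces):

#include <linux/version.h>
#include <linux/dma-mapping.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
#define compat_dma_mapping_error(dev, addr)	dma_mapping_error(addr)
#else
#define compat_dma_mapping_error(dev, addr)	dma_mapping_error(dev, addr)
#endif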

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/ipoib_0100_to_2.6.21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/ipoib_0100_to_2.6.21.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/ipoib_0100_to_2.6.21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -12,11 +12,11 @@
  drivers/infiniband/ulp/ipoib/ipoib_multicast.c |   10 ++--
  5 files changed, 61 insertions(+), 64 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:56:08.000000000 +0200
-@@ -279,8 +279,6 @@ struct ipoib_dev_priv {
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:46:20.000000000 +0300
+@@ -277,8 +277,6 @@ struct ipoib_dev_priv {
  
  	struct net_device *dev;
  
@@ -25,7 +25,7 @@
  	unsigned long flags;
  
  	struct mutex vlan_mutex;
-@@ -338,6 +336,8 @@ struct ipoib_dev_priv {
+@@ -336,6 +334,8 @@ struct ipoib_dev_priv {
  
  	struct ib_event_handler event_handler;
  
@@ -34,7 +34,7 @@
  	struct net_device *parent;
  	struct list_head child_intfs;
  	struct list_head list;
-@@ -425,7 +425,7 @@ extern struct workqueue_struct *ipoib_wo
+@@ -423,7 +423,7 @@ extern struct workqueue_struct *ipoib_wo
  
  /* functions */
  
@@ -43,10 +43,10 @@
  void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
  void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:46:20.000000000 +0300
 @@ -593,7 +593,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		ipoib_dbg(priv, "cm recv error "
  			   "(status=%d, wrid=%d vend_err %x)\n",
@@ -56,7 +56,7 @@
  		if (has_srq)
  			goto repost;
  		else {
-@@ -658,7 +658,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
+@@ -646,7 +646,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		 * this packet and reuse the old buffer.
  		 */
  		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
@@ -65,7 +65,7 @@
  		goto repost;
  	}
  
-@@ -676,8 +676,8 @@ copied:
+@@ -664,8 +664,8 @@ copied:
  	skb_pull(skb, IPOIB_ENCAP_LEN);
  
  	dev->last_rx = jiffies;
@@ -76,7 +76,7 @@
  
  	skb->dev = dev;
  	/* XXX get correct PACKET_ type here */
-@@ -726,8 +726,8 @@ void ipoib_cm_send(struct net_device *de
+@@ -714,8 +714,8 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(skb->len > tx->mtu)) {
  		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
  			   skb->len, tx->mtu);
@@ -87,7 +87,7 @@
  		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
  		return;
  	}
-@@ -746,7 +746,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -734,7 +734,7 @@ void ipoib_cm_send(struct net_device *de
  	tx_req->skb = skb;
  	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
  	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
@@ -96,7 +96,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -756,7 +756,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -744,7 +744,7 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
  			       addr, skb->len))) {
  		ipoib_warn(priv, "post_send failed\n");
@@ -105,7 +105,7 @@
  		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
  		dev_kfree_skb_any(skb);
  	} else {
-@@ -793,8 +793,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
+@@ -781,8 +781,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
  	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
  
  	/* FIXME: is this right? Shouldn't we only increment on success? */
@@ -116,10 +116,10 @@
  
  	dev_kfree_skb_any(tx_req->skb);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:43:16.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:49:04.000000000 +0300
 @@ -261,7 +261,7 @@ static void ipoib_ib_handle_rx_wc(struct
  	 * this packet and reuse the old buffer.
  	 */
@@ -263,60 +263,41 @@
  		--priv->tx_outstanding;
  		ipoib_dma_unmap_tx(priv->ca, tx_req);
  		dev_kfree_skb_any(skb);
-@@ -809,6 +812,7 @@ int ipoib_ib_dev_stop(struct net_device 
+@@ -708,8 +711,7 @@ int ipoib_ib_dev_open(struct net_device 
+ 	priv->poll_timer.function = ipoib_ib_tx_timer_func;
+ 	priv->poll_timer.data = (unsigned long)dev;
+ 
+-	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_enable(&priv->napi);
++	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+ 
+ 	return 0;
+ }
+@@ -828,8 +830,8 @@ int ipoib_ib_dev_stop(struct net_device 
+ 	struct ipoib_tx_buf *tx_req;
  	int i;
  
- 	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+-	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_disable(&priv->napi);
++	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 +	netif_poll_disable(dev);
  
  	ipoib_cm_dev_stop(dev);
  
-@@ -893,6 +897,7 @@ timeout:
- 		msleep(1);
- 	}
+@@ -903,6 +905,7 @@ timeout:
  
+ 	ipoib_ah_dev_cleanup(dev);
+ 
 +	netif_poll_enable(dev);
  	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
  
  	return 0;
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:55:35.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:58:18.000000000 +0200
-@@ -106,20 +106,16 @@ int ipoib_open(struct net_device *dev)
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:44:02.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:46:20.000000000 +0300
+@@ -585,7 +585,7 @@ static void neigh_add_path(struct sk_buf
  
- 	ipoib_dbg(priv, "bringing up interface\n");
- 
--	if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
--		napi_enable(&priv->napi);
-+	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- 
- 	if (ipoib_pkey_dev_delay_open(dev))
- 		return 0;
- 
--	if (ipoib_ib_dev_open(dev)) {
--		napi_disable(&priv->napi);
--		return -EINVAL;
--	}
-+	if (ipoib_ib_dev_open(dev))
-+  		return -EINVAL;
- 
- 	if (ipoib_ib_dev_up(dev)) {
- 		ipoib_ib_dev_stop(dev, 1);
--		napi_disable(&priv->napi);
- 		return -EINVAL;
- 	}
- 
-@@ -152,7 +148,6 @@ static int ipoib_stop(struct net_device 
- 	ipoib_dbg(priv, "stopping interface\n");
- 
- 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
--	napi_disable(&priv->napi);
- 
- 	netif_stop_queue(dev);
- 
-@@ -604,7 +599,7 @@ static void neigh_add_path(struct sk_buf
- 
  	neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
  	if (!neigh) {
 -		++dev->stats.tx_dropped;
@@ -324,7 +305,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -667,7 +662,7 @@ err_list:
+@@ -646,7 +646,7 @@ err_list:
  err_path:
  	ipoib_neigh_free(dev, neigh);
  err_drop:
@@ -333,7 +314,7 @@
  	dev_kfree_skb_any(skb);
  
  	spin_unlock_irqrestore(&priv->lock, flags);
-@@ -714,7 +709,7 @@ static void unicast_arp_send(struct sk_b
+@@ -697,7 +697,7 @@ static void unicast_arp_send(struct sk_b
  			} else
  				__path_add(dev, path);
  		} else {
@@ -342,7 +323,7 @@
  			dev_kfree_skb_any(skb);
  		}
  
-@@ -733,7 +728,7 @@ static void unicast_arp_send(struct sk_b
+@@ -716,7 +716,7 @@ static void unicast_arp_send(struct sk_b
  		skb_push(skb, sizeof *phdr);
  		__skb_queue_tail(&path->queue, skb);
  	} else {
@@ -351,7 +332,7 @@
  		dev_kfree_skb_any(skb);
  	}
  
-@@ -790,7 +785,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -773,7 +773,7 @@ static int ipoib_start_xmit(struct sk_bu
  			__skb_queue_tail(&neigh->queue, skb);
  			spin_unlock_irqrestore(&priv->lock, flags);
  		} else {
@@ -360,7 +341,7 @@
  			dev_kfree_skb_any(skb);
  		}
  	} else {
-@@ -816,7 +811,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -799,7 +799,7 @@ static int ipoib_start_xmit(struct sk_bu
  					   IPOIB_QPN(phdr->hwaddr),
  					   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
  				dev_kfree_skb_any(skb);
@@ -369,7 +350,7 @@
  				return NETDEV_TX_OK;
  			}
  
-@@ -842,7 +837,7 @@ static void ipoib_timeout(struct net_dev
+@@ -825,7 +825,7 @@ static void ipoib_timeout(struct net_dev
  static int ipoib_hard_header(struct sk_buff *skb,
  			     struct net_device *dev,
  			     unsigned short type,
@@ -378,7 +359,7 @@
  {
  	struct ipoib_header *header;
  
-@@ -931,9 +926,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
+@@ -914,9 +914,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
  void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
  {
  	struct sk_buff *skb;
@@ -390,7 +371,7 @@
  		dev_kfree_skb_any(skb);
  	}
  	if (ipoib_cm_get(neigh))
-@@ -1008,10 +1004,6 @@ void ipoib_dev_cleanup(struct net_device
+@@ -991,10 +992,6 @@ void ipoib_dev_cleanup(struct net_device
  	priv->tx_ring = NULL;
  }
  
@@ -401,7 +382,7 @@
  static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
  		       void **tcph, u64 *hdr_flags, void *priv)
  {
-@@ -1069,13 +1061,13 @@ static void ipoib_setup(struct net_devic
+@@ -1052,13 +1049,13 @@ static void ipoib_setup(struct net_devic
  	dev->change_mtu		 = ipoib_change_mtu;
  	dev->hard_start_xmit	 = ipoib_start_xmit;
  	dev->tx_timeout		 = ipoib_timeout;
@@ -418,10 +399,10 @@
  
  	dev->watchdog_timeo	 = HZ;
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:46:20.000000000 +0300
 @@ -100,7 +100,7 @@ static void ipoib_mcast_free(struct ipoi
  	}
  
@@ -448,7 +429,7 @@
  			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
  		}
  		netif_tx_unlock_bh(dev);
-@@ -667,7 +668,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -677,7 +678,7 @@ void ipoib_mcast_send(struct net_device 
  	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)		||
  	    !priv->broadcast					||
  	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
@@ -457,7 +438,7 @@
  		dev_kfree_skb_any(skb);
  		goto unlock;
  	}
-@@ -682,7 +683,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -692,7 +693,7 @@ void ipoib_mcast_send(struct net_device 
  		if (!mcast) {
  			ipoib_warn(priv, "unable to allocate memory for "
  				   "multicast structure\n");
@@ -466,7 +447,7 @@
  			dev_kfree_skb_any(skb);
  			goto out;
  		}
-@@ -697,7 +698,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -707,7 +708,7 @@ void ipoib_mcast_send(struct net_device 
  		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
  			skb_queue_tail(&mcast->pkt_queue, skb);
  		else {

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,26 @@
+---
+ drivers/infiniband/ulp/ipoib/ipoib_fs.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+===================================================================
+--- ofa_kernel-1.4.orig/drivers/infiniband/ulp/ipoib/ipoib_fs.c
++++ ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+@@ -134,7 +134,7 @@ static int ipoib_mcg_open(struct inode *
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_mcg_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_mcg_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -240,7 +240,7 @@ static int ipoib_path_open(struct inode 
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_path_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_path_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/iw_nes_050_to_2_6_24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/iw_nes_050_to_2_6_24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/iw_nes_050_to_2_6_24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:23:23.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -1118,19 +1118,19 @@ static ssize_t nes_show_wqm_quanta(struc
  static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
  					const char *buf, size_t count)

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/iw_nes_100_to_2_6_23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/iw_nes_100_to_2_6_23.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/iw_nes_100_to_2_6_23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/Kconfig nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig
---- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:38:45.000000000 -0500
 @@ -2,7 +2,6 @@ config INFINIBAND_NES
  	tristate "NetEffect RNIC Driver"
  	depends on PCI && INET && INFINIBAND
@@ -10,8 +10,8 @@
  	  This is a low-level driver for NetEffect RDMA enabled
  	  Network Interface Cards (RNIC).
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -360,10 +360,11 @@ struct ib_qp *nes_get_qp(struct ib_devic
   */
  static void nes_print_macaddr(struct net_device *netdev)
@@ -29,9 +29,9 @@
  
  /**
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_cm.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2008-11-07 15:26:56.000000000 -0600
-@@ -998,7 +998,6 @@ static int nes_addr_resolve_neigh(struct
+--- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1086,7 +1086,6 @@ static int nes_addr_resolve_neigh(struct
  	struct flowi fl;
  	struct neighbour *neigh;
  	int rc = -1;
@@ -39,7 +39,7 @@
  
  	memset(&fl, 0, sizeof fl);
  	fl.nl_u.ip4_u.daddr = htonl(dst_ip);
-@@ -1012,8 +1011,11 @@ static int nes_addr_resolve_neigh(struct
+@@ -1100,8 +1099,11 @@ static int nes_addr_resolve_neigh(struct
  	if (neigh) {
  		if (neigh->nud_state & NUD_VALID) {
  			nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -53,7 +53,7 @@
  			nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
  					     dst_ip, NES_ARP_ADD);
  			rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
-@@ -1042,7 +1044,6 @@ static struct nes_cm_node *make_cm_node(
+@@ -1130,7 +1132,6 @@ static struct nes_cm_node *make_cm_node(
  	int arpindex = 0;
  	struct nes_device *nesdev;
  	struct nes_adapter *nesadapter;
@@ -61,7 +61,7 @@
  
  	/* create an hte and cm_node for this instance */
  	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
-@@ -1111,8 +1112,11 @@ static struct nes_cm_node *make_cm_node(
+@@ -1197,8 +1198,11 @@ static struct nes_cm_node *make_cm_node(
  
  	/* copy the mac addr to node context */
  	memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
@@ -76,8 +76,8 @@
  	add_hte_node(cm_core, cm_node);
  	atomic_inc(&cm_nodes_created);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2008-11-07 15:38:18.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:38:45.000000000 -0500
 @@ -38,14 +38,9 @@
  #include <linux/ip.h>
  #include <linux/tcp.h>
@@ -90,10 +90,10 @@
 -module_param(nes_lro_max_aggr, uint, 0444);
 -MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
 -
- static u32 crit_err_count;
- u32 int_mod_timer_init;
- u32 int_mod_cq_depth_256;
-@@ -1540,25 +1535,6 @@ static void nes_rq_wqes_timeout(unsigned
+ static int wide_ppm_offset;
+ module_param(wide_ppm_offset, int, 0644);
+ MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm");
+@@ -1531,25 +1526,6 @@ static void nes_rq_wqes_timeout(unsigned
  }
  
  
@@ -119,7 +119,7 @@
  /**
   * nes_init_nic_qp
   */
-@@ -1783,14 +1759,6 @@ int nes_init_nic_qp(struct nes_device *n
+@@ -1773,14 +1749,6 @@ int nes_init_nic_qp(struct nes_device *n
  			jumbomode = 1;
  		nes_nic_init_timer_defaults(nesdev, jumbomode);
  	}
@@ -134,7 +134,7 @@
  	return 0;
  }
  
-@@ -2541,7 +2509,7 @@ static void nes_nic_napi_ce_handler(stru
+@@ -2556,7 +2524,7 @@ static void nes_nic_napi_ce_handler(stru
  {
  	struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
  
@@ -143,7 +143,7 @@
  }
  
  
-@@ -2576,13 +2544,10 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2591,13 +2559,10 @@ void nes_nic_ce_handler(struct nes_devic
  	u16 pkt_type;
  	u16 rqes_processed = 0;
  	u8 sq_cqes = 0;
@@ -157,7 +157,7 @@
  	do {
  		if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
  				NES_NIC_CQE_VALID) {
-@@ -2714,17 +2679,9 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2729,17 +2694,9 @@ void nes_nic_ce_handler(struct nes_devic
  							>> 16);
  					nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
  							nesvnic->netdev->name, vlan_tag);
@@ -178,7 +178,7 @@
  
  skip_rx_indicate0:
  				nesvnic->netdev->last_rx = jiffies;
-@@ -2755,8 +2712,6 @@ skip_rx_indicate0:
+@@ -2770,8 +2727,6 @@ skip_rx_indicate0:
  
  	} while (1);
  
@@ -188,18 +188,18 @@
  		barrier();
  		/* restart the queue if it had been stopped */
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.h nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:38:45.000000000 -0500
 @@ -33,8 +33,6 @@
  #ifndef __NES_HW_H
  #define __NES_HW_H
  
 -#include <linux/inet_lro.h>
 -
+ #define NES_PHY_TYPE_CX4       1
  #define NES_PHY_TYPE_1G        2
  #define NES_PHY_TYPE_IRIS      3
- #define NES_PHY_TYPE_ARGUS     4
-@@ -991,8 +989,6 @@ struct nes_hw_tune_timer {
+@@ -1005,8 +1003,6 @@ struct nes_hw_tune_timer {
  #define NES_TIMER_ENABLE_LIMIT      4
  #define NES_MAX_LINK_INTERRUPTS     128
  #define NES_MAX_LINK_CHECK          200
@@ -208,7 +208,7 @@
  
  struct nes_adapter {
  	u64              fw_ver;
-@@ -1168,7 +1164,6 @@ struct nes_vnic {
+@@ -1194,7 +1190,6 @@ struct nes_vnic {
  	u32               msg_enable;
  	/* u32 tx_avail; */
  	__be32            local_ipaddr;
@@ -216,7 +216,7 @@
  	spinlock_t           tx_lock;	/* could use netdev tx lock? */
  	struct timer_list    rq_wqes_timer;
  	u32                  nic_mem_size;
-@@ -1196,9 +1191,6 @@ struct nes_vnic {
+@@ -1222,9 +1217,6 @@ struct nes_vnic {
  	u8  of_device_registered;
  	u8  rdma_enabled;
  	u8  rx_checksum_disabled;
@@ -227,8 +227,8 @@
  
  struct nes_ib_device {
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
 @@ -96,35 +96,38 @@ static int nics_per_function = 1;
  /**
   * nes_netdev_poll
@@ -292,7 +292,7 @@
  	netif_stop_queue(netdev);
  	list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
  		first_nesvnic = container_of(list_pos, struct nes_vnic, list);
-@@ -797,14 +798,16 @@ static int nes_netdev_set_mac_address(st
+@@ -775,14 +776,16 @@ static int nes_netdev_set_mac_address(st
  	int i;
  	u32 macaddr_low;
  	u16 macaddr_high;
@@ -312,7 +312,7 @@
  	macaddr_high  = ((u16)netdev->dev_addr[0]) << 8;
  	macaddr_high += (u16)netdev->dev_addr[1];
  	macaddr_low   = ((u32)netdev->dev_addr[2]) << 24;
-@@ -909,11 +912,11 @@ static void nes_netdev_set_multicast_lis
+@@ -887,11 +890,11 @@ static void nes_netdev_set_multicast_lis
  			if (mc_index >= max_pft_entries_avaiable)
  				break;
  			if (multicast_addr) {
@@ -329,7 +329,7 @@
  				macaddr_high  = ((u16)multicast_addr->dmi_addr[0]) << 8;
  				macaddr_high += (u16)multicast_addr->dmi_addr[1];
  				macaddr_low   = ((u32)multicast_addr->dmi_addr[2]) << 24;
-@@ -1063,9 +1066,6 @@ static const char nes_ethtool_stringset[
+@@ -1040,9 +1043,6 @@ static const char nes_ethtool_stringset[
  	"CQ Depth 32",
  	"CQ Depth 128",
  	"CQ Depth 256",
@@ -339,17 +339,17 @@
  };
  
  #define NES_ETHTOOL_STAT_COUNT  ARRAY_SIZE(nes_ethtool_stringset)
-@@ -1263,9 +1263,6 @@ static void nes_netdev_get_ethtool_stats
- 	target_stat_values[52] = int_mod_cq_depth_32;
- 	target_stat_values[53] = int_mod_cq_depth_128;
- 	target_stat_values[54] = int_mod_cq_depth_256;
--	target_stat_values[55] = nesvnic->lro_mgr.stats.aggregated;
--	target_stat_values[56] = nesvnic->lro_mgr.stats.flushed;
--	target_stat_values[57] = nesvnic->lro_mgr.stats.no_desc;
+@@ -1240,9 +1240,6 @@ static void nes_netdev_get_ethtool_stats
+ 	target_stat_values[++index] = int_mod_cq_depth_32;
+ 	target_stat_values[++index] = int_mod_cq_depth_128;
+ 	target_stat_values[++index] = int_mod_cq_depth_256;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
  
  }
  
-@@ -1542,8 +1539,6 @@ static struct ethtool_ops nes_ethtool_op
+@@ -1525,8 +1522,6 @@ static struct ethtool_ops nes_ethtool_op
  	.set_sg = ethtool_op_set_sg,
  	.get_tso = ethtool_op_get_tso,
  	.set_tso = ethtool_op_set_tso,
@@ -358,7 +358,7 @@
  };
  
  
-@@ -1615,7 +1610,8 @@ struct net_device *nes_netdev_init(struc
+@@ -1598,7 +1593,8 @@ struct net_device *nes_netdev_init(struc
  	netdev->type = ARPHRD_ETHER;
  	netdev->features = NETIF_F_HIGHDMA;
  	netdev->ethtool_ops = &nes_ethtool_ops;
@@ -368,7 +368,7 @@
  	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
  	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
  	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
-@@ -1648,7 +1644,6 @@ struct net_device *nes_netdev_init(struc
+@@ -1630,7 +1626,6 @@ struct net_device *nes_netdev_init(struc
  
  	if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
  		netdev->features |= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/iw_nes_200_to_2_6_22.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/iw_nes_200_to_2_6_22.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/iw_nes_200_to_2_6_22.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:52:27.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -593,16 +593,22 @@ static int __devinit nes_probe(struct pc
  						nesdev->nesadapter->port_count;
  	}
@@ -35,9 +35,9 @@
  
  	tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.h nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h
---- linux-2.6/drivers/infiniband/hw/nes/nes.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2008-11-07 15:52:27.000000000 -0600
-@@ -276,14 +276,7 @@ static inline __le32 get_crc_value(struc
+--- linux-2.6/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:38:45.000000000 -0500
+@@ -274,14 +274,7 @@ static inline __le32 get_crc_value(struc
  	u32 crc_value;
  	crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad));
  

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/iw_nes_210_to_2_6_21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/iw_nes_210_to_2_6_21.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/iw_nes_210_to_2_6_21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,23 @@
+diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1549,6 +1549,11 @@ static void nes_netdev_vlan_rx_register(
+ 	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ }
+ 
++static void nes_netdev_vlan_rx_kill_vid(struct net_device *netdev, 
++					unsigned short vid)
++{
++}
++
+ 
+ /**
+  * nes_netdev_init - initialize network device
+@@ -1598,6 +1603,7 @@ struct net_device *nes_netdev_init(struc
+ 	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
+ 	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ 	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
++	netdev->vlan_rx_kill_vid = nes_netdev_vlan_rx_kill_vid;
+ 
+ 	/* Fill in the port structure */
+ 	nesvnic->netdev = netdev;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/mlx4_en_0099_no_multiqueue.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/mlx4_en_0099_no_multiqueue.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/mlx4_en_0099_no_multiqueue.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,73 @@
+diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
+index bce700a..a9ee0d7 100644
+--- a/drivers/net/mlx4/en_netdev.c
++++ b/drivers/net/mlx4/en_netdev.c
+@@ -963,7 +963,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	int i;
+ 	int err;
+ 
+-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
++	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+ 	if (dev == NULL) {
+ 		mlx4_err(mdev, "Net device allocation failed\n");
+ 		return -ENOMEM;
+@@ -1036,7 +1036,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	dev->open = &mlx4_en_open;
+ 	dev->stop = &mlx4_en_close;
+ 	dev->hard_start_xmit = &mlx4_en_xmit;
+-	dev->select_queue = &mlx4_en_select_queue;
+ 	dev->get_stats = &mlx4_en_get_stats;
+ 	dev->set_multicast_list = &mlx4_en_set_multicast;
+ 	dev->set_mac_address = &mlx4_en_set_mac;
+diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
+index 3d8246f..72e166f 100644
+--- a/drivers/net/mlx4/en_tx.c
++++ b/drivers/net/mlx4/en_tx.c
+@@ -392,7 +392,7 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+ 			 *   transmission on that ring would stop the queue.
+ 			 */
+ 			ring->blocked = 0;
+-			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
++			netif_wake_queue(dev);
+ 			priv->port_stats.wake_queue++;
+ 		}
+ 	}
+@@ -612,7 +612,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
+ 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ }
+ 
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
++static int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	u16 vlan_tag = 0;
+@@ -703,7 +703,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+-	tx_ind = skb->queue_mapping;
++	tx_ind = mlx4_en_select_queue(dev, skb);
+ 	ring = &priv->tx_ring[tx_ind];
+ 	if (priv->vlgrp && vlan_tx_tag_present(skb))
+ 		vlan_tag = vlan_tx_tag_get(skb);
+@@ -713,7 +713,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ 		/* every full Tx ring stops queue.
+ 		 * TODO: implement multi-queue support (per-queue stop) */
+-		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
++		netif_stop_queue(dev);
+ 		ring->blocked = 1;
+ 		priv->port_stats.queue_stopped++;
+ 
+diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
+index adef17c..995e318 100644
+--- a/drivers/net/mlx4/mlx4_en.h
++++ b/drivers/net/mlx4/mlx4_en.h
+@@ -521,7 +521,6 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq);
+ void mlx4_en_poll_tx_cq(unsigned long data);
+ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+ 
+ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+ 			   u32 size, u16 stride);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/mlx4_en_0100_to_2.6.24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/mlx4_en_0100_to_2.6.24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/mlx4_en_0100_to_2.6.24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,33 +1,5 @@
-From 6d0a6cc6981404e95297924c9a8f169b3447a98e Mon Sep 17 00:00:00 2001
-From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
-Date: Thu, 2 Oct 2008 11:26:30 +0300
-Subject: [PATCH] mlx4_en: LRO backport patch
-
-Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
----
- drivers/net/mlx4/Makefile    |    2 +-
- drivers/net/mlx4/en_cq.c     |   23 ++-
- drivers/net/mlx4/en_lro.c    |  529 ++++++++++++++++++++++++++++++++++++++++++
- drivers/net/mlx4/en_netdev.c |    5 +-
- drivers/net/mlx4/en_params.c |   19 +--
- drivers/net/mlx4/en_rx.c     |  121 +++-------
- drivers/net/mlx4/en_tx.c     |    1 +
- drivers/net/mlx4/mlx4_en.h   |   60 +++++-
- 8 files changed, 646 insertions(+), 114 deletions(-)
- create mode 100644 drivers/net/mlx4/en_lro.c
-
-diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
-index acb1b91..e6bfdec 100644
---- a/drivers/net/mlx4/Makefile
-+++ b/drivers/net/mlx4/Makefile
-@@ -5,4 +5,4 @@ mlx4_core-y :=  alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-
- obj-$(CONFIG_MLX4_EN)                  += mlx4_en.o
-
--mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o
-+mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o en_lro.o
 diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
-index cf9c5df..2348bfc 100644
+index a4d6e46..440633a 100644
 --- a/drivers/net/mlx4/en_cq.c
 +++ b/drivers/net/mlx4/en_cq.c
 @@ -34,6 +34,7 @@
@@ -38,7 +10,7 @@
  
  #include "mlx4_en.h"
  
-@@ -100,8 +101,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -106,8 +107,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  		cq->timer.function = mlx4_en_poll_tx_cq;
  		cq->timer.data = (unsigned long) cq;
  	} else {
@@ -58,7 +30,7 @@
  	}
  
  	return 0;
-@@ -123,8 +133,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -129,8 +139,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  
  	if (cq->is_tx)
  		del_timer(&cq->timer);
@@ -74,548 +46,8 @@
  
  	mlx4_cq_free(mdev->dev, &cq->mcq);
  }
-diff --git a/drivers/net/mlx4/en_lro.c b/drivers/net/mlx4/en_lro.c
-new file mode 100644
-index 0000000..9d17376
---- /dev/null
-+++ b/drivers/net/mlx4/en_lro.c
-@@ -0,0 +1,534 @@
-+/*
-+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses.  You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ *     Redistribution and use in source and binary forms, with or
-+ *     without modification, are permitted provided that the following
-+ *     conditions are met:
-+ *
-+ *      - Redistributions of source code must retain the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer.
-+ *
-+ *      - Redistributions in binary form must reproduce the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer in the documentation and/or other materials
-+ *        provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ */
-+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <net/tcp.h>
-+#include <linux/if_vlan.h>
-+#include <linux/delay.h>
-+
-+#include "mlx4_en.h"
-+
-+/* LRO hash function - using sum of source and destination port LSBs is
-+ * good enough */
-+#define LRO_INDEX(th, size) \
-+	((*((u8*) &th->source + 1) + *((u8*) &th->dest + 1)) & (size - 1))
-+
-+/* #define CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+static void mlx4_en_lro_validate(struct mlx4_en_priv* priv, struct mlx4_en_lro *lro)
-+{
-+	int i;
-+	int size, size2;
-+	struct sk_buff *skb = lro->skb;
-+	skb_frag_t *frags;
-+	int len, len2;
-+	int cur_skb = 0;
-+
-+	/* Sum fragment sizes of first skb */
-+	len = skb->len;
-+	size = skb_headlen(skb);
-+	frags = skb_shinfo(skb)->frags;
-+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+		size += frags[i].size;
-+	}
-+
-+	/* Add in fragments of linked skb's */
-+	skb = skb_shinfo(skb)->frag_list;
-+	while (skb) {
-+		cur_skb++;
-+		len2 = skb->len;
-+		if (skb_headlen(skb)) {
-+			mlx4_err(priv->mdev, "Bad LRO format: non-zero headlen "
-+				  "in fraglist (skb:%d)\n", cur_skb);
-+			return;
-+		}
-+
-+		size2 = 0;
-+		frags = skb_shinfo(skb)->frags;
-+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+			size2 += frags[i].size;
-+		}
-+
-+		if (size2 != len2) {
-+			mlx4_err(priv->mdev, "Bad skb size:%d in LRO fraglist. "
-+			          "Expected:%d (skb:%d)\n", size2, len2, cur_skb);
-+			return;
-+		}
-+		size += size2;
-+		skb = skb->next;
-+	}
-+
-+	if (size != len)
-+		mlx4_err(priv->mdev, "Bad LRO size:%d expected:%d\n", size, len);
-+}
-+#endif /* MLX4_EN_DEBUG_LRO */
-+
-+static void mlx4_en_lro_flush_single(struct mlx4_en_priv* priv,
-+		   struct mlx4_en_rx_ring* ring, struct mlx4_en_lro *lro)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct sk_buff *skb = lro->skb;
-+	struct iphdr *iph = (struct iphdr *) skb->data;
-+	struct tcphdr *th = (struct tcphdr *)(iph + 1);
-+	unsigned int headlen = skb_headlen(skb);
-+	__wsum tcp_hdr_csum;
-+	u32 *ts;
-+
-+	/* Update IP length and checksum */
-+	iph->tot_len = htons(lro->tot_len);
-+	iph->check = 0;
-+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-+
-+	/* Update latest TCP ack, window, psh, and timestamp */
-+	th->ack_seq = lro->ack_seq;
-+	th->window = lro->window;
-+	th->psh = !!lro->psh;
-+	if (lro->has_timestamp) {
-+		ts = (u32 *) (th + 1);
-+		ts[1] = htonl(lro->tsval);
-+		ts[2] = lro->tsecr;
-+	}
-+	th->check = 0;
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, 0);
-+	lro->data_csum = csum_add(lro->data_csum, tcp_hdr_csum);
-+	th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
-+				      lro->tot_len - (iph->ihl << 2),
-+				      IPPROTO_TCP, lro->data_csum);
-+
-+	/* Update skb */
-+	skb->len = lro->tot_len;
-+	skb->data_len = lro->tot_len - headlen;
-+	skb->truesize = skb->len + sizeof(struct sk_buff);
-+	skb_shinfo(skb)->gso_size = lro->mss;
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+	mlx4_en_lro_validate(priv, lro);
-+#endif /* CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+	/* Push it up the stack */
-+	if (priv->vlgrp && lro->has_vlan)
-+		vlan_hwaccel_receive_skb(skb, priv->vlgrp,
-+					be16_to_cpu(lro->vlan_prio));
-+	else
-+		netif_receive_skb(skb);
-+	priv->dev->last_rx = jiffies;
-+
-+	/* Increment stats */
-+	priv->port_stats.lro_flushed++;
-+
-+	/* Move session back to the free list */
-+	hlist_del(&lro->node);
-+	hlist_del(&lro->flush_node);
-+	hlist_add_head(&lro->node, &ring->lro_free);
-+}
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_flush, flush_node) {
-+		if (all || time_after(jiffies, lro->expires))
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+	}
-+}
-+
-+static inline int mlx4_en_lro_append(struct mlx4_en_priv *priv,
-+				   struct mlx4_en_lro *lro,
-+				   struct mlx4_en_rx_desc *rx_desc,
-+				   struct skb_frag_struct *skb_frags,
-+				   struct mlx4_en_rx_alloc *page_alloc,
-+				   unsigned int data_len,
-+				   int hlen)
-+{
-+	struct sk_buff *skb = lro->skb_last;
-+	struct skb_shared_info *info;
-+	struct skb_frag_struct *frags_copy;
-+	int nr_frags;
-+
-+	if (skb_shinfo(skb)->nr_frags + priv->num_frags > MAX_SKB_FRAGS)
-+		return -ENOMEM;
-+
-+	info = skb_shinfo(skb);
-+
-+	/* Copy fragments from descriptor ring to skb */
-+	frags_copy = info->frags + info->nr_frags;
-+	nr_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
-+						frags_copy,
-+						page_alloc,
-+						data_len + hlen);
-+	if (!nr_frags) {
-+		mlx4_dbg(DRV, priv, "Failed completing rx desc during LRO append\n");
-+		return -ENOMEM;
-+	}
-+
-+	/* Skip over headers */
-+	frags_copy[0].page_offset += hlen;
-+
-+	if (nr_frags == 1)
-+		frags_copy[0].size = data_len;
-+	else {
-+		/* Adjust size of last fragment to match packet length.
-+		 * Note: if this fragment is also the first one, the
-+		 *       operation is completed in the next line */
-+		frags_copy[nr_frags - 1].size = hlen + data_len -
-+				priv->frag_info[nr_frags - 1].frag_prefix_size;
-+
-+		/* Adjust size of first fragment */
-+		frags_copy[0].size -= hlen;
-+	}
-+
-+	/* Update skb bookkeeping */
-+	skb->len += data_len;
-+	skb->data_len += data_len;
-+	info->nr_frags += nr_frags;
-+	return 0;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_find_session(struct mlx4_en_dev *mdev,
-+						       struct mlx4_en_rx_ring *ring,
-+						       struct iphdr *iph,
-+						       struct tcphdr *th)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node;
-+	int index = LRO_INDEX(th, mdev->profile.num_lro);
-+	struct hlist_head *list = &ring->lro_hash[index];
-+
-+	hlist_for_each_entry(lro, node, list, node) {
-+		if (lro->sport_dport == *((u32*) &th->source) &&
-+		    lro->saddr == iph->saddr &&
-+		    lro->daddr == iph->daddr)
-+			return lro;
-+	}
-+	return NULL;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_alloc_session(struct mlx4_en_priv *priv,
-+							struct mlx4_en_rx_ring *ring)
-+{
-+	return hlist_empty(&ring->lro_free) ? NULL :
-+		hlist_entry(ring->lro_free.first, struct mlx4_en_lro, node);
-+}
-+
-+static __wsum mlx4_en_lro_tcp_data_csum(struct iphdr *iph,
-+					struct tcphdr *th, int len)
-+{
-+	__wsum tcp_csum;
-+	__wsum tcp_hdr_csum;
-+	__wsum tcp_ps_hdr_csum;
-+
-+	tcp_csum = ~csum_unfold(th->check);
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, tcp_csum);
-+
-+	tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-+					     len + (th->doff << 2),
-+					     IPPROTO_TCP, 0);
-+
-+	return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
-+			tcp_ps_hdr_csum);
-+}
-+
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+					  struct mlx4_en_rx_desc *rx_desc,
-+					  struct skb_frag_struct *skb_frags,
-+					  unsigned int length,
-+					  struct mlx4_cqe *cqe)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct mlx4_en_lro *lro;
-+	struct sk_buff *skb;
-+	struct iphdr *iph;
-+	struct tcphdr *th;
-+	dma_addr_t dma;
-+	int tcp_hlen;
-+	int tcp_data_len;
-+	int hlen;
-+	u16 ip_len;
-+	void *va;
-+	u32 *ts;
-+	u32 seq;
-+	u32 tsval = (u32) ~0UL;
-+	u32 tsecr = 0;
-+	u32 ack_seq;
-+	u16 window;
-+
-+	/* This packet is eligible for LRO if it is:
-+	 * - DIX Ethernet (type interpretation)
-+	 * - TCP/IP (v4)
-+	 * - without IP options
-+	 * - not an IP fragment */
-+	if (!mlx4_en_can_lro(cqe->status))
-+			return -1;
-+
-+	/* Get pointer to TCP header. We already know that the packet is DIX Ethernet/IPv4/TCP
-+	 * with no VLAN (HW stripped it) and no IP options */
-+	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
-+	iph = va + ETH_HLEN;
-+	th = (struct tcphdr *)(iph + 1);
-+
-+	/* Synchronsize headers for processing */
-+	dma = be64_to_cpu(rx_desc->data[0].addr);
-+#define MAX_LRO_HEADER		(ETH_HLEN + \
-+				 sizeof(*iph) + \
-+				 sizeof(*th) + \
-+				 TCPOLEN_TSTAMP_ALIGNED)
-+	dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
-+				      MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+
-+	/* We only handle aligned timestamp options */
-+	tcp_hlen = (th->doff << 2);
-+	if (tcp_hlen == sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) {
-+		ts = (u32*) (th + 1);
-+		if (unlikely(*ts != htonl((TCPOPT_NOP << 24) |
-+					  (TCPOPT_NOP << 16) |
-+					  (TCPOPT_TIMESTAMP << 8) |
-+					  TCPOLEN_TIMESTAMP)))
-+			goto sync_device;
-+		tsval = ntohl(ts[1]);
-+		tsecr = ts[2];
-+	} else if (tcp_hlen != sizeof(*th))
-+		goto sync_device;
-+	
-+
-+	/* At this point we know we have a TCP packet that is likely to be
-+	 * eligible for LRO. Therefore, see now if we have an oustanding
-+	 * session that corresponds to this packet so we could flush it if
-+	 * something still prevents LRO */
-+	lro = mlx4_en_lro_find_session(mdev, ring, iph, th);
-+
-+	/* ensure no bits set besides ack or psh */
-+	if (th->fin || th->syn || th->rst || th->urg || th->ece ||
-+	    th->cwr || !th->ack) {
-+		if (lro) {
-+			/* First flush session to keep packets in-order */
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		}
-+		goto sync_device;
-+	}
-+
-+	/* Get ip length and verify that the frame is big enough */
-+	ip_len = ntohs(iph->tot_len);
-+	if (unlikely(length < ETH_HLEN + ip_len)) {
-+		mlx4_warn(mdev, "Cannot LRO - ip payload exceeds frame!\n");
-+		goto sync_device;
-+	}
-+
-+	/* Get TCP payload length */
-+	tcp_data_len = ip_len - tcp_hlen - sizeof(struct iphdr);
-+	seq = ntohl(th->seq);
-+	if (!tcp_data_len)
-+		goto flush_session;
-+
-+	if (lro) {
-+		/* Check VLAN tag */
-+		if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+			if (cqe->sl_vid != lro->vlan_prio || !lro->has_vlan) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else if (lro->has_vlan) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* Check sequence number */
-+		if (unlikely(seq != lro->next_seq)) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* If the cummulative IP length is over 64K, flush and start
-+		 * a new session */
-+		if (lro->tot_len + tcp_data_len > 0xffff) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto new_session;
-+		}
-+
-+		/* Check timestamps */
-+		if (tcp_hlen != sizeof(*th)) {
-+			if (unlikely(lro->tsval > tsval || !tsecr))
-+				goto sync_device;
-+		}
-+
-+		window = th->window;
-+		ack_seq = th->ack_seq;
-+		if (likely(tcp_data_len)) {
-+			/* Append the data! */
-+			hlen = ETH_HLEN + sizeof(struct iphdr) + tcp_hlen;
-+			if (mlx4_en_lro_append(priv, lro, rx_desc, skb_frags,
-+							ring->page_alloc,
-+							tcp_data_len, hlen)) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else {
-+			/* No data */
-+			dma_sync_single_range_for_device(&mdev->dev->pdev->dev, dma,
-+							 0, MAX_LRO_HEADER,
-+							 DMA_FROM_DEVICE);
-+		}
-+
-+		/* Update session */
-+		lro->psh |= th->psh;
-+		lro->next_seq += tcp_data_len;
-+		lro->data_csum = csum_block_add(lro->data_csum,
-+					mlx4_en_lro_tcp_data_csum(iph, th,
-+								  tcp_data_len),
-+					lro->tot_len);
-+		lro->tot_len += tcp_data_len;
-+		lro->tsval = tsval;
-+		lro->tsecr = tsecr;
-+		lro->ack_seq = ack_seq;
-+		lro->window = window;
-+		if (tcp_data_len > lro->mss)
-+			lro->mss = tcp_data_len;
-+		priv->port_stats.lro_aggregated++;
-+		if (th->psh)
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		return 0;
-+	}
-+
-+new_session:
-+	if (th->psh)
-+		goto sync_device;
-+	lro = mlx4_en_lro_alloc_session(priv, ring);
-+	if (lro) {
-+		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags, ring->page_alloc,
-+							     ETH_HLEN + ip_len);
-+		if (skb) {
-+			int index;
-+
-+			/* Add in the skb */
-+			lro->skb = skb;
-+			lro->skb_last = skb;
-+			skb->protocol = eth_type_trans(skb, priv->dev);
-+			skb->ip_summed = CHECKSUM_UNNECESSARY;
-+
-+			/* Initialize session */
-+			lro->saddr = iph->saddr;
-+			lro->daddr = iph->daddr;
-+			lro->sport_dport = *((u32*) &th->source);
-+
-+			lro->next_seq = seq + tcp_data_len;
-+			lro->tot_len = ip_len;
-+			lro->psh = th->psh;
-+			lro->ack_seq = th->ack_seq;
-+			lro->window = th->window;
-+			lro->mss = tcp_data_len;
-+			lro->data_csum = mlx4_en_lro_tcp_data_csum(iph, th,
-+						tcp_data_len);
-+
-+			/* Handle vlans */
-+			if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+				lro->vlan_prio = cqe->sl_vid;
-+				lro->has_vlan = 1;
-+			} else
-+				lro->has_vlan = 0;
-+
-+			/* Handle timestamps */
-+			if (tcp_hlen != sizeof(*th)) {
-+				lro->tsval = tsval;
-+				lro->tsecr = tsecr;
-+				lro->has_timestamp = 1;
-+			} else {
-+				lro->tsval = (u32) ~0UL;
-+				lro->has_timestamp = 0;
-+			}
-+
-+			/* Activate this session */
-+			lro->expires = jiffies + HZ / 25;
-+			hlist_del(&lro->node);
-+			index = LRO_INDEX(th, mdev->profile.num_lro);
-+
-+			hlist_add_head(&lro->node, &ring->lro_hash[index]);
-+			hlist_add_head(&lro->flush_node, &ring->lro_flush);
-+			priv->port_stats.lro_aggregated++;
-+			return 0;
-+		}
-+	} else {
-+		priv->port_stats.lro_no_desc++;
-+	}
-+
-+flush_session:
-+	if (lro)
-+		mlx4_en_lro_flush_single(priv, ring, lro);
-+sync_device:
-+	dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
-+					 MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+	return -1;
-+}
-+
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_free, node) {
-+		hlist_del(&lro->node);
-+		kfree(lro);
-+	}
-+	kfree(ring->lro_hash);
-+}
-+
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro)
-+{
-+	struct mlx4_en_lro *lro;
-+	int i;
-+
-+	INIT_HLIST_HEAD(&ring->lro_free);
-+	INIT_HLIST_HEAD(&ring->lro_flush);
-+	ring->lro_hash = kmalloc(sizeof(struct hlist_head) * num_lro,
-+				 GFP_KERNEL);
-+	if (!ring->lro_hash)
-+		return -ENOMEM;
-+
-+	for (i = 0; i < num_lro; i++) {
-+		INIT_HLIST_HEAD(&ring->lro_hash[i]);
-+		lro = kzalloc(sizeof(struct mlx4_en_lro), GFP_KERNEL);
-+		if (!lro) {
-+			mlx4_en_lro_destroy(ring);
-+			return -ENOMEM;
-+		}
-+		INIT_HLIST_NODE(&lro->node);
-+		INIT_HLIST_NODE(&lro->flush_node);
-+		hlist_add_head(&lro->node, &ring->lro_free);
-+	}
-+	return 0;
-+}
-+
-+
 diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
-index 6199f4c..92f91d2 100644
+index 2d57ae6..3b394d7 100644
 --- a/drivers/net/mlx4/en_netdev.c
 +++ b/drivers/net/mlx4/en_netdev.c
 @@ -333,7 +333,8 @@ void mlx4_en_netpoll(struct net_device *dev)
@@ -625,10 +57,10 @@
 -		napi_synchronize(&cq->napi);
 +		while (test_bit(__LINK_STATE_RX_SCHED, &cq->poll_dev->state))
 +			msleep(1);
- 		mlx4_en_process_rx_cq(dev, cq, 0);
- 		spin_unlock_irqrestore(&cq->lock, flags);
- 	}
-@@ -757,8 +758,6 @@ void mlx4_en_stop_port(struct net_device *dev)
+ 		if (priv->rx_ring[i].use_frags)
+ 			mlx4_en_process_rx_cq(dev, cq, 0);
+ 		else
+@@ -755,8 +756,6 @@ void mlx4_en_stop_port(struct net_device *dev)
  	/* Free RX Rings */
  	for (i = 0; i < priv->rx_ring_num; i++) {
  		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -637,226 +69,11 @@
  		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
  	}
  }
-diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
-index 6e96d0a..2b0ca84 100644
---- a/drivers/net/mlx4/en_params.c
-+++ b/drivers/net/mlx4/en_params.c
-@@ -142,21 +142,6 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
-  * Ethtool support
-  */
- 
--static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
--{
--	int i;
--
--	priv->port_stats.lro_aggregated = 0;
--	priv->port_stats.lro_flushed = 0;
--	priv->port_stats.lro_no_desc = 0;
--
--	for (i = 0; i < priv->rx_ring_num; i++) {
--		priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
--		priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
--		priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
--	}
--}
--
- static void
- mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
- {
-@@ -266,8 +251,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
- 
- 	spin_lock_bh(&priv->stats_lock);
- 
--	mlx4_en_update_lro_stats(priv);
--
- 	for (i = 0; i < NUM_MAIN_STATS; i++)
- 		data[index++] = ((unsigned long *) &priv->stats)[i];
- 	for (i = 0; i < NUM_PORT_STATS; i++)
-@@ -457,7 +440,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
- 	.get_rx_csum = mlx4_en_get_rx_csum,
- 	.set_rx_csum = mlx4_en_set_rx_csum,
- 	.get_tx_csum = ethtool_op_get_tx_csum,
--	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
-+	.set_tx_csum = ethtool_op_set_tx_csum,
- 	.get_strings = mlx4_en_get_strings,
- 	.get_sset_count = mlx4_en_get_sset_count,
- 	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
 diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
-index 5a95d74..16f07bc 100644
+index 9d144cd..14b5faf 100644
 --- a/drivers/net/mlx4/en_rx.c
 +++ b/drivers/net/mlx4/en_rx.c
-@@ -37,6 +37,7 @@
- #include <linux/if_ether.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/etherdevice.h>
- 
- #include "mlx4_en.h"
- 
-@@ -51,18 +52,6 @@ static void mlx4_en_srq_event(struct mlx
- 	return;
- }
- 
--static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
--				   void **ip_hdr, void **tcpudp_hdr,
--				   u64 *hdr_flags, void *priv)
--{
--	*mac_hdr = page_address(frags->page) + frags->page_offset;
--	*ip_hdr = *mac_hdr + ETH_HLEN;
--	*tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
--	*hdr_flags = LRO_IPV4 | LRO_TCP;
--
--	return 0;
--}
--
- static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
- 			      struct mlx4_en_rx_desc *rx_desc,
- 			      struct skb_frag_struct *skb_frags,
-@@ -372,23 +361,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- 	}
- 	ring->buf = ring->wqres.buf.direct.buf;
- 
--	/* Configure lro mngr */
--	memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
--	ring->lro.dev = priv->dev;
--	ring->lro.features = LRO_F_NAPI;
--	ring->lro.frag_align_pad = NET_IP_ALIGN;
--	ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
--	ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
--	ring->lro.max_desc = mdev->profile.num_lro;
--	ring->lro.max_aggr = MAX_SKB_FRAGS;
--	ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
--				    sizeof(struct net_lro_desc),
--				    GFP_KERNEL);
--	if (!ring->lro.lro_arr) {
--		mlx4_err(mdev, "Failed to allocate lro array\n");
-+	/* Allocate LRO sessions */
-+	if (mlx4_en_lro_init(ring, mdev->profile.num_lro)) {
-+		mlx4_err(mdev, "Failed allocating lro sessions\n");
- 		goto err_map;
- 	}
--	ring->lro.get_frag_header = mlx4_en_get_frag_header;
- 
- 	return 0;
- 
-@@ -501,7 +478,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 
--	kfree(ring->lro.lro_arr);
-+	mlx4_en_lro_destroy(ring);
- 	mlx4_en_unmap_buffer(&ring->wqres.buf);
- 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
- 	vfree(ring->rx_info);
-@@ -522,12 +499,12 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- 
- 
- /* Unmap a completed descriptor and free unused pages */
--static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
--				    struct mlx4_en_rx_desc *rx_desc,
--				    struct skb_frag_struct *skb_frags,
--				    struct skb_frag_struct *skb_frags_rx,
--				    struct mlx4_en_rx_alloc *page_alloc,
--				    int length)
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct mlx4_en_frag_info *frag_info;
-@@ -570,11 +547,11 @@ fail:
- }
- 
- 
--static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
--				      struct mlx4_en_rx_desc *rx_desc,
--				      struct skb_frag_struct *skb_frags,
--				      struct mlx4_en_rx_alloc *page_alloc,
--				      unsigned int length)
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct sk_buff *skb;
-@@ -663,11 +640,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 	struct mlx4_cqe *cqe;
- 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
- 	struct skb_frag_struct *skb_frags;
--	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
- 	struct mlx4_en_rx_desc *rx_desc;
- 	struct sk_buff *skb;
- 	int index;
--	int nr;
- 	unsigned int length;
- 	int polled = 0;
- 	int ip_summed;
-@@ -718,38 +693,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 			if ((cqe->status & MLX4_CQE_STATUS_IPOK) &&
- 			    (cqe->checksum == 0xffff)) {
- 				priv->port_stats.rx_chksum_good++;
--				/* This packet is eligible for LRO if it is:
--				 * - DIX Ethernet (type interpretation)
--				 * - TCP/IP (v4)
--				 * - without IP options
--				 * - not an IP fragment */
--				if (mlx4_en_can_lro(cqe->status) &&
--				    dev->features & NETIF_F_LRO) {
--
--					nr = mlx4_en_complete_rx_desc(
--						priv, rx_desc,
--						skb_frags, lro_frags,
--						ring->page_alloc, length);
--					if (!nr)
--						goto next;
--
--					if (priv->vlgrp && (cqe->vlan_my_qpn &
--						MLX4_CQE_VLAN_PRESENT_MASK)) {
--						lro_vlan_hwaccel_receive_frags(
--						       &ring->lro, lro_frags,
--						       length, length,
--						       priv->vlgrp,
--						       be16_to_cpu(cqe->sl_vid),
--						       NULL, 0);
--					} else
--						lro_receive_frags(&ring->lro,
--								  lro_frags,
--								  length,
--								  length,
--								  NULL, 0);
--
-+				if (mdev->profile.num_lro &&
-+				    !mlx4_en_lro_rx(priv, ring, rx_desc,
-+						    skb_frags, length, cqe))
- 					goto next;
--				}
- 
- 				/* LRO not possible, complete processing here */
- 				ip_summed = CHECKSUM_UNNECESSARY;
-@@ -790,13 +737,15 @@ next:
- 		if (++polled == budget) {
- 			/* We are here because we reached the NAPI budget -
- 			 * flush only pending LRO sessions */
--			lro_flush_all(&ring->lro);
-+			if (mdev->profile.num_lro)
-+				mlx4_en_lro_flush(priv, ring, 0);
- 			goto out;
- 		}
- 	}
- 
- 	/* If CQ is empty flush all LRO sessions unconditionally */
--	lro_flush_all(&ring->lro);
-+	if (mdev->profile.num_lro)
-+		mlx4_en_lro_flush(priv, ring, 1);
- 
- out:
- 	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
-@@ -820,30 +769,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
+@@ -1041,30 +1041,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
  	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
  
  	if (priv->port_up)
@@ -877,8 +94,8 @@
  	int done;
 +	int work = min(*budget, poll_dev->quota);
  
--	done = mlx4_en_process_rx_cq(dev, cq, budget);
-+	done = mlx4_en_process_rx_cq(dev, cq, work);
+-	done = cq->process_cq(dev, cq, budget);
++	done = cq->process_cq(dev, cq, work);
 +	dev->quota -= done;
 +	*budget -= done;
  
@@ -901,76 +118,11 @@
  }
  
  
-diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
-index 2297fec..59f8772 100644
---- a/drivers/net/mlx4/en_tx.c
-+++ b/drivers/net/mlx4/en_tx.c
-@@ -37,6 +37,7 @@
- #include <linux/skbuff.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/tcp.h>
- 
- #include "mlx4_en.h"
- 
 diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
-index 1011803..7c8c489 100644
+index ebaed67..456cd91 100644
 --- a/drivers/net/mlx4/mlx4_en.h
 +++ b/drivers/net/mlx4/mlx4_en.h
-@@ -38,7 +38,7 @@
- #include <linux/list.h>
- #include <linux/mutex.h>
- #include <linux/netdevice.h>
--#include <linux/inet_lro.h>
-+#include <net/checksum.h>
- 
- #include <linux/mlx4/device.h>
- #include <linux/mlx4/qp.h>
-@@ -268,11 +268,42 @@ struct mlx4_en_rx_desc {
- 	struct mlx4_wqe_data_seg data[0];
- };
- 
-+struct mlx4_en_lro {
-+	struct hlist_node node;
-+	struct hlist_node flush_node;
-+
-+	/* Id fields come first: */
-+	u32 saddr;
-+	u32 daddr;
-+	u32 sport_dport;
-+	u32 next_seq;
-+	u16 tot_len;
-+	u8 psh;
-+
-+	u32 tsval;
-+	u32 tsecr;
-+	u32 ack_seq;
-+	u16 window;
-+	__be16 vlan_prio;
-+	u16 has_vlan;
-+	u16 has_timestamp;
-+	u16 mss;
-+	__wsum  data_csum;
-+
-+	unsigned long expires;
-+	struct sk_buff *skb;
-+	struct sk_buff *skb_last;
-+};
-+
-+
- struct mlx4_en_rx_ring {
- 	struct mlx4_srq srq;
- 	struct mlx4_hwq_resources wqres;
- 	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
--	struct net_lro_mgr lro;
-+	struct mlx4_en_lro lro;
-+	struct hlist_head *lro_hash;
-+	struct hlist_head lro_free;
-+	struct hlist_head lro_flush;
- 	u32 size ;	/* number of Rx descs*/
- 	u32 actual_size;
- 	u32 size_mask;
-@@ -311,7 +342,7 @@ struct mlx4_en_cq {
+@@ -328,7 +328,7 @@ struct mlx4_en_cq {
  	int                     ring;
  	spinlock_t              lock;
  	struct net_device      *dev;
@@ -979,43 +131,12 @@
  	/* Per-core Tx cq processing support */
  	struct timer_list timer;
  	int size;
-@@ -541,7 +572,7 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- int mlx4_en_process_rx_cq(struct net_device *dev,
- 			  struct mlx4_en_cq *cq,
- 			  int budget);
+@@ -571,7 +571,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
+ int mlx4_en_process_rx_cq_skb(struct net_device *dev,
+ 			      struct mlx4_en_cq *cq,
+ 			      int budget);
 -int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
 +int mlx4_en_poll_rx_cq(struct net_device *poll_dev, int *budget);;
  void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
  			     int is_tx, int rss, int qpn, int cqn, int srqn,
  			     struct mlx4_qp_context *context);
-@@ -552,6 +583,27 @@ void mlx4_en_calc_rx_buf(struct net_device *dev);
- void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
- 				 struct mlx4_en_rss_map *rss_map,
- 				 int num_entries, int num_rings);
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all);
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+		   struct mlx4_en_rx_desc *rx_desc,
-+		   struct skb_frag_struct *skb_frags,
-+		   unsigned int length, struct mlx4_cqe *cqe);
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring);
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro);
-+
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv* priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length);
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv* priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length);
-+
- void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
- int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
- void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
--- 
-1.5.4
-

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/mlx4_en_0110_no_set_flags.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/mlx4_en_0110_no_set_flags.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.18_suse10_2/mlx4_en_0110_no_set_flags.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -7,9 +7,9 @@
 --- ofed_kernel.orig/drivers/net/mlx4/en_params.c
 +++ ofed_kernel/drivers/net/mlx4/en_params.c
 @@ -593,8 +593,6 @@ const struct ethtool_ops mlx4_en_ethtool
- 	.get_pauseparam = mlx4_en_get_pauseparam,
  	.set_pauseparam = mlx4_en_set_pauseparam,
  	.get_ringparam = mlx4_en_get_ringparam,
+ 	.set_ringparam = mlx4_en_set_ringparam,
 -	.get_flags = ethtool_op_get_flags,
 -	.set_flags = ethtool_op_set_flags,
  };

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/cxgb3_0002_undo_250.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/cxgb3_0002_undo_250.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/cxgb3_0002_undo_250.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,8 @@
-commit 204e2f98c2d13f869b8541f3c57c7314f75cab11
-Author: Divy Le Ray <divy at chelsio.com>
-Date:   Tue May 6 19:26:01 2008 -0700
-
-    cxgb3 - fix EEH
-    
-    Reset the chip when the PCI link goes down.
-    Preserve the napi structure when a sge qset's resources are freed.
-    Replay only HW initialization when the chip comes out of reset.
-    
-    Signed-off-by: Divy Le ray <divy at chelsio.com>
-    Signed-off-by: Jeff Garzik <jgarzik at redhat.com>
-
----
- drivers/net/cxgb3/common.h     |    1 -
- drivers/net/cxgb3/cxgb3_main.c |   10 ++++------
- drivers/net/cxgb3/regs.h       |    8 --------
- drivers/net/cxgb3/sge.c        |   31 ++-----------------------------
- drivers/net/cxgb3/t3_hw.c      |   28 ----------------------------
- 5 files changed, 6 insertions(+), 72 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/common.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/common.h
-+++ ofed_kernel/drivers/net/cxgb3/common.h
-@@ -700,7 +700,6 @@ void mac_prep(struct cmac *mac, struct a
+diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
+index d5e9bf7..dc2c79d 100644
+--- a/drivers/net/cxgb3/common.h
++++ b/drivers/net/cxgb3/common.h
+@@ -726,7 +726,6 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
  void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
  int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
  		    int reset);
@@ -31,11 +10,11 @@
  void t3_led_ready(struct adapter *adapter);
  void t3_fatal_err(struct adapter *adapter);
  void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -2449,6 +2449,9 @@ static pci_ers_result_t t3_io_error_dete
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 8de820e..48fbda6 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -2485,6 +2485,9 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
  	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
  		offload_close(&adapter->tdev);
  
@@ -45,7 +24,7 @@
  	adapter->flags &= ~FULL_INIT_DONE;
  
  	pci_disable_device(pdev);
-@@ -2473,12 +2476,8 @@ static pci_ers_result_t t3_io_slot_reset
+@@ -2509,12 +2512,8 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
  		goto err;
  	}
  	pci_set_master(pdev);
@@ -59,7 +38,7 @@
  		goto err;
  
  	return PCI_ERS_RESULT_RECOVERED;
-@@ -2630,7 +2629,6 @@ static int __devinit init_one(struct pci
+@@ -2666,7 +2665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
  	}
  
  	pci_set_master(pdev);
@@ -67,10 +46,10 @@
  
  	mmio_start = pci_resource_start(pdev, 0);
  	mmio_len = pci_resource_len(pdev, 0);
-Index: ofed_kernel/drivers/net/cxgb3/regs.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/regs.h
-+++ ofed_kernel/drivers/net/cxgb3/regs.h
+diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
+index a035d5c..086cd02 100644
+--- a/drivers/net/cxgb3/regs.h
++++ b/drivers/net/cxgb3/regs.h
 @@ -444,14 +444,6 @@
  
  #define A_PCIE_CFG 0x88
@@ -86,11 +65,11 @@
  #define S_PCIE_CLIDECEN    16
  #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
  #define F_PCIE_CLIDECEN    V_PCIE_CLIDECEN(1U)
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index 1b0861d..912f816 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
  }
  
  /**
@@ -124,7 +103,7 @@
   *	free_qset - free the resources of an SGE queue set
   *	@adapter: the adapter owning the queue set
   *	@q: the queue set
-@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter 
+@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -133,20 +112,11 @@
  }
  
  /**
-@@ -1429,7 +1402,7 @@ static void restart_ctrlq(unsigned long 
-  */
- int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
- {
--	int ret;
-+	int ret;
- 	local_bh_disable();
- 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
- 	local_bh_enable();
-Index: ofed_kernel/drivers/net/cxgb3/t3_hw.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/t3_hw.c
-+++ ofed_kernel/drivers/net/cxgb3/t3_hw.c
-@@ -3265,7 +3265,6 @@ static void config_pcie(struct adapter *
+diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
+index bf5c076..533fc74 100644
+--- a/drivers/net/cxgb3/t3_hw.c
++++ b/drivers/net/cxgb3/t3_hw.c
+@@ -3303,7 +3303,6 @@ static void config_pcie(struct adapter *adap)
  
  	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
  	t3_set_reg_field(adap, A_PCIE_CFG, 0,
@@ -154,15 +124,14 @@
  			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
  }
  
-@@ -3657,30 +3656,3 @@ void t3_led_ready(struct adapter *adapte
- 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+@@ -3706,31 +3705,3 @@ void t3_led_ready(struct adapter *adapter)
  			 F_GPIO0_OUT_VAL);
  }
--
+ 
 -int t3_replay_prep_adapter(struct adapter *adapter)
 -{
 -	const struct adapter_info *ai = adapter->params.info;
--	unsigned int i, j = 0;
+-	unsigned int i, j = -1;
 -	int ret;
 -
 -	early_hw_init(adapter, ai);
@@ -171,15 +140,17 @@
 -		return ret;
 -
 -	for_each_port(adapter, i) {
+-		const struct port_type_info *pti;
 -		struct port_info *p = adap2pinfo(adapter, i);
--		while (!adapter->params.vpd.port_type[j])
--			++j;
 -
--		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
--					ai->mdio_ops);
+-		while (!adapter->params.vpd.port_type[++j])
+-			;
 -
+-		pti = &port_types[adapter->params.vpd.port_type[j]];
+-		ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
+-		if (ret)
+-			return ret;
 -		p->phy.ops->power_down(&p->phy, 1);
--		++j;
 -	}
 -
 -return 0;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/cxgb3_0010_napi.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/cxgb3_0010_napi.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/cxgb3_0010_napi.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,21 @@
----
- drivers/net/cxgb3/adapter.h    |   22 +++--
- drivers/net/cxgb3/cxgb3_main.c |   94 ++++++++++++++++------
- drivers/net/cxgb3/sge.c        |  170 ++++++++++++++++++++++++-----------------
- 3 files changed, 185 insertions(+), 101 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/adapter.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/adapter.h
-+++ ofed_kernel/drivers/net/cxgb3/adapter.h
-@@ -47,13 +47,11 @@
- #include <asm/io.h>
+diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
+index 3b33ecb..21dad82 100644
+--- a/drivers/net/cxgb3/adapter.h
++++ b/drivers/net/cxgb3/adapter.h
+@@ -48,12 +48,10 @@
  
  struct vlan_group;
--struct adapter;
+ struct adapter;
 -struct sge_qset;
  
-+struct adapter;
  struct port_info {
  	struct adapter *adapter;
  	struct vlan_group *vlan_grp;
 -	struct sge_qset *qs;
- 	const struct port_type_info *port_type;
  	u8 port_id;
  	u8 rx_csum_offload;
-@@ -184,8 +182,6 @@ enum {				/* per port SGE statistics */
+ 	u8 nqsets;
+@@ -183,8 +181,6 @@ enum {				/* per port SGE statistics */
  #define T3_MAX_LRO_MAX_PKTS 64
  
  struct sge_qset {		/* an SGE queue set */
@@ -32,7 +24,7 @@
  	struct sge_rspq rspq;
  	struct sge_fl fl[SGE_RXQ_PER_SET];
  	struct sge_txq txq[SGE_TXQ_PER_SET];
-@@ -196,7 +192,7 @@ struct sge_qset {		/* an SGE queue set *
+@@ -195,7 +191,7 @@ struct sge_qset {		/* an SGE queue set */
  	int lro_enabled;
  	int lro_frag_len;
  	void *lro_va;
@@ -41,7 +33,7 @@
  	unsigned long txq_stopped;	/* which Tx queues are stopped */
  	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
  	unsigned long port_stats[SGE_PSTAT_MAX];
-@@ -241,6 +237,12 @@ struct adapter {
+@@ -240,6 +236,12 @@ struct adapter {
  	struct delayed_work adap_check_task;
  	struct work_struct ext_intr_handler_task;
  
@@ -54,7 +46,7 @@
  	struct dentry *debugfs_root;
  
  	struct mutex mdio_lock;
-@@ -267,6 +269,12 @@ static inline struct port_info *adap2pin
+@@ -266,6 +268,12 @@ static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
  	return netdev_priv(adap->port[idx]);
  }
  
@@ -67,7 +59,7 @@
  #define OFFLOAD_DEVMAP_BIT 15
  
  #define tdev2adap(d) container_of(d, struct adapter, tdev)
-@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, str
+@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -76,11 +68,11 @@
  int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
  		unsigned char *data);
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -410,17 +410,49 @@ static void setup_rss(struct adapter *ad
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 0f4c694..342d441 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -435,17 +435,49 @@ static void setup_rss(struct adapter *adap)
  		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
  }
  
@@ -98,9 +90,7 @@
 -	int i;
 +	int i, j, dummy_idx = 0;
 +	struct net_device *nd;
- 
--	for (i = 0; i < SGE_QSETS; i++) {
--		struct sge_qset *qs = &adap->sge.qs[i];
++
 +	for_each_port(adap, i) {
 +		struct net_device *dev = adap->port[i];
 +		const struct port_info *pi = netdev_priv(dev);
@@ -112,7 +102,9 @@
 +				nd = alloc_netdev(sizeof(*p), "", ether_setup);
 +				if (!nd)
 +					goto free_all;
-+
+ 
+-	for (i = 0; i < SGE_QSETS; i++) {
+-		struct sge_qset *qs = &adap->sge.qs[i];
 +				p = netdev_priv(nd);
 +				p->adapter = adap;
 +				nd->weight = 64;
@@ -137,7 +129,7 @@
  }
  
  /*
-@@ -431,18 +463,20 @@ static void init_napi(struct adapter *ad
+@@ -456,18 +488,20 @@ static void init_napi(struct adapter *adap)
  static void quiesce_rx(struct adapter *adap)
  {
  	int i;
@@ -168,7 +160,7 @@
  }
  
  /**
-@@ -455,7 +489,7 @@ static void enable_all_napi(struct adapt
+@@ -480,7 +514,7 @@ static void enable_all_napi(struct adapter *adap)
   */
  static int setup_sge_qsets(struct adapter *adap)
  {
@@ -177,7 +169,7 @@
  	unsigned int ntxq = SGE_TXQ_PER_SET;
  
  	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
-@@ -463,14 +497,15 @@ static int setup_sge_qsets(struct adapte
+@@ -488,14 +522,15 @@ static int setup_sge_qsets(struct adapter *adap)
  
  	for_each_port(adap, i) {
  		struct net_device *dev = adap->port[i];
@@ -196,7 +188,7 @@
  			if (err) {
  				t3_free_sge_resources(adap);
  				return err;
-@@ -884,6 +919,10 @@ static int cxgb_up(struct adapter *adap)
+@@ -909,6 +944,10 @@ static int cxgb_up(struct adapter *adap)
  				goto out;
  		}
  
@@ -207,7 +199,7 @@
  		err = t3_init_hw(adap, 0);
  		if (err)
  			goto out;
-@@ -896,7 +935,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -921,7 +960,6 @@ static int cxgb_up(struct adapter *adap)
  			goto out;
  
  		setup_rss(adap);
@@ -215,7 +207,7 @@
  		adap->flags |= FULL_INIT_DONE;
  	}
  
-@@ -924,7 +962,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -949,7 +987,6 @@ static int cxgb_up(struct adapter *adap)
  				      adap->name, adap)))
  		goto irq_err;
  
@@ -223,7 +215,7 @@
  	t3_sge_start(adap);
  	t3_intr_enable(adap);
  
-@@ -1061,10 +1098,8 @@ static int cxgb_open(struct net_device *
+@@ -1086,10 +1123,8 @@ static int cxgb_open(struct net_device *dev)
  	int other_ports = adapter->open_device_map & PORT_MASK;
  	int err;
  
@@ -235,7 +227,7 @@
  
  	set_bit(pi->port_id, &adapter->open_device_map);
  	if (is_offload(adapter) && !ofld_disable) {
-@@ -2700,6 +2735,7 @@ static int __devinit init_one(struct pci
+@@ -2736,6 +2771,7 @@ static int __devinit init_one(struct pci_dev *pdev,
  #ifdef CONFIG_NET_POLL_CONTROLLER
  		netdev->poll_controller = cxgb_netpoll;
  #endif
@@ -243,7 +235,7 @@
  
  		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
  	}
-@@ -2800,6 +2836,12 @@ static void __devexit remove_one(struct 
+@@ -2836,6 +2872,12 @@ static void __devexit remove_one(struct pci_dev *pdev)
  		t3_free_sge_resources(adapter);
  		cxgb_disable_msi(adapter);
  
@@ -256,11 +248,11 @@
  		for_each_port(adapter, i)
  			if (adapter->port[i])
  				free_netdev(adapter->port[i]);
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter 
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index f6bc6fe..3bbf626 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -270,7 +262,7 @@
  	memset(q, 0, sizeof(*q));
  }
  
-@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, str
+@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
  	unsigned int ndesc, pidx, credits, gen, compl;
  	const struct port_info *pi = netdev_priv(dev);
  	struct adapter *adap = pi->adapter;
@@ -279,7 +271,7 @@
  	struct sge_txq *q = &qs->txq[TXQ_ETH];
  
  	/*
-@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long 
+@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long data)
  	struct sk_buff *skb;
  	struct sge_qset *qs = (struct sge_qset *)data;
  	struct sge_txq *q = &qs->txq[TXQ_CTRL];
@@ -295,7 +287,7 @@
  
  		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
  
-@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long 
+@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long data)
  
  	spin_unlock(&q->lock);
  	wmb();
@@ -304,7 +296,7 @@
  		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
  }
  
-@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struc
+@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
  	else {
  		struct sge_qset *qs = rspq_to_qset(q);
  
@@ -314,7 +306,7 @@
  		q->rx_head = skb;
  	}
  	q->rx_tail = skb;
-@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundl
+@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev,
   *	receive handler.  Batches need to be of modest size as we do prefetches
   *	on the packets in each.
   */
@@ -357,7 +349,7 @@
  			prefetch(head->data);
  			skbs[ngathered] = head;
  			head = head->next;
-@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct 
+@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct *napi, int budget)
  		}
  		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
  	}
@@ -370,7 +362,7 @@
  }
  
  /**
-@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const
+@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const struct rsp_desc *r)
  
  /**
   *	napi_rx_handler - the NAPI handler for Rx processing
@@ -449,7 +441,7 @@
  }
  
  /**
-@@ -2448,7 +2462,8 @@ static inline int handle_responses(struc
+@@ -2448,7 +2462,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
  		return 0;
  	}
@@ -459,7 +451,7 @@
  	return 1;
  }
  
-@@ -2459,7 +2474,8 @@ static inline int handle_responses(struc
+@@ -2459,7 +2474,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -469,7 +461,7 @@
  	struct sge_rspq *q = &qs->rspq;
  
  	spin_lock(&q->lock);
-@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, vo
+@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -484,7 +476,7 @@
  		q->unhandled_irqs++;
  	spin_unlock(&q->lock);
  	return IRQ_HANDLED;
-@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, 
+@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, void *cookie)
  	return IRQ_HANDLED;
  }
  
@@ -502,7 +494,7 @@
  		return 1;
  	}
  	return 0;
-@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int 
+@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
  
  	spin_lock(&q->lock);
  
@@ -515,7 +507,7 @@
  	if (!new_packets && t3_slow_intr_handler(adap) == 0)
  		q->unhandled_irqs++;
  
-@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, voi
+@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, void *cookie)
  static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  {
  	u32 map;
@@ -527,7 +519,7 @@
  
  	t3_write_reg(adap, A_PL_CLI, 0);
  	map = t3_read_reg(adap, A_SG_DATA_INTR);
-@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq
+@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  	if (unlikely(map & F_ERRINTR))
  		t3_slow_intr_handler(adap);
  
@@ -550,7 +542,7 @@
  
  	spin_unlock(&q0->lock);
  	return IRQ_HANDLED;
-@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long d
+@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long data)
  {
  	spinlock_t *lock;
  	struct sge_qset *qs = (struct sge_qset *)data;
@@ -560,7 +552,7 @@
  
  	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
  		reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
-@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long d
+@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long data)
  		spin_unlock(&qs->txq[TXQ_OFLD].lock);
  	}
  	lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
@@ -572,7 +564,7 @@
  			u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
  
  			if (qs->fl[0].credits < qs->fl[0].size)
-@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long d
+@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long data)
   */
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
  {
@@ -586,7 +578,7 @@
  }
  
  /**
-@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_
+@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
   */
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -595,7 +587,7 @@
  {
  	int i, avail, ret = -ENOMEM;
  	struct sge_qset *q = &adapter->sge.qs[id];
-@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *ad
+@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  	}
  
  	spin_unlock_irq(&adapter->sge.reg_lock);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,23 +3,32 @@
  drivers/infiniband/hw/ipath/ipath_user_sdma.c |    6 +++---
  2 files changed, 4 insertions(+), 4 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:37:35.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:41:22.000000000 -0800
 @@ -698,7 +698,7 @@ retry:
  
  	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
  			      tx->map_len, DMA_TO_DEVICE);
--	if (dma_mapping_error(&dd->pcidev->dev, addr)) {
-+	if (dma_mapping_error(addr)) {
- 		ret = -EIO;
- 		goto unlock;
- 	}
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+-	if (dma_mapping_error(&dd->pcidev->dev, addr))
++	if (dma_mapping_error(addr))
+ 		goto ioerr;
+ 
+ 	dwoffset = tx->map_len >> 2;
+@@ -739,7 +739,7 @@ retry:
+ 		dw = (len + 3) >> 2;
+ 		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
+ 				      DMA_TO_DEVICE);
+-		if (dma_mapping_error(&dd->pcidev->dev, addr))
++		if (dma_mapping_error(addr))
+ 			goto unmap;
+ 		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
+ 		/* SDmaUseLargeBuf has to be set in every descriptor */
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:18.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:54.000000000 -0800
 @@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(cons
  
  	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/ipoib_0100_to_2.6.21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/ipoib_0100_to_2.6.21.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/ipoib_0100_to_2.6.21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -12,11 +12,11 @@
  drivers/infiniband/ulp/ipoib/ipoib_multicast.c |   10 ++--
  5 files changed, 61 insertions(+), 64 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:56:08.000000000 +0200
-@@ -279,8 +279,6 @@ struct ipoib_dev_priv {
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:46:20.000000000 +0300
+@@ -277,8 +277,6 @@ struct ipoib_dev_priv {
  
  	struct net_device *dev;
  
@@ -25,7 +25,7 @@
  	unsigned long flags;
  
  	struct mutex vlan_mutex;
-@@ -338,6 +336,8 @@ struct ipoib_dev_priv {
+@@ -336,6 +334,8 @@ struct ipoib_dev_priv {
  
  	struct ib_event_handler event_handler;
  
@@ -34,7 +34,7 @@
  	struct net_device *parent;
  	struct list_head child_intfs;
  	struct list_head list;
-@@ -425,7 +425,7 @@ extern struct workqueue_struct *ipoib_wo
+@@ -423,7 +423,7 @@ extern struct workqueue_struct *ipoib_wo
  
  /* functions */
  
@@ -43,10 +43,10 @@
  void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
  void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:46:20.000000000 +0300
 @@ -593,7 +593,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		ipoib_dbg(priv, "cm recv error "
  			   "(status=%d, wrid=%d vend_err %x)\n",
@@ -56,7 +56,7 @@
  		if (has_srq)
  			goto repost;
  		else {
-@@ -658,7 +658,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
+@@ -646,7 +646,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		 * this packet and reuse the old buffer.
  		 */
  		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
@@ -65,7 +65,7 @@
  		goto repost;
  	}
  
-@@ -676,8 +676,8 @@ copied:
+@@ -664,8 +664,8 @@ copied:
  	skb_pull(skb, IPOIB_ENCAP_LEN);
  
  	dev->last_rx = jiffies;
@@ -76,7 +76,7 @@
  
  	skb->dev = dev;
  	/* XXX get correct PACKET_ type here */
-@@ -726,8 +726,8 @@ void ipoib_cm_send(struct net_device *de
+@@ -714,8 +714,8 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(skb->len > tx->mtu)) {
  		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
  			   skb->len, tx->mtu);
@@ -87,7 +87,7 @@
  		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
  		return;
  	}
-@@ -746,7 +746,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -734,7 +734,7 @@ void ipoib_cm_send(struct net_device *de
  	tx_req->skb = skb;
  	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
  	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
@@ -96,7 +96,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -756,7 +756,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -744,7 +744,7 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
  			       addr, skb->len))) {
  		ipoib_warn(priv, "post_send failed\n");
@@ -105,7 +105,7 @@
  		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
  		dev_kfree_skb_any(skb);
  	} else {
-@@ -793,8 +793,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
+@@ -781,8 +781,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
  	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
  
  	/* FIXME: is this right? Shouldn't we only increment on success? */
@@ -116,10 +116,10 @@
  
  	dev_kfree_skb_any(tx_req->skb);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:43:16.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:49:04.000000000 +0300
 @@ -261,7 +261,7 @@ static void ipoib_ib_handle_rx_wc(struct
  	 * this packet and reuse the old buffer.
  	 */
@@ -263,60 +263,41 @@
  		--priv->tx_outstanding;
  		ipoib_dma_unmap_tx(priv->ca, tx_req);
  		dev_kfree_skb_any(skb);
-@@ -809,6 +812,7 @@ int ipoib_ib_dev_stop(struct net_device 
+@@ -708,8 +711,7 @@ int ipoib_ib_dev_open(struct net_device 
+ 	priv->poll_timer.function = ipoib_ib_tx_timer_func;
+ 	priv->poll_timer.data = (unsigned long)dev;
+ 
+-	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_enable(&priv->napi);
++	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+ 
+ 	return 0;
+ }
+@@ -828,8 +830,8 @@ int ipoib_ib_dev_stop(struct net_device 
+ 	struct ipoib_tx_buf *tx_req;
  	int i;
  
- 	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+-	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_disable(&priv->napi);
++	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 +	netif_poll_disable(dev);
  
  	ipoib_cm_dev_stop(dev);
  
-@@ -893,6 +897,7 @@ timeout:
- 		msleep(1);
- 	}
+@@ -903,6 +905,7 @@ timeout:
  
+ 	ipoib_ah_dev_cleanup(dev);
+ 
 +	netif_poll_enable(dev);
  	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
  
  	return 0;
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:55:35.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:58:18.000000000 +0200
-@@ -106,20 +106,16 @@ int ipoib_open(struct net_device *dev)
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:44:02.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:46:20.000000000 +0300
+@@ -585,7 +585,7 @@ static void neigh_add_path(struct sk_buf
  
- 	ipoib_dbg(priv, "bringing up interface\n");
- 
--	if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
--		napi_enable(&priv->napi);
-+	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- 
- 	if (ipoib_pkey_dev_delay_open(dev))
- 		return 0;
- 
--	if (ipoib_ib_dev_open(dev)) {
--		napi_disable(&priv->napi);
--		return -EINVAL;
--	}
-+	if (ipoib_ib_dev_open(dev))
-+  		return -EINVAL;
- 
- 	if (ipoib_ib_dev_up(dev)) {
- 		ipoib_ib_dev_stop(dev, 1);
--		napi_disable(&priv->napi);
- 		return -EINVAL;
- 	}
- 
-@@ -152,7 +148,6 @@ static int ipoib_stop(struct net_device 
- 	ipoib_dbg(priv, "stopping interface\n");
- 
- 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
--	napi_disable(&priv->napi);
- 
- 	netif_stop_queue(dev);
- 
-@@ -604,7 +599,7 @@ static void neigh_add_path(struct sk_buf
- 
  	neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
  	if (!neigh) {
 -		++dev->stats.tx_dropped;
@@ -324,7 +305,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -667,7 +662,7 @@ err_list:
+@@ -646,7 +646,7 @@ err_list:
  err_path:
  	ipoib_neigh_free(dev, neigh);
  err_drop:
@@ -333,7 +314,7 @@
  	dev_kfree_skb_any(skb);
  
  	spin_unlock_irqrestore(&priv->lock, flags);
-@@ -714,7 +709,7 @@ static void unicast_arp_send(struct sk_b
+@@ -697,7 +697,7 @@ static void unicast_arp_send(struct sk_b
  			} else
  				__path_add(dev, path);
  		} else {
@@ -342,7 +323,7 @@
  			dev_kfree_skb_any(skb);
  		}
  
-@@ -733,7 +728,7 @@ static void unicast_arp_send(struct sk_b
+@@ -716,7 +716,7 @@ static void unicast_arp_send(struct sk_b
  		skb_push(skb, sizeof *phdr);
  		__skb_queue_tail(&path->queue, skb);
  	} else {
@@ -351,7 +332,7 @@
  		dev_kfree_skb_any(skb);
  	}
  
-@@ -790,7 +785,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -773,7 +773,7 @@ static int ipoib_start_xmit(struct sk_bu
  			__skb_queue_tail(&neigh->queue, skb);
  			spin_unlock_irqrestore(&priv->lock, flags);
  		} else {
@@ -360,7 +341,7 @@
  			dev_kfree_skb_any(skb);
  		}
  	} else {
-@@ -816,7 +811,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -799,7 +799,7 @@ static int ipoib_start_xmit(struct sk_bu
  					   IPOIB_QPN(phdr->hwaddr),
  					   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
  				dev_kfree_skb_any(skb);
@@ -369,7 +350,7 @@
  				return NETDEV_TX_OK;
  			}
  
-@@ -842,7 +837,7 @@ static void ipoib_timeout(struct net_dev
+@@ -825,7 +825,7 @@ static void ipoib_timeout(struct net_dev
  static int ipoib_hard_header(struct sk_buff *skb,
  			     struct net_device *dev,
  			     unsigned short type,
@@ -378,7 +359,7 @@
  {
  	struct ipoib_header *header;
  
-@@ -931,9 +926,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
+@@ -914,9 +914,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
  void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
  {
  	struct sk_buff *skb;
@@ -390,7 +371,7 @@
  		dev_kfree_skb_any(skb);
  	}
  	if (ipoib_cm_get(neigh))
-@@ -1008,10 +1004,6 @@ void ipoib_dev_cleanup(struct net_device
+@@ -991,10 +992,6 @@ void ipoib_dev_cleanup(struct net_device
  	priv->tx_ring = NULL;
  }
  
@@ -401,7 +382,7 @@
  static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
  		       void **tcph, u64 *hdr_flags, void *priv)
  {
-@@ -1069,13 +1061,13 @@ static void ipoib_setup(struct net_devic
+@@ -1052,13 +1049,13 @@ static void ipoib_setup(struct net_devic
  	dev->change_mtu		 = ipoib_change_mtu;
  	dev->hard_start_xmit	 = ipoib_start_xmit;
  	dev->tx_timeout		 = ipoib_timeout;
@@ -418,10 +399,10 @@
  
  	dev->watchdog_timeo	 = HZ;
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:46:20.000000000 +0300
 @@ -100,7 +100,7 @@ static void ipoib_mcast_free(struct ipoi
  	}
  
@@ -448,7 +429,7 @@
  			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
  		}
  		netif_tx_unlock_bh(dev);
-@@ -667,7 +668,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -677,7 +678,7 @@ void ipoib_mcast_send(struct net_device 
  	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)		||
  	    !priv->broadcast					||
  	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
@@ -457,7 +438,7 @@
  		dev_kfree_skb_any(skb);
  		goto unlock;
  	}
-@@ -682,7 +683,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -692,7 +693,7 @@ void ipoib_mcast_send(struct net_device 
  		if (!mcast) {
  			ipoib_warn(priv, "unable to allocate memory for "
  				   "multicast structure\n");
@@ -466,7 +447,7 @@
  			dev_kfree_skb_any(skb);
  			goto out;
  		}
-@@ -697,7 +698,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -707,7 +708,7 @@ void ipoib_mcast_send(struct net_device 
  		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
  			skb_queue_tail(&mcast->pkt_queue, skb);
  		else {

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,26 @@
+---
+ drivers/infiniband/ulp/ipoib/ipoib_fs.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+===================================================================
+--- ofa_kernel-1.4.orig/drivers/infiniband/ulp/ipoib/ipoib_fs.c
++++ ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+@@ -134,7 +134,7 @@ static int ipoib_mcg_open(struct inode *
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_mcg_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_mcg_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -240,7 +240,7 @@ static int ipoib_path_open(struct inode 
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_path_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_path_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/iw_nes_050_to_2_6_24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/iw_nes_050_to_2_6_24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/iw_nes_050_to_2_6_24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:23:23.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -1118,19 +1118,19 @@ static ssize_t nes_show_wqm_quanta(struc
  static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
  					const char *buf, size_t count)

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/iw_nes_100_to_2_6_23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/iw_nes_100_to_2_6_23.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/iw_nes_100_to_2_6_23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/Kconfig nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig
---- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:38:45.000000000 -0500
 @@ -2,7 +2,6 @@ config INFINIBAND_NES
  	tristate "NetEffect RNIC Driver"
  	depends on PCI && INET && INFINIBAND
@@ -10,8 +10,8 @@
  	  This is a low-level driver for NetEffect RDMA enabled
  	  Network Interface Cards (RNIC).
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -360,10 +360,11 @@ struct ib_qp *nes_get_qp(struct ib_devic
   */
  static void nes_print_macaddr(struct net_device *netdev)
@@ -29,9 +29,9 @@
  
  /**
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_cm.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2008-11-07 15:26:56.000000000 -0600
-@@ -998,7 +998,6 @@ static int nes_addr_resolve_neigh(struct
+--- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1086,7 +1086,6 @@ static int nes_addr_resolve_neigh(struct
  	struct flowi fl;
  	struct neighbour *neigh;
  	int rc = -1;
@@ -39,7 +39,7 @@
  
  	memset(&fl, 0, sizeof fl);
  	fl.nl_u.ip4_u.daddr = htonl(dst_ip);
-@@ -1012,8 +1011,11 @@ static int nes_addr_resolve_neigh(struct
+@@ -1100,8 +1099,11 @@ static int nes_addr_resolve_neigh(struct
  	if (neigh) {
  		if (neigh->nud_state & NUD_VALID) {
  			nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -53,7 +53,7 @@
  			nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
  					     dst_ip, NES_ARP_ADD);
  			rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
-@@ -1042,7 +1044,6 @@ static struct nes_cm_node *make_cm_node(
+@@ -1130,7 +1132,6 @@ static struct nes_cm_node *make_cm_node(
  	int arpindex = 0;
  	struct nes_device *nesdev;
  	struct nes_adapter *nesadapter;
@@ -61,7 +61,7 @@
  
  	/* create an hte and cm_node for this instance */
  	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
-@@ -1111,8 +1112,11 @@ static struct nes_cm_node *make_cm_node(
+@@ -1197,8 +1198,11 @@ static struct nes_cm_node *make_cm_node(
  
  	/* copy the mac addr to node context */
  	memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
@@ -76,8 +76,8 @@
  	add_hte_node(cm_core, cm_node);
  	atomic_inc(&cm_nodes_created);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2008-11-07 15:38:18.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:38:45.000000000 -0500
 @@ -38,14 +38,9 @@
  #include <linux/ip.h>
  #include <linux/tcp.h>
@@ -90,10 +90,10 @@
 -module_param(nes_lro_max_aggr, uint, 0444);
 -MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
 -
- static u32 crit_err_count;
- u32 int_mod_timer_init;
- u32 int_mod_cq_depth_256;
-@@ -1540,25 +1535,6 @@ static void nes_rq_wqes_timeout(unsigned
+ static int wide_ppm_offset;
+ module_param(wide_ppm_offset, int, 0644);
+ MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm");
+@@ -1531,25 +1526,6 @@ static void nes_rq_wqes_timeout(unsigned
  }
  
  
@@ -119,7 +119,7 @@
  /**
   * nes_init_nic_qp
   */
-@@ -1783,14 +1759,6 @@ int nes_init_nic_qp(struct nes_device *n
+@@ -1773,14 +1749,6 @@ int nes_init_nic_qp(struct nes_device *n
  			jumbomode = 1;
  		nes_nic_init_timer_defaults(nesdev, jumbomode);
  	}
@@ -134,7 +134,7 @@
  	return 0;
  }
  
-@@ -2541,7 +2509,7 @@ static void nes_nic_napi_ce_handler(stru
+@@ -2556,7 +2524,7 @@ static void nes_nic_napi_ce_handler(stru
  {
  	struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
  
@@ -143,7 +143,7 @@
  }
  
  
-@@ -2576,13 +2544,10 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2591,13 +2559,10 @@ void nes_nic_ce_handler(struct nes_devic
  	u16 pkt_type;
  	u16 rqes_processed = 0;
  	u8 sq_cqes = 0;
@@ -157,7 +157,7 @@
  	do {
  		if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
  				NES_NIC_CQE_VALID) {
-@@ -2714,17 +2679,9 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2729,17 +2694,9 @@ void nes_nic_ce_handler(struct nes_devic
  							>> 16);
  					nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
  							nesvnic->netdev->name, vlan_tag);
@@ -178,7 +178,7 @@
  
  skip_rx_indicate0:
  				nesvnic->netdev->last_rx = jiffies;
-@@ -2755,8 +2712,6 @@ skip_rx_indicate0:
+@@ -2770,8 +2727,6 @@ skip_rx_indicate0:
  
  	} while (1);
  
@@ -188,18 +188,18 @@
  		barrier();
  		/* restart the queue if it had been stopped */
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.h nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:38:45.000000000 -0500
 @@ -33,8 +33,6 @@
  #ifndef __NES_HW_H
  #define __NES_HW_H
  
 -#include <linux/inet_lro.h>
 -
+ #define NES_PHY_TYPE_CX4       1
  #define NES_PHY_TYPE_1G        2
  #define NES_PHY_TYPE_IRIS      3
- #define NES_PHY_TYPE_ARGUS     4
-@@ -991,8 +989,6 @@ struct nes_hw_tune_timer {
+@@ -1005,8 +1003,6 @@ struct nes_hw_tune_timer {
  #define NES_TIMER_ENABLE_LIMIT      4
  #define NES_MAX_LINK_INTERRUPTS     128
  #define NES_MAX_LINK_CHECK          200
@@ -208,7 +208,7 @@
  
  struct nes_adapter {
  	u64              fw_ver;
-@@ -1168,7 +1164,6 @@ struct nes_vnic {
+@@ -1194,7 +1190,6 @@ struct nes_vnic {
  	u32               msg_enable;
  	/* u32 tx_avail; */
  	__be32            local_ipaddr;
@@ -216,7 +216,7 @@
  	spinlock_t           tx_lock;	/* could use netdev tx lock? */
  	struct timer_list    rq_wqes_timer;
  	u32                  nic_mem_size;
-@@ -1196,9 +1191,6 @@ struct nes_vnic {
+@@ -1222,9 +1217,6 @@ struct nes_vnic {
  	u8  of_device_registered;
  	u8  rdma_enabled;
  	u8  rx_checksum_disabled;
@@ -227,8 +227,8 @@
  
  struct nes_ib_device {
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
 @@ -96,35 +96,38 @@ static int nics_per_function = 1;
  /**
   * nes_netdev_poll
@@ -292,7 +292,7 @@
  	netif_stop_queue(netdev);
  	list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
  		first_nesvnic = container_of(list_pos, struct nes_vnic, list);
-@@ -797,14 +798,16 @@ static int nes_netdev_set_mac_address(st
+@@ -775,14 +776,16 @@ static int nes_netdev_set_mac_address(st
  	int i;
  	u32 macaddr_low;
  	u16 macaddr_high;
@@ -312,7 +312,7 @@
  	macaddr_high  = ((u16)netdev->dev_addr[0]) << 8;
  	macaddr_high += (u16)netdev->dev_addr[1];
  	macaddr_low   = ((u32)netdev->dev_addr[2]) << 24;
-@@ -909,11 +912,11 @@ static void nes_netdev_set_multicast_lis
+@@ -887,11 +890,11 @@ static void nes_netdev_set_multicast_lis
  			if (mc_index >= max_pft_entries_avaiable)
  				break;
  			if (multicast_addr) {
@@ -329,7 +329,7 @@
  				macaddr_high  = ((u16)multicast_addr->dmi_addr[0]) << 8;
  				macaddr_high += (u16)multicast_addr->dmi_addr[1];
  				macaddr_low   = ((u32)multicast_addr->dmi_addr[2]) << 24;
-@@ -1063,9 +1066,6 @@ static const char nes_ethtool_stringset[
+@@ -1040,9 +1043,6 @@ static const char nes_ethtool_stringset[
  	"CQ Depth 32",
  	"CQ Depth 128",
  	"CQ Depth 256",
@@ -339,17 +339,17 @@
  };
  
  #define NES_ETHTOOL_STAT_COUNT  ARRAY_SIZE(nes_ethtool_stringset)
-@@ -1263,9 +1263,6 @@ static void nes_netdev_get_ethtool_stats
- 	target_stat_values[52] = int_mod_cq_depth_32;
- 	target_stat_values[53] = int_mod_cq_depth_128;
- 	target_stat_values[54] = int_mod_cq_depth_256;
--	target_stat_values[55] = nesvnic->lro_mgr.stats.aggregated;
--	target_stat_values[56] = nesvnic->lro_mgr.stats.flushed;
--	target_stat_values[57] = nesvnic->lro_mgr.stats.no_desc;
+@@ -1240,9 +1240,6 @@ static void nes_netdev_get_ethtool_stats
+ 	target_stat_values[++index] = int_mod_cq_depth_32;
+ 	target_stat_values[++index] = int_mod_cq_depth_128;
+ 	target_stat_values[++index] = int_mod_cq_depth_256;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
  
  }
  
-@@ -1542,8 +1539,6 @@ static struct ethtool_ops nes_ethtool_op
+@@ -1525,8 +1522,6 @@ static struct ethtool_ops nes_ethtool_op
  	.set_sg = ethtool_op_set_sg,
  	.get_tso = ethtool_op_get_tso,
  	.set_tso = ethtool_op_set_tso,
@@ -358,7 +358,7 @@
  };
  
  
-@@ -1615,7 +1610,8 @@ struct net_device *nes_netdev_init(struc
+@@ -1598,7 +1593,8 @@ struct net_device *nes_netdev_init(struc
  	netdev->type = ARPHRD_ETHER;
  	netdev->features = NETIF_F_HIGHDMA;
  	netdev->ethtool_ops = &nes_ethtool_ops;
@@ -368,7 +368,7 @@
  	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
  	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
  	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
-@@ -1648,7 +1644,6 @@ struct net_device *nes_netdev_init(struc
+@@ -1630,7 +1626,6 @@ struct net_device *nes_netdev_init(struc
  
  	if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
  		netdev->features |= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/iw_nes_200_to_2_6_22.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/iw_nes_200_to_2_6_22.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/iw_nes_200_to_2_6_22.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:52:27.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -593,16 +593,22 @@ static int __devinit nes_probe(struct pc
  						nesdev->nesadapter->port_count;
  	}
@@ -35,9 +35,9 @@
  
  	tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.h nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h
---- linux-2.6/drivers/infiniband/hw/nes/nes.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2008-11-07 15:52:27.000000000 -0600
-@@ -276,14 +276,7 @@ static inline __le32 get_crc_value(struc
+--- linux-2.6/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:38:45.000000000 -0500
+@@ -274,14 +274,7 @@ static inline __le32 get_crc_value(struc
  	u32 crc_value;
  	crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad));
  

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/iw_nes_210_to_2_6_21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/iw_nes_210_to_2_6_21.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/iw_nes_210_to_2_6_21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,23 @@
+diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1549,6 +1549,11 @@ static void nes_netdev_vlan_rx_register(
+ 	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ }
+ 
++static void nes_netdev_vlan_rx_kill_vid(struct net_device *netdev, 
++					unsigned short vid)
++{
++}
++
+ 
+ /**
+  * nes_netdev_init - initialize network device
+@@ -1598,6 +1603,7 @@ struct net_device *nes_netdev_init(struc
+ 	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
+ 	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ 	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
++	netdev->vlan_rx_kill_vid = nes_netdev_vlan_rx_kill_vid;
+ 
+ 	/* Fill in the port structure */
+ 	nesvnic->netdev = netdev;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/mlx4_en_0099_no_multiqueue.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/mlx4_en_0099_no_multiqueue.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/mlx4_en_0099_no_multiqueue.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,73 @@
+diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
+index bce700a..a9ee0d7 100644
+--- a/drivers/net/mlx4/en_netdev.c
++++ b/drivers/net/mlx4/en_netdev.c
+@@ -963,7 +963,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	int i;
+ 	int err;
+ 
+-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
++	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+ 	if (dev == NULL) {
+ 		mlx4_err(mdev, "Net device allocation failed\n");
+ 		return -ENOMEM;
+@@ -1036,7 +1036,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	dev->open = &mlx4_en_open;
+ 	dev->stop = &mlx4_en_close;
+ 	dev->hard_start_xmit = &mlx4_en_xmit;
+-	dev->select_queue = &mlx4_en_select_queue;
+ 	dev->get_stats = &mlx4_en_get_stats;
+ 	dev->set_multicast_list = &mlx4_en_set_multicast;
+ 	dev->set_mac_address = &mlx4_en_set_mac;
+diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
+index 3d8246f..72e166f 100644
+--- a/drivers/net/mlx4/en_tx.c
++++ b/drivers/net/mlx4/en_tx.c
+@@ -392,7 +392,7 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+ 			 *   transmission on that ring would stop the queue.
+ 			 */
+ 			ring->blocked = 0;
+-			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
++			netif_wake_queue(dev);
+ 			priv->port_stats.wake_queue++;
+ 		}
+ 	}
+@@ -612,7 +612,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
+ 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ }
+ 
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
++static int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	u16 vlan_tag = 0;
+@@ -703,7 +703,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+-	tx_ind = skb->queue_mapping;
++	tx_ind = mlx4_en_select_queue(dev, skb);
+ 	ring = &priv->tx_ring[tx_ind];
+ 	if (priv->vlgrp && vlan_tx_tag_present(skb))
+ 		vlan_tag = vlan_tx_tag_get(skb);
+@@ -713,7 +713,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ 		/* every full Tx ring stops queue.
+ 		 * TODO: implement multi-queue support (per-queue stop) */
+-		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
++		netif_stop_queue(dev);
+ 		ring->blocked = 1;
+ 		priv->port_stats.queue_stopped++;
+ 
+diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
+index adef17c..995e318 100644
+--- a/drivers/net/mlx4/mlx4_en.h
++++ b/drivers/net/mlx4/mlx4_en.h
+@@ -521,7 +521,6 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq);
+ void mlx4_en_poll_tx_cq(unsigned long data);
+ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+ 
+ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+ 			   u32 size, u16 stride);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/mlx4_en_0100_to_2.6.24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/mlx4_en_0100_to_2.6.24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/mlx4_en_0100_to_2.6.24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,33 +1,5 @@
-From 6d0a6cc6981404e95297924c9a8f169b3447a98e Mon Sep 17 00:00:00 2001
-From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
-Date: Thu, 2 Oct 2008 11:26:30 +0300
-Subject: [PATCH] mlx4_en: LRO backport patch
-
-Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
----
- drivers/net/mlx4/Makefile    |    2 +-
- drivers/net/mlx4/en_cq.c     |   23 ++-
- drivers/net/mlx4/en_lro.c    |  529 ++++++++++++++++++++++++++++++++++++++++++
- drivers/net/mlx4/en_netdev.c |    5 +-
- drivers/net/mlx4/en_params.c |   19 +--
- drivers/net/mlx4/en_rx.c     |  121 +++-------
- drivers/net/mlx4/en_tx.c     |    1 +
- drivers/net/mlx4/mlx4_en.h   |   60 +++++-
- 8 files changed, 646 insertions(+), 114 deletions(-)
- create mode 100644 drivers/net/mlx4/en_lro.c
-
-diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
-index acb1b91..e6bfdec 100644
---- a/drivers/net/mlx4/Makefile
-+++ b/drivers/net/mlx4/Makefile
-@@ -5,4 +5,4 @@ mlx4_core-y :=  alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-
- obj-$(CONFIG_MLX4_EN)                  += mlx4_en.o
-
--mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o
-+mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o en_lro.o
 diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
-index cf9c5df..2348bfc 100644
+index a4d6e46..440633a 100644
 --- a/drivers/net/mlx4/en_cq.c
 +++ b/drivers/net/mlx4/en_cq.c
 @@ -34,6 +34,7 @@
@@ -38,7 +10,7 @@
  
  #include "mlx4_en.h"
  
-@@ -100,8 +101,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -106,8 +107,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  		cq->timer.function = mlx4_en_poll_tx_cq;
  		cq->timer.data = (unsigned long) cq;
  	} else {
@@ -58,7 +30,7 @@
  	}
  
  	return 0;
-@@ -123,8 +133,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -129,8 +139,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  
  	if (cq->is_tx)
  		del_timer(&cq->timer);
@@ -74,548 +46,8 @@
  
  	mlx4_cq_free(mdev->dev, &cq->mcq);
  }
-diff --git a/drivers/net/mlx4/en_lro.c b/drivers/net/mlx4/en_lro.c
-new file mode 100644
-index 0000000..9d17376
---- /dev/null
-+++ b/drivers/net/mlx4/en_lro.c
-@@ -0,0 +1,534 @@
-+/*
-+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses.  You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ *     Redistribution and use in source and binary forms, with or
-+ *     without modification, are permitted provided that the following
-+ *     conditions are met:
-+ *
-+ *      - Redistributions of source code must retain the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer.
-+ *
-+ *      - Redistributions in binary form must reproduce the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer in the documentation and/or other materials
-+ *        provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ */
-+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <net/tcp.h>
-+#include <linux/if_vlan.h>
-+#include <linux/delay.h>
-+
-+#include "mlx4_en.h"
-+
-+/* LRO hash function - using sum of source and destination port LSBs is
-+ * good enough */
-+#define LRO_INDEX(th, size) \
-+	((*((u8*) &th->source + 1) + *((u8*) &th->dest + 1)) & (size - 1))
-+
-+/* #define CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+static void mlx4_en_lro_validate(struct mlx4_en_priv* priv, struct mlx4_en_lro *lro)
-+{
-+	int i;
-+	int size, size2;
-+	struct sk_buff *skb = lro->skb;
-+	skb_frag_t *frags;
-+	int len, len2;
-+	int cur_skb = 0;
-+
-+	/* Sum fragment sizes of first skb */
-+	len = skb->len;
-+	size = skb_headlen(skb);
-+	frags = skb_shinfo(skb)->frags;
-+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+		size += frags[i].size;
-+	}
-+
-+	/* Add in fragments of linked skb's */
-+	skb = skb_shinfo(skb)->frag_list;
-+	while (skb) {
-+		cur_skb++;
-+		len2 = skb->len;
-+		if (skb_headlen(skb)) {
-+			mlx4_err(priv->mdev, "Bad LRO format: non-zero headlen "
-+				  "in fraglist (skb:%d)\n", cur_skb);
-+			return;
-+		}
-+
-+		size2 = 0;
-+		frags = skb_shinfo(skb)->frags;
-+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+			size2 += frags[i].size;
-+		}
-+
-+		if (size2 != len2) {
-+			mlx4_err(priv->mdev, "Bad skb size:%d in LRO fraglist. "
-+			          "Expected:%d (skb:%d)\n", size2, len2, cur_skb);
-+			return;
-+		}
-+		size += size2;
-+		skb = skb->next;
-+	}
-+
-+	if (size != len)
-+		mlx4_err(priv->mdev, "Bad LRO size:%d expected:%d\n", size, len);
-+}
-+#endif /* MLX4_EN_DEBUG_LRO */
-+
-+static void mlx4_en_lro_flush_single(struct mlx4_en_priv* priv,
-+		   struct mlx4_en_rx_ring* ring, struct mlx4_en_lro *lro)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct sk_buff *skb = lro->skb;
-+	struct iphdr *iph = (struct iphdr *) skb->data;
-+	struct tcphdr *th = (struct tcphdr *)(iph + 1);
-+	unsigned int headlen = skb_headlen(skb);
-+	__wsum tcp_hdr_csum;
-+	u32 *ts;
-+
-+	/* Update IP length and checksum */
-+	iph->tot_len = htons(lro->tot_len);
-+	iph->check = 0;
-+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-+
-+	/* Update latest TCP ack, window, psh, and timestamp */
-+	th->ack_seq = lro->ack_seq;
-+	th->window = lro->window;
-+	th->psh = !!lro->psh;
-+	if (lro->has_timestamp) {
-+		ts = (u32 *) (th + 1);
-+		ts[1] = htonl(lro->tsval);
-+		ts[2] = lro->tsecr;
-+	}
-+	th->check = 0;
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, 0);
-+	lro->data_csum = csum_add(lro->data_csum, tcp_hdr_csum);
-+	th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
-+				      lro->tot_len - (iph->ihl << 2),
-+				      IPPROTO_TCP, lro->data_csum);
-+
-+	/* Update skb */
-+	skb->len = lro->tot_len;
-+	skb->data_len = lro->tot_len - headlen;
-+	skb->truesize = skb->len + sizeof(struct sk_buff);
-+	skb_shinfo(skb)->gso_size = lro->mss;
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+	mlx4_en_lro_validate(priv, lro);
-+#endif /* CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+	/* Push it up the stack */
-+	if (priv->vlgrp && lro->has_vlan)
-+		vlan_hwaccel_receive_skb(skb, priv->vlgrp,
-+					be16_to_cpu(lro->vlan_prio));
-+	else
-+		netif_receive_skb(skb);
-+	priv->dev->last_rx = jiffies;
-+
-+	/* Increment stats */
-+	priv->port_stats.lro_flushed++;
-+
-+	/* Move session back to the free list */
-+	hlist_del(&lro->node);
-+	hlist_del(&lro->flush_node);
-+	hlist_add_head(&lro->node, &ring->lro_free);
-+}
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_flush, flush_node) {
-+		if (all || time_after(jiffies, lro->expires))
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+	}
-+}
-+
-+static inline int mlx4_en_lro_append(struct mlx4_en_priv *priv,
-+				   struct mlx4_en_lro *lro,
-+				   struct mlx4_en_rx_desc *rx_desc,
-+				   struct skb_frag_struct *skb_frags,
-+				   struct mlx4_en_rx_alloc *page_alloc,
-+				   unsigned int data_len,
-+				   int hlen)
-+{
-+	struct sk_buff *skb = lro->skb_last;
-+	struct skb_shared_info *info;
-+	struct skb_frag_struct *frags_copy;
-+	int nr_frags;
-+
-+	if (skb_shinfo(skb)->nr_frags + priv->num_frags > MAX_SKB_FRAGS)
-+		return -ENOMEM;
-+
-+	info = skb_shinfo(skb);
-+
-+	/* Copy fragments from descriptor ring to skb */
-+	frags_copy = info->frags + info->nr_frags;
-+	nr_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
-+						frags_copy,
-+						page_alloc,
-+						data_len + hlen);
-+	if (!nr_frags) {
-+		mlx4_dbg(DRV, priv, "Failed completing rx desc during LRO append\n");
-+		return -ENOMEM;
-+	}
-+
-+	/* Skip over headers */
-+	frags_copy[0].page_offset += hlen;
-+
-+	if (nr_frags == 1)
-+		frags_copy[0].size = data_len;
-+	else {
-+		/* Adjust size of last fragment to match packet length.
-+		 * Note: if this fragment is also the first one, the
-+		 *       operation is completed in the next line */
-+		frags_copy[nr_frags - 1].size = hlen + data_len -
-+				priv->frag_info[nr_frags - 1].frag_prefix_size;
-+
-+		/* Adjust size of first fragment */
-+		frags_copy[0].size -= hlen;
-+	}
-+
-+	/* Update skb bookkeeping */
-+	skb->len += data_len;
-+	skb->data_len += data_len;
-+	info->nr_frags += nr_frags;
-+	return 0;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_find_session(struct mlx4_en_dev *mdev,
-+						       struct mlx4_en_rx_ring *ring,
-+						       struct iphdr *iph,
-+						       struct tcphdr *th)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node;
-+	int index = LRO_INDEX(th, mdev->profile.num_lro);
-+	struct hlist_head *list = &ring->lro_hash[index];
-+
-+	hlist_for_each_entry(lro, node, list, node) {
-+		if (lro->sport_dport == *((u32*) &th->source) &&
-+		    lro->saddr == iph->saddr &&
-+		    lro->daddr == iph->daddr)
-+			return lro;
-+	}
-+	return NULL;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_alloc_session(struct mlx4_en_priv *priv,
-+							struct mlx4_en_rx_ring *ring)
-+{
-+	return hlist_empty(&ring->lro_free) ? NULL :
-+		hlist_entry(ring->lro_free.first, struct mlx4_en_lro, node);
-+}
-+
-+static __wsum mlx4_en_lro_tcp_data_csum(struct iphdr *iph,
-+					struct tcphdr *th, int len)
-+{
-+	__wsum tcp_csum;
-+	__wsum tcp_hdr_csum;
-+	__wsum tcp_ps_hdr_csum;
-+
-+	tcp_csum = ~csum_unfold(th->check);
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, tcp_csum);
-+
-+	tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-+					     len + (th->doff << 2),
-+					     IPPROTO_TCP, 0);
-+
-+	return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
-+			tcp_ps_hdr_csum);
-+}
-+
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+					  struct mlx4_en_rx_desc *rx_desc,
-+					  struct skb_frag_struct *skb_frags,
-+					  unsigned int length,
-+					  struct mlx4_cqe *cqe)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct mlx4_en_lro *lro;
-+	struct sk_buff *skb;
-+	struct iphdr *iph;
-+	struct tcphdr *th;
-+	dma_addr_t dma;
-+	int tcp_hlen;
-+	int tcp_data_len;
-+	int hlen;
-+	u16 ip_len;
-+	void *va;
-+	u32 *ts;
-+	u32 seq;
-+	u32 tsval = (u32) ~0UL;
-+	u32 tsecr = 0;
-+	u32 ack_seq;
-+	u16 window;
-+
-+	/* This packet is eligible for LRO if it is:
-+	 * - DIX Ethernet (type interpretation)
-+	 * - TCP/IP (v4)
-+	 * - without IP options
-+	 * - not an IP fragment */
-+	if (!mlx4_en_can_lro(cqe->status))
-+			return -1;
-+
-+	/* Get pointer to TCP header. We already know that the packet is DIX Ethernet/IPv4/TCP
-+	 * with no VLAN (HW stripped it) and no IP options */
-+	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
-+	iph = va + ETH_HLEN;
-+	th = (struct tcphdr *)(iph + 1);
-+
-+	/* Synchronsize headers for processing */
-+	dma = be64_to_cpu(rx_desc->data[0].addr);
-+#define MAX_LRO_HEADER		(ETH_HLEN + \
-+				 sizeof(*iph) + \
-+				 sizeof(*th) + \
-+				 TCPOLEN_TSTAMP_ALIGNED)
-+	dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
-+				      MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+
-+	/* We only handle aligned timestamp options */
-+	tcp_hlen = (th->doff << 2);
-+	if (tcp_hlen == sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) {
-+		ts = (u32*) (th + 1);
-+		if (unlikely(*ts != htonl((TCPOPT_NOP << 24) |
-+					  (TCPOPT_NOP << 16) |
-+					  (TCPOPT_TIMESTAMP << 8) |
-+					  TCPOLEN_TIMESTAMP)))
-+			goto sync_device;
-+		tsval = ntohl(ts[1]);
-+		tsecr = ts[2];
-+	} else if (tcp_hlen != sizeof(*th))
-+		goto sync_device;
-+	
-+
-+	/* At this point we know we have a TCP packet that is likely to be
-+	 * eligible for LRO. Therefore, see now if we have an oustanding
-+	 * session that corresponds to this packet so we could flush it if
-+	 * something still prevents LRO */
-+	lro = mlx4_en_lro_find_session(mdev, ring, iph, th);
-+
-+	/* ensure no bits set besides ack or psh */
-+	if (th->fin || th->syn || th->rst || th->urg || th->ece ||
-+	    th->cwr || !th->ack) {
-+		if (lro) {
-+			/* First flush session to keep packets in-order */
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		}
-+		goto sync_device;
-+	}
-+
-+	/* Get ip length and verify that the frame is big enough */
-+	ip_len = ntohs(iph->tot_len);
-+	if (unlikely(length < ETH_HLEN + ip_len)) {
-+		mlx4_warn(mdev, "Cannot LRO - ip payload exceeds frame!\n");
-+		goto sync_device;
-+	}
-+
-+	/* Get TCP payload length */
-+	tcp_data_len = ip_len - tcp_hlen - sizeof(struct iphdr);
-+	seq = ntohl(th->seq);
-+	if (!tcp_data_len)
-+		goto flush_session;
-+
-+	if (lro) {
-+		/* Check VLAN tag */
-+		if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+			if (cqe->sl_vid != lro->vlan_prio || !lro->has_vlan) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else if (lro->has_vlan) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* Check sequence number */
-+		if (unlikely(seq != lro->next_seq)) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* If the cummulative IP length is over 64K, flush and start
-+		 * a new session */
-+		if (lro->tot_len + tcp_data_len > 0xffff) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto new_session;
-+		}
-+
-+		/* Check timestamps */
-+		if (tcp_hlen != sizeof(*th)) {
-+			if (unlikely(lro->tsval > tsval || !tsecr))
-+				goto sync_device;
-+		}
-+
-+		window = th->window;
-+		ack_seq = th->ack_seq;
-+		if (likely(tcp_data_len)) {
-+			/* Append the data! */
-+			hlen = ETH_HLEN + sizeof(struct iphdr) + tcp_hlen;
-+			if (mlx4_en_lro_append(priv, lro, rx_desc, skb_frags,
-+							ring->page_alloc,
-+							tcp_data_len, hlen)) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else {
-+			/* No data */
-+			dma_sync_single_range_for_device(&mdev->dev->pdev->dev, dma,
-+							 0, MAX_LRO_HEADER,
-+							 DMA_FROM_DEVICE);
-+		}
-+
-+		/* Update session */
-+		lro->psh |= th->psh;
-+		lro->next_seq += tcp_data_len;
-+		lro->data_csum = csum_block_add(lro->data_csum,
-+					mlx4_en_lro_tcp_data_csum(iph, th,
-+								  tcp_data_len),
-+					lro->tot_len);
-+		lro->tot_len += tcp_data_len;
-+		lro->tsval = tsval;
-+		lro->tsecr = tsecr;
-+		lro->ack_seq = ack_seq;
-+		lro->window = window;
-+		if (tcp_data_len > lro->mss)
-+			lro->mss = tcp_data_len;
-+		priv->port_stats.lro_aggregated++;
-+		if (th->psh)
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		return 0;
-+	}
-+
-+new_session:
-+	if (th->psh)
-+		goto sync_device;
-+	lro = mlx4_en_lro_alloc_session(priv, ring);
-+	if (lro) {
-+		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags, ring->page_alloc,
-+							     ETH_HLEN + ip_len);
-+		if (skb) {
-+			int index;
-+
-+			/* Add in the skb */
-+			lro->skb = skb;
-+			lro->skb_last = skb;
-+			skb->protocol = eth_type_trans(skb, priv->dev);
-+			skb->ip_summed = CHECKSUM_UNNECESSARY;
-+
-+			/* Initialize session */
-+			lro->saddr = iph->saddr;
-+			lro->daddr = iph->daddr;
-+			lro->sport_dport = *((u32*) &th->source);
-+
-+			lro->next_seq = seq + tcp_data_len;
-+			lro->tot_len = ip_len;
-+			lro->psh = th->psh;
-+			lro->ack_seq = th->ack_seq;
-+			lro->window = th->window;
-+			lro->mss = tcp_data_len;
-+			lro->data_csum = mlx4_en_lro_tcp_data_csum(iph, th,
-+						tcp_data_len);
-+
-+			/* Handle vlans */
-+			if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+				lro->vlan_prio = cqe->sl_vid;
-+				lro->has_vlan = 1;
-+			} else
-+				lro->has_vlan = 0;
-+
-+			/* Handle timestamps */
-+			if (tcp_hlen != sizeof(*th)) {
-+				lro->tsval = tsval;
-+				lro->tsecr = tsecr;
-+				lro->has_timestamp = 1;
-+			} else {
-+				lro->tsval = (u32) ~0UL;
-+				lro->has_timestamp = 0;
-+			}
-+
-+			/* Activate this session */
-+			lro->expires = jiffies + HZ / 25;
-+			hlist_del(&lro->node);
-+			index = LRO_INDEX(th, mdev->profile.num_lro);
-+
-+			hlist_add_head(&lro->node, &ring->lro_hash[index]);
-+			hlist_add_head(&lro->flush_node, &ring->lro_flush);
-+			priv->port_stats.lro_aggregated++;
-+			return 0;
-+		}
-+	} else {
-+		priv->port_stats.lro_no_desc++;
-+	}
-+
-+flush_session:
-+	if (lro)
-+		mlx4_en_lro_flush_single(priv, ring, lro);
-+sync_device:
-+	dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
-+					 MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+	return -1;
-+}
-+
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_free, node) {
-+		hlist_del(&lro->node);
-+		kfree(lro);
-+	}
-+	kfree(ring->lro_hash);
-+}
-+
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro)
-+{
-+	struct mlx4_en_lro *lro;
-+	int i;
-+
-+	INIT_HLIST_HEAD(&ring->lro_free);
-+	INIT_HLIST_HEAD(&ring->lro_flush);
-+	ring->lro_hash = kmalloc(sizeof(struct hlist_head) * num_lro,
-+				 GFP_KERNEL);
-+	if (!ring->lro_hash)
-+		return -ENOMEM;
-+
-+	for (i = 0; i < num_lro; i++) {
-+		INIT_HLIST_HEAD(&ring->lro_hash[i]);
-+		lro = kzalloc(sizeof(struct mlx4_en_lro), GFP_KERNEL);
-+		if (!lro) {
-+			mlx4_en_lro_destroy(ring);
-+			return -ENOMEM;
-+		}
-+		INIT_HLIST_NODE(&lro->node);
-+		INIT_HLIST_NODE(&lro->flush_node);
-+		hlist_add_head(&lro->node, &ring->lro_free);
-+	}
-+	return 0;
-+}
-+
-+
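For readers tracing the mlx4_en_lro_rx() flow shown above (the en_lro.c body this commit drops from the patch): sessions are looked up by the IPv4/TCP 4-tuple through ring->lro_hash and LRO_INDEX(). The sketch below is illustrative only; it assumes a plain modulo hash over the TCP port word in place of the driver's LRO_INDEX() macro, whose definition is not part of this hunk, and uses the era-appropriate four-argument hlist_for_each_entry().

    /* Illustrative sketch, not the mlx4_en code: find an LRO session by
     * matching the IPv4 addresses and the TCP port pair.  'ports % num_lro'
     * stands in for LRO_INDEX(). */
    #include <linux/types.h>
    #include <linux/list.h>
    #include <linux/ip.h>
    #include <linux/tcp.h>

    struct lro_session {
            struct hlist_node node;
            u32 saddr;
            u32 daddr;
            u32 sport_dport;        /* source and destination port as one word */
    };

    static struct lro_session *lro_find(struct hlist_head *hash, int num_lro,
                                        struct iphdr *iph, struct tcphdr *th)
    {
            u32 ports = *((u32 *) &th->source);
            struct lro_session *s;
            struct hlist_node *n;

            hlist_for_each_entry(s, n, &hash[ports % num_lro], node)
                    if (s->saddr == iph->saddr && s->daddr == iph->daddr &&
                        s->sport_dport == ports)
                            return s;
            return NULL;
    }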
 diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
-index 6199f4c..92f91d2 100644
+index 2d57ae6..3b394d7 100644
 --- a/drivers/net/mlx4/en_netdev.c
 +++ b/drivers/net/mlx4/en_netdev.c
 @@ -333,7 +333,8 @@ void mlx4_en_netpoll(struct net_device *dev)
@@ -625,10 +57,10 @@
 -		napi_synchronize(&cq->napi);
 +		while (test_bit(__LINK_STATE_RX_SCHED, &cq->poll_dev->state))
 +			msleep(1);
- 		mlx4_en_process_rx_cq(dev, cq, 0);
- 		spin_unlock_irqrestore(&cq->lock, flags);
- 	}
-@@ -757,8 +758,6 @@ void mlx4_en_stop_port(struct net_device *dev)
+ 		if (priv->rx_ring[i].use_frags)
+ 			mlx4_en_process_rx_cq(dev, cq, 0);
+ 		else
+@@ -755,8 +756,6 @@ void mlx4_en_stop_port(struct net_device *dev)
  	/* Free RX Rings */
  	for (i = 0; i < priv->rx_ring_num; i++) {
  		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -637,226 +69,11 @@
  		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
  	}
  }
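The en_netdev.c hunk above keeps the backport on the pre-2.6.24 polling API, so napi_synchronize() is replaced by spinning on __LINK_STATE_RX_SCHED until the poll handler is no longer scheduled. A minimal sketch of that waiting idiom, assuming cq->poll_dev points at the net_device being polled:

    /* Sketch of the old-API equivalent of napi_synchronize(): wait until the
     * device is no longer scheduled for RX polling before touching its ring. */
    #include <linux/netdevice.h>
    #include <linux/delay.h>

    static void wait_for_poll_idle(struct net_device *poll_dev)
    {
            while (test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state))
                    msleep(1);      /* a poll is still pending or running */
    }

msleep() keeps the wait cheap; the loop exits once netif_rx_complete() has cleared the bit.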
-diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
-index 6e96d0a..2b0ca84 100644
---- a/drivers/net/mlx4/en_params.c
-+++ b/drivers/net/mlx4/en_params.c
-@@ -142,21 +142,6 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
-  * Ethtool support
-  */
- 
--static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
--{
--	int i;
--
--	priv->port_stats.lro_aggregated = 0;
--	priv->port_stats.lro_flushed = 0;
--	priv->port_stats.lro_no_desc = 0;
--
--	for (i = 0; i < priv->rx_ring_num; i++) {
--		priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
--		priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
--		priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
--	}
--}
--
- static void
- mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
- {
-@@ -266,8 +251,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
- 
- 	spin_lock_bh(&priv->stats_lock);
- 
--	mlx4_en_update_lro_stats(priv);
--
- 	for (i = 0; i < NUM_MAIN_STATS; i++)
- 		data[index++] = ((unsigned long *) &priv->stats)[i];
- 	for (i = 0; i < NUM_PORT_STATS; i++)
-@@ -457,7 +440,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
- 	.get_rx_csum = mlx4_en_get_rx_csum,
- 	.set_rx_csum = mlx4_en_set_rx_csum,
- 	.get_tx_csum = ethtool_op_get_tx_csum,
--	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
-+	.set_tx_csum = ethtool_op_set_tx_csum,
- 	.get_strings = mlx4_en_get_strings,
- 	.get_sset_count = mlx4_en_get_sset_count,
- 	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
 diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
-index 5a95d74..16f07bc 100644
+index 9d144cd..14b5faf 100644
 --- a/drivers/net/mlx4/en_rx.c
 +++ b/drivers/net/mlx4/en_rx.c
-@@ -37,6 +37,7 @@
- #include <linux/if_ether.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/etherdevice.h>
- 
- #include "mlx4_en.h"
- 
-@@ -51,18 +52,6 @@ static void mlx4_en_srq_event(struct mlx
- 	return;
- }
- 
--static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
--				   void **ip_hdr, void **tcpudp_hdr,
--				   u64 *hdr_flags, void *priv)
--{
--	*mac_hdr = page_address(frags->page) + frags->page_offset;
--	*ip_hdr = *mac_hdr + ETH_HLEN;
--	*tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
--	*hdr_flags = LRO_IPV4 | LRO_TCP;
--
--	return 0;
--}
--
- static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
- 			      struct mlx4_en_rx_desc *rx_desc,
- 			      struct skb_frag_struct *skb_frags,
-@@ -372,23 +361,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- 	}
- 	ring->buf = ring->wqres.buf.direct.buf;
- 
--	/* Configure lro mngr */
--	memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
--	ring->lro.dev = priv->dev;
--	ring->lro.features = LRO_F_NAPI;
--	ring->lro.frag_align_pad = NET_IP_ALIGN;
--	ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
--	ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
--	ring->lro.max_desc = mdev->profile.num_lro;
--	ring->lro.max_aggr = MAX_SKB_FRAGS;
--	ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
--				    sizeof(struct net_lro_desc),
--				    GFP_KERNEL);
--	if (!ring->lro.lro_arr) {
--		mlx4_err(mdev, "Failed to allocate lro array\n");
-+	/* Allocate LRO sessions */
-+	if (mlx4_en_lro_init(ring, mdev->profile.num_lro)) {
-+		mlx4_err(mdev, "Failed allocating lro sessions\n");
- 		goto err_map;
- 	}
--	ring->lro.get_frag_header = mlx4_en_get_frag_header;
- 
- 	return 0;
- 
-@@ -501,7 +478,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 
--	kfree(ring->lro.lro_arr);
-+	mlx4_en_lro_destroy(ring);
- 	mlx4_en_unmap_buffer(&ring->wqres.buf);
- 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
- 	vfree(ring->rx_info);
-@@ -522,12 +499,12 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- 
- 
- /* Unmap a completed descriptor and free unused pages */
--static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
--				    struct mlx4_en_rx_desc *rx_desc,
--				    struct skb_frag_struct *skb_frags,
--				    struct skb_frag_struct *skb_frags_rx,
--				    struct mlx4_en_rx_alloc *page_alloc,
--				    int length)
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct mlx4_en_frag_info *frag_info;
-@@ -570,11 +547,11 @@ fail:
- }
- 
- 
--static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
--				      struct mlx4_en_rx_desc *rx_desc,
--				      struct skb_frag_struct *skb_frags,
--				      struct mlx4_en_rx_alloc *page_alloc,
--				      unsigned int length)
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct sk_buff *skb;
-@@ -663,11 +640,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 	struct mlx4_cqe *cqe;
- 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
- 	struct skb_frag_struct *skb_frags;
--	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
- 	struct mlx4_en_rx_desc *rx_desc;
- 	struct sk_buff *skb;
- 	int index;
--	int nr;
- 	unsigned int length;
- 	int polled = 0;
- 	int ip_summed;
-@@ -718,38 +693,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 			if ((cqe->status & MLX4_CQE_STATUS_IPOK) &&
- 			    (cqe->checksum == 0xffff)) {
- 				priv->port_stats.rx_chksum_good++;
--				/* This packet is eligible for LRO if it is:
--				 * - DIX Ethernet (type interpretation)
--				 * - TCP/IP (v4)
--				 * - without IP options
--				 * - not an IP fragment */
--				if (mlx4_en_can_lro(cqe->status) &&
--				    dev->features & NETIF_F_LRO) {
--
--					nr = mlx4_en_complete_rx_desc(
--						priv, rx_desc,
--						skb_frags, lro_frags,
--						ring->page_alloc, length);
--					if (!nr)
--						goto next;
--
--					if (priv->vlgrp && (cqe->vlan_my_qpn &
--						MLX4_CQE_VLAN_PRESENT_MASK)) {
--						lro_vlan_hwaccel_receive_frags(
--						       &ring->lro, lro_frags,
--						       length, length,
--						       priv->vlgrp,
--						       be16_to_cpu(cqe->sl_vid),
--						       NULL, 0);
--					} else
--						lro_receive_frags(&ring->lro,
--								  lro_frags,
--								  length,
--								  length,
--								  NULL, 0);
--
-+				if (mdev->profile.num_lro &&
-+				    !mlx4_en_lro_rx(priv, ring, rx_desc,
-+						    skb_frags, length, cqe))
- 					goto next;
--				}
- 
- 				/* LRO not possible, complete processing here */
- 				ip_summed = CHECKSUM_UNNECESSARY;
-@@ -790,13 +737,15 @@ next:
- 		if (++polled == budget) {
- 			/* We are here because we reached the NAPI budget -
- 			 * flush only pending LRO sessions */
--			lro_flush_all(&ring->lro);
-+			if (mdev->profile.num_lro)
-+				mlx4_en_lro_flush(priv, ring, 0);
- 			goto out;
- 		}
- 	}
- 
- 	/* If CQ is empty flush all LRO sessions unconditionally */
--	lro_flush_all(&ring->lro);
-+	if (mdev->profile.num_lro)
-+		mlx4_en_lro_flush(priv, ring, 1);
- 
- out:
- 	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
-@@ -820,30 +769,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
+@@ -1041,30 +1041,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
  	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
  
  	if (priv->port_up)
@@ -877,8 +94,8 @@
  	int done;
 +	int work = min(*budget, poll_dev->quota);
  
--	done = mlx4_en_process_rx_cq(dev, cq, budget);
-+	done = mlx4_en_process_rx_cq(dev, cq, work);
+-	done = cq->process_cq(dev, cq, budget);
++	done = cq->process_cq(dev, cq, work);
 +	dev->quota -= done;
 +	*budget -= done;
  
@@ -901,76 +118,11 @@
  }
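The en_rx.c hunk above keeps mlx4_en_poll_rx_cq() on the older net_device->poll prototype, in which the handler receives a *budget pointer, must charge completed work against both dev->quota and *budget, and returns non-zero while work remains. A hedged skeleton of that calling convention (process_rx() is a stand-in name, not the driver's function):

    /* Illustrative pre-2.6.24 poll handler skeleton. */
    #include <linux/kernel.h>
    #include <linux/netdevice.h>

    extern int process_rx(struct net_device *poll_dev, int work);  /* assumed helper */

    static int old_style_poll(struct net_device *poll_dev, int *budget)
    {
            int work = min(*budget, poll_dev->quota);
            int done = process_rx(poll_dev, work);

            poll_dev->quota -= done;
            *budget -= done;

            if (done < work) {
                    netif_rx_complete(poll_dev);    /* caught up: leave the poll list */
                    return 0;
            }
            return 1;                               /* more work pending, poll again */
    }

Returning 1 without completing mirrors what the patched mlx4_en code does when the budget is exhausted.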
  
  
-diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
-index 2297fec..59f8772 100644
---- a/drivers/net/mlx4/en_tx.c
-+++ b/drivers/net/mlx4/en_tx.c
-@@ -37,6 +37,7 @@
- #include <linux/skbuff.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/tcp.h>
- 
- #include "mlx4_en.h"
- 
 diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
-index 1011803..7c8c489 100644
+index ebaed67..456cd91 100644
 --- a/drivers/net/mlx4/mlx4_en.h
 +++ b/drivers/net/mlx4/mlx4_en.h
-@@ -38,7 +38,7 @@
- #include <linux/list.h>
- #include <linux/mutex.h>
- #include <linux/netdevice.h>
--#include <linux/inet_lro.h>
-+#include <net/checksum.h>
- 
- #include <linux/mlx4/device.h>
- #include <linux/mlx4/qp.h>
-@@ -268,11 +268,42 @@ struct mlx4_en_rx_desc {
- 	struct mlx4_wqe_data_seg data[0];
- };
- 
-+struct mlx4_en_lro {
-+	struct hlist_node node;
-+	struct hlist_node flush_node;
-+
-+	/* Id fields come first: */
-+	u32 saddr;
-+	u32 daddr;
-+	u32 sport_dport;
-+	u32 next_seq;
-+	u16 tot_len;
-+	u8 psh;
-+
-+	u32 tsval;
-+	u32 tsecr;
-+	u32 ack_seq;
-+	u16 window;
-+	__be16 vlan_prio;
-+	u16 has_vlan;
-+	u16 has_timestamp;
-+	u16 mss;
-+	__wsum  data_csum;
-+
-+	unsigned long expires;
-+	struct sk_buff *skb;
-+	struct sk_buff *skb_last;
-+};
-+
-+
- struct mlx4_en_rx_ring {
- 	struct mlx4_srq srq;
- 	struct mlx4_hwq_resources wqres;
- 	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
--	struct net_lro_mgr lro;
-+	struct mlx4_en_lro lro;
-+	struct hlist_head *lro_hash;
-+	struct hlist_head lro_free;
-+	struct hlist_head lro_flush;
- 	u32 size ;	/* number of Rx descs*/
- 	u32 actual_size;
- 	u32 size_mask;
-@@ -311,7 +342,7 @@ struct mlx4_en_cq {
+@@ -328,7 +328,7 @@ struct mlx4_en_cq {
  	int                     ring;
  	spinlock_t              lock;
  	struct net_device      *dev;
@@ -979,43 +131,12 @@
  	/* Per-core Tx cq processing support */
  	struct timer_list timer;
  	int size;
-@@ -541,7 +572,7 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- int mlx4_en_process_rx_cq(struct net_device *dev,
- 			  struct mlx4_en_cq *cq,
- 			  int budget);
+@@ -571,7 +571,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
+ int mlx4_en_process_rx_cq_skb(struct net_device *dev,
+ 			      struct mlx4_en_cq *cq,
+ 			      int budget);
 -int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
 +int mlx4_en_poll_rx_cq(struct net_device *poll_dev, int *budget);
  void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
  			     int is_tx, int rss, int qpn, int cqn, int srqn,
  			     struct mlx4_qp_context *context);
-@@ -552,6 +583,27 @@ void mlx4_en_calc_rx_buf(struct net_device *dev);
- void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
- 				 struct mlx4_en_rss_map *rss_map,
- 				 int num_entries, int num_rings);
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all);
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+		   struct mlx4_en_rx_desc *rx_desc,
-+		   struct skb_frag_struct *skb_frags,
-+		   unsigned int length, struct mlx4_cqe *cqe);
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring);
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro);
-+
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv* priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length);
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv* priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length);
-+
- void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
- int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
- void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
--- 
-1.5.4
-

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/mlx4_en_0110_no_set_flags.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/mlx4_en_0110_no_set_flags.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.19/mlx4_en_0110_no_set_flags.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -7,9 +7,9 @@
 --- ofed_kernel.orig/drivers/net/mlx4/en_params.c
 +++ ofed_kernel/drivers/net/mlx4/en_params.c
 @@ -593,8 +593,6 @@ const struct ethtool_ops mlx4_en_ethtool
- 	.get_pauseparam = mlx4_en_get_pauseparam,
  	.set_pauseparam = mlx4_en_set_pauseparam,
  	.get_ringparam = mlx4_en_get_ringparam,
+ 	.set_ringparam = mlx4_en_set_ringparam,
 -	.get_flags = ethtool_op_get_flags,
 -	.set_flags = ethtool_op_set_flags,
  };
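This patch drops the .get_flags/.set_flags ethtool hooks because ethtool_op_get_flags()/ethtool_op_set_flags() do not exist on 2.6.19. An alternative some backports use is to keep the members behind a compile-time guard; a sketch, where HAVE_ETHTOOL_FLAGS is a hypothetical symbol a configure step would define on kernels that provide the ops:

    /* Sketch only -- not part of this patch. */
    static const struct ethtool_ops example_ethtool_ops = {
            .get_ringparam  = mlx4_en_get_ringparam,
            .set_ringparam  = mlx4_en_set_ringparam,
    #ifdef HAVE_ETHTOOL_FLAGS               /* hypothetical, set when the ops exist */
            .get_flags      = ethtool_op_get_flags,
            .set_flags      = ethtool_op_set_flags,
    #endif
    };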

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/cxgb3_0002_undo_250.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/cxgb3_0002_undo_250.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/cxgb3_0002_undo_250.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,8 @@
-commit 204e2f98c2d13f869b8541f3c57c7314f75cab11
-Author: Divy Le Ray <divy at chelsio.com>
-Date:   Tue May 6 19:26:01 2008 -0700
-
-    cxgb3 - fix EEH
-    
-    Reset the chip when the PCI link goes down.
-    Preserve the napi structure when a sge qset's resources are freed.
-    Replay only HW initialization when the chip comes out of reset.
-    
-    Signed-off-by: Divy Le ray <divy at chelsio.com>
-    Signed-off-by: Jeff Garzik <jgarzik at redhat.com>
-
----
- drivers/net/cxgb3/common.h     |    1 -
- drivers/net/cxgb3/cxgb3_main.c |   10 ++++------
- drivers/net/cxgb3/regs.h       |    8 --------
- drivers/net/cxgb3/sge.c        |   31 ++-----------------------------
- drivers/net/cxgb3/t3_hw.c      |   28 ----------------------------
- 5 files changed, 6 insertions(+), 72 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/common.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/common.h
-+++ ofed_kernel/drivers/net/cxgb3/common.h
-@@ -700,7 +700,6 @@ void mac_prep(struct cmac *mac, struct a
+diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
+index d5e9bf7..dc2c79d 100644
+--- a/drivers/net/cxgb3/common.h
++++ b/drivers/net/cxgb3/common.h
+@@ -726,7 +726,6 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
  void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
  int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
  		    int reset);
@@ -31,11 +10,11 @@
  void t3_led_ready(struct adapter *adapter);
  void t3_fatal_err(struct adapter *adapter);
  void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -2449,6 +2449,9 @@ static pci_ers_result_t t3_io_error_dete
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 8de820e..48fbda6 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -2485,6 +2485,9 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
  	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
  		offload_close(&adapter->tdev);
  
@@ -45,7 +24,7 @@
  	adapter->flags &= ~FULL_INIT_DONE;
  
  	pci_disable_device(pdev);
-@@ -2473,12 +2476,8 @@ static pci_ers_result_t t3_io_slot_reset
+@@ -2509,12 +2512,8 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
  		goto err;
  	}
  	pci_set_master(pdev);
@@ -59,7 +38,7 @@
  		goto err;
  
  	return PCI_ERS_RESULT_RECOVERED;
-@@ -2630,7 +2629,6 @@ static int __devinit init_one(struct pci
+@@ -2666,7 +2665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
  	}
  
  	pci_set_master(pdev);
@@ -67,10 +46,10 @@
  
  	mmio_start = pci_resource_start(pdev, 0);
  	mmio_len = pci_resource_len(pdev, 0);
-Index: ofed_kernel/drivers/net/cxgb3/regs.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/regs.h
-+++ ofed_kernel/drivers/net/cxgb3/regs.h
+diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
+index a035d5c..086cd02 100644
+--- a/drivers/net/cxgb3/regs.h
++++ b/drivers/net/cxgb3/regs.h
 @@ -444,14 +444,6 @@
  
  #define A_PCIE_CFG 0x88
@@ -86,11 +65,11 @@
  #define S_PCIE_CLIDECEN    16
  #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
  #define F_PCIE_CLIDECEN    V_PCIE_CLIDECEN(1U)
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index 1b0861d..912f816 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
  }
  
  /**
@@ -124,7 +103,7 @@
   *	free_qset - free the resources of an SGE queue set
   *	@adapter: the adapter owning the queue set
   *	@q: the queue set
-@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter 
+@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -133,20 +112,11 @@
  }
  
  /**
-@@ -1429,7 +1402,7 @@ static void restart_ctrlq(unsigned long 
-  */
- int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
- {
--	int ret;
-+	int ret;
- 	local_bh_disable();
- 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
- 	local_bh_enable();
-Index: ofed_kernel/drivers/net/cxgb3/t3_hw.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/t3_hw.c
-+++ ofed_kernel/drivers/net/cxgb3/t3_hw.c
-@@ -3265,7 +3265,6 @@ static void config_pcie(struct adapter *
+diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
+index bf5c076..533fc74 100644
+--- a/drivers/net/cxgb3/t3_hw.c
++++ b/drivers/net/cxgb3/t3_hw.c
+@@ -3303,7 +3303,6 @@ static void config_pcie(struct adapter *adap)
  
  	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
  	t3_set_reg_field(adap, A_PCIE_CFG, 0,
@@ -154,15 +124,14 @@
  			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
  }
  
-@@ -3657,30 +3656,3 @@ void t3_led_ready(struct adapter *adapte
- 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+@@ -3706,31 +3705,3 @@ void t3_led_ready(struct adapter *adapter)
  			 F_GPIO0_OUT_VAL);
  }
--
+ 
 -int t3_replay_prep_adapter(struct adapter *adapter)
 -{
 -	const struct adapter_info *ai = adapter->params.info;
--	unsigned int i, j = 0;
+-	unsigned int i, j = -1;
 -	int ret;
 -
 -	early_hw_init(adapter, ai);
@@ -171,15 +140,17 @@
 -		return ret;
 -
 -	for_each_port(adapter, i) {
+-		const struct port_type_info *pti;
 -		struct port_info *p = adap2pinfo(adapter, i);
--		while (!adapter->params.vpd.port_type[j])
--			++j;
 -
--		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
--					ai->mdio_ops);
+-		while (!adapter->params.vpd.port_type[++j])
+-			;
 -
+-		pti = &port_types[adapter->params.vpd.port_type[j]];
+-		ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
+-		if (ret)
+-			return ret;
 -		p->phy.ops->power_down(&p->phy, 1);
--		++j;
 -	}
 -
 -return 0;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/cxgb3_0010_napi.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/cxgb3_0010_napi.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/cxgb3_0010_napi.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,21 @@
----
- drivers/net/cxgb3/adapter.h    |   22 +++--
- drivers/net/cxgb3/cxgb3_main.c |   94 ++++++++++++++++------
- drivers/net/cxgb3/sge.c        |  170 ++++++++++++++++++++++++-----------------
- 3 files changed, 185 insertions(+), 101 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/adapter.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/adapter.h
-+++ ofed_kernel/drivers/net/cxgb3/adapter.h
-@@ -47,13 +47,11 @@
- #include <asm/io.h>
+diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
+index 3b33ecb..21dad82 100644
+--- a/drivers/net/cxgb3/adapter.h
++++ b/drivers/net/cxgb3/adapter.h
+@@ -48,12 +48,10 @@
  
  struct vlan_group;
--struct adapter;
+ struct adapter;
 -struct sge_qset;
  
-+struct adapter;
  struct port_info {
  	struct adapter *adapter;
  	struct vlan_group *vlan_grp;
 -	struct sge_qset *qs;
- 	const struct port_type_info *port_type;
  	u8 port_id;
  	u8 rx_csum_offload;
-@@ -184,8 +182,6 @@ enum {				/* per port SGE statistics */
+ 	u8 nqsets;
+@@ -183,8 +181,6 @@ enum {				/* per port SGE statistics */
  #define T3_MAX_LRO_MAX_PKTS 64
  
  struct sge_qset {		/* an SGE queue set */
@@ -32,7 +24,7 @@
  	struct sge_rspq rspq;
  	struct sge_fl fl[SGE_RXQ_PER_SET];
  	struct sge_txq txq[SGE_TXQ_PER_SET];
-@@ -196,7 +192,7 @@ struct sge_qset {		/* an SGE queue set *
+@@ -195,7 +191,7 @@ struct sge_qset {		/* an SGE queue set */
  	int lro_enabled;
  	int lro_frag_len;
  	void *lro_va;
@@ -41,7 +33,7 @@
  	unsigned long txq_stopped;	/* which Tx queues are stopped */
  	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
  	unsigned long port_stats[SGE_PSTAT_MAX];
-@@ -241,6 +237,12 @@ struct adapter {
+@@ -240,6 +236,12 @@ struct adapter {
  	struct delayed_work adap_check_task;
  	struct work_struct ext_intr_handler_task;
  
@@ -54,7 +46,7 @@
  	struct dentry *debugfs_root;
  
  	struct mutex mdio_lock;
-@@ -267,6 +269,12 @@ static inline struct port_info *adap2pin
+@@ -266,6 +268,12 @@ static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
  	return netdev_priv(adap->port[idx]);
  }
  
@@ -67,7 +59,7 @@
  #define OFFLOAD_DEVMAP_BIT 15
  
  #define tdev2adap(d) container_of(d, struct adapter, tdev)
-@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, str
+@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -76,11 +68,11 @@
  int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
  		unsigned char *data);
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -410,17 +410,49 @@ static void setup_rss(struct adapter *ad
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 0f4c694..342d441 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -435,17 +435,49 @@ static void setup_rss(struct adapter *adap)
  		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
  }
  
@@ -98,9 +90,7 @@
 -	int i;
 +	int i, j, dummy_idx = 0;
 +	struct net_device *nd;
- 
--	for (i = 0; i < SGE_QSETS; i++) {
--		struct sge_qset *qs = &adap->sge.qs[i];
++
 +	for_each_port(adap, i) {
 +		struct net_device *dev = adap->port[i];
 +		const struct port_info *pi = netdev_priv(dev);
@@ -112,7 +102,9 @@
 +				nd = alloc_netdev(sizeof(*p), "", ether_setup);
 +				if (!nd)
 +					goto free_all;
-+
+ 
+-	for (i = 0; i < SGE_QSETS; i++) {
+-		struct sge_qset *qs = &adap->sge.qs[i];
 +				p = netdev_priv(nd);
 +				p->adapter = adap;
 +				nd->weight = 64;
@@ -137,7 +129,7 @@
  }
  
  /*
-@@ -431,18 +463,20 @@ static void init_napi(struct adapter *ad
+@@ -456,18 +488,20 @@ static void init_napi(struct adapter *adap)
  static void quiesce_rx(struct adapter *adap)
  {
  	int i;
@@ -168,7 +160,7 @@
  }
  
  /**
-@@ -455,7 +489,7 @@ static void enable_all_napi(struct adapt
+@@ -480,7 +514,7 @@ static void enable_all_napi(struct adapter *adap)
   */
  static int setup_sge_qsets(struct adapter *adap)
  {
@@ -177,7 +169,7 @@
  	unsigned int ntxq = SGE_TXQ_PER_SET;
  
  	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
-@@ -463,14 +497,15 @@ static int setup_sge_qsets(struct adapte
+@@ -488,14 +522,15 @@ static int setup_sge_qsets(struct adapter *adap)
  
  	for_each_port(adap, i) {
  		struct net_device *dev = adap->port[i];
@@ -196,7 +188,7 @@
  			if (err) {
  				t3_free_sge_resources(adap);
  				return err;
-@@ -884,6 +919,10 @@ static int cxgb_up(struct adapter *adap)
+@@ -909,6 +944,10 @@ static int cxgb_up(struct adapter *adap)
  				goto out;
  		}
  
@@ -207,7 +199,7 @@
  		err = t3_init_hw(adap, 0);
  		if (err)
  			goto out;
-@@ -896,7 +935,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -921,7 +960,6 @@ static int cxgb_up(struct adapter *adap)
  			goto out;
  
  		setup_rss(adap);
@@ -215,7 +207,7 @@
  		adap->flags |= FULL_INIT_DONE;
  	}
  
-@@ -924,7 +962,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -949,7 +987,6 @@ static int cxgb_up(struct adapter *adap)
  				      adap->name, adap)))
  		goto irq_err;
  
@@ -223,7 +215,7 @@
  	t3_sge_start(adap);
  	t3_intr_enable(adap);
  
-@@ -1061,10 +1098,8 @@ static int cxgb_open(struct net_device *
+@@ -1086,10 +1123,8 @@ static int cxgb_open(struct net_device *dev)
  	int other_ports = adapter->open_device_map & PORT_MASK;
  	int err;
  
@@ -235,7 +227,7 @@
  
  	set_bit(pi->port_id, &adapter->open_device_map);
  	if (is_offload(adapter) && !ofld_disable) {
-@@ -2700,6 +2735,7 @@ static int __devinit init_one(struct pci
+@@ -2736,6 +2771,7 @@ static int __devinit init_one(struct pci_dev *pdev,
  #ifdef CONFIG_NET_POLL_CONTROLLER
  		netdev->poll_controller = cxgb_netpoll;
  #endif
@@ -243,7 +235,7 @@
  
  		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
  	}
-@@ -2800,6 +2836,12 @@ static void __devexit remove_one(struct 
+@@ -2836,6 +2872,12 @@ static void __devexit remove_one(struct pci_dev *pdev)
  		t3_free_sge_resources(adapter);
  		cxgb_disable_msi(adapter);
  
@@ -256,11 +248,11 @@
  		for_each_port(adapter, i)
  			if (adapter->port[i])
  				free_netdev(adapter->port[i]);
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter 
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index f6bc6fe..3bbf626 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -270,7 +262,7 @@
  	memset(q, 0, sizeof(*q));
  }
  
-@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, str
+@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
  	unsigned int ndesc, pidx, credits, gen, compl;
  	const struct port_info *pi = netdev_priv(dev);
  	struct adapter *adap = pi->adapter;
@@ -279,7 +271,7 @@
  	struct sge_txq *q = &qs->txq[TXQ_ETH];
  
  	/*
-@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long 
+@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long data)
  	struct sk_buff *skb;
  	struct sge_qset *qs = (struct sge_qset *)data;
  	struct sge_txq *q = &qs->txq[TXQ_CTRL];
@@ -295,7 +287,7 @@
  
  		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
  
-@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long 
+@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long data)
  
  	spin_unlock(&q->lock);
  	wmb();
@@ -304,7 +296,7 @@
  		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
  }
  
-@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struc
+@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
  	else {
  		struct sge_qset *qs = rspq_to_qset(q);
  
@@ -314,7 +306,7 @@
  		q->rx_head = skb;
  	}
  	q->rx_tail = skb;
-@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundl
+@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev,
   *	receive handler.  Batches need to be of modest size as we do prefetches
   *	on the packets in each.
   */
@@ -357,7 +349,7 @@
  			prefetch(head->data);
  			skbs[ngathered] = head;
  			head = head->next;
-@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct 
+@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct *napi, int budget)
  		}
  		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
  	}
@@ -370,7 +362,7 @@
  }
  
  /**
-@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const
+@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const struct rsp_desc *r)
  
  /**
   *	napi_rx_handler - the NAPI handler for Rx processing
@@ -449,7 +441,7 @@
  }
  
  /**
-@@ -2448,7 +2462,8 @@ static inline int handle_responses(struc
+@@ -2448,7 +2462,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
  		return 0;
  	}
@@ -459,7 +451,7 @@
  	return 1;
  }
  
-@@ -2459,7 +2474,8 @@ static inline int handle_responses(struc
+@@ -2459,7 +2474,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -469,7 +461,7 @@
  	struct sge_rspq *q = &qs->rspq;
  
  	spin_lock(&q->lock);
-@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, vo
+@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -484,7 +476,7 @@
  		q->unhandled_irqs++;
  	spin_unlock(&q->lock);
  	return IRQ_HANDLED;
-@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, 
+@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, void *cookie)
  	return IRQ_HANDLED;
  }
  
@@ -502,7 +494,7 @@
  		return 1;
  	}
  	return 0;
-@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int 
+@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
  
  	spin_lock(&q->lock);
  
@@ -515,7 +507,7 @@
  	if (!new_packets && t3_slow_intr_handler(adap) == 0)
  		q->unhandled_irqs++;
  
-@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, voi
+@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, void *cookie)
  static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  {
  	u32 map;
@@ -527,7 +519,7 @@
  
  	t3_write_reg(adap, A_PL_CLI, 0);
  	map = t3_read_reg(adap, A_SG_DATA_INTR);
-@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq
+@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  	if (unlikely(map & F_ERRINTR))
  		t3_slow_intr_handler(adap);
  
@@ -550,7 +542,7 @@
  
  	spin_unlock(&q0->lock);
  	return IRQ_HANDLED;
-@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long d
+@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long data)
  {
  	spinlock_t *lock;
  	struct sge_qset *qs = (struct sge_qset *)data;
@@ -560,7 +552,7 @@
  
  	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
  		reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
-@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long d
+@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long data)
  		spin_unlock(&qs->txq[TXQ_OFLD].lock);
  	}
  	lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
@@ -572,7 +564,7 @@
  			u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
  
  			if (qs->fl[0].credits < qs->fl[0].size)
-@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long d
+@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long data)
   */
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
  {
@@ -586,7 +578,7 @@
  }
  
  /**
-@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_
+@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
   */
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -595,7 +587,7 @@
  {
  	int i, avail, ret = -ENOMEM;
  	struct sge_qset *q = &adapter->sge.qs[id];
-@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *ad
+@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  	}
  
  	spin_unlock_irq(&adapter->sge.reg_lock);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,23 +3,32 @@
  drivers/infiniband/hw/ipath/ipath_user_sdma.c |    6 +++---
  2 files changed, 4 insertions(+), 4 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:37:35.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:41:22.000000000 -0800
 @@ -698,7 +698,7 @@ retry:
  
  	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
  			      tx->map_len, DMA_TO_DEVICE);
--	if (dma_mapping_error(&dd->pcidev->dev, addr)) {
-+	if (dma_mapping_error(addr)) {
- 		ret = -EIO;
- 		goto unlock;
- 	}
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+-	if (dma_mapping_error(&dd->pcidev->dev, addr))
++	if (dma_mapping_error(addr))
+ 		goto ioerr;
+ 
+ 	dwoffset = tx->map_len >> 2;
+@@ -739,7 +739,7 @@ retry:
+ 		dw = (len + 3) >> 2;
+ 		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
+ 				      DMA_TO_DEVICE);
+-		if (dma_mapping_error(&dd->pcidev->dev, addr))
++		if (dma_mapping_error(addr))
+ 			goto unmap;
+ 		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
+ 		/* SDmaUseLargeBuf has to be set in every descriptor */
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:18.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:54.000000000 -0800
 @@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(cons
  
  	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
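The hunks in this patch exist because dma_mapping_error() did not take a struct device argument until 2.6.27; on the kernels this backport targets it is called with the dma_addr_t alone. Instead of rewriting each call site, a backport can also hide the difference behind a small shim; a sketch, assuming a backport-local compat header:

    /* Sketch of a compat shim (not part of this patch): map the newer
     * two-argument dma_mapping_error() onto the older one-argument API. */
    #include <linux/version.h>
    #include <linux/dma-mapping.h>

    #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
    #define compat_dma_mapping_error(dev, addr)     dma_mapping_error(addr)
    #else
    #define compat_dma_mapping_error(dev, addr)     dma_mapping_error(dev, addr)
    #endif

Call sites would then use compat_dma_mapping_error(&dd->pcidev->dev, addr) unconditionally.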

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/ipoib_0100_to_2.6.21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/ipoib_0100_to_2.6.21.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/ipoib_0100_to_2.6.21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -12,11 +12,11 @@
  drivers/infiniband/ulp/ipoib/ipoib_multicast.c |   10 ++--
  5 files changed, 61 insertions(+), 64 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:56:08.000000000 +0200
-@@ -279,8 +279,6 @@ struct ipoib_dev_priv {
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:46:20.000000000 +0300
+@@ -277,8 +277,6 @@ struct ipoib_dev_priv {
  
  	struct net_device *dev;
  
@@ -25,7 +25,7 @@
  	unsigned long flags;
  
  	struct mutex vlan_mutex;
-@@ -338,6 +336,8 @@ struct ipoib_dev_priv {
+@@ -336,6 +334,8 @@ struct ipoib_dev_priv {
  
  	struct ib_event_handler event_handler;
  
@@ -34,7 +34,7 @@
  	struct net_device *parent;
  	struct list_head child_intfs;
  	struct list_head list;
-@@ -425,7 +425,7 @@ extern struct workqueue_struct *ipoib_wo
+@@ -423,7 +423,7 @@ extern struct workqueue_struct *ipoib_wo
  
  /* functions */
  
@@ -43,10 +43,10 @@
  void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
  void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:46:20.000000000 +0300
 @@ -593,7 +593,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		ipoib_dbg(priv, "cm recv error "
  			   "(status=%d, wrid=%d vend_err %x)\n",
@@ -56,7 +56,7 @@
  		if (has_srq)
  			goto repost;
  		else {
-@@ -658,7 +658,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
+@@ -646,7 +646,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		 * this packet and reuse the old buffer.
  		 */
  		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
@@ -65,7 +65,7 @@
  		goto repost;
  	}
  
-@@ -676,8 +676,8 @@ copied:
+@@ -664,8 +664,8 @@ copied:
  	skb_pull(skb, IPOIB_ENCAP_LEN);
  
  	dev->last_rx = jiffies;
@@ -76,7 +76,7 @@
  
  	skb->dev = dev;
  	/* XXX get correct PACKET_ type here */
-@@ -726,8 +726,8 @@ void ipoib_cm_send(struct net_device *de
+@@ -714,8 +714,8 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(skb->len > tx->mtu)) {
  		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
  			   skb->len, tx->mtu);
@@ -87,7 +87,7 @@
  		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
  		return;
  	}
-@@ -746,7 +746,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -734,7 +734,7 @@ void ipoib_cm_send(struct net_device *de
  	tx_req->skb = skb;
  	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
  	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
@@ -96,7 +96,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -756,7 +756,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -744,7 +744,7 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
  			       addr, skb->len))) {
  		ipoib_warn(priv, "post_send failed\n");
@@ -105,7 +105,7 @@
  		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
  		dev_kfree_skb_any(skb);
  	} else {
-@@ -793,8 +793,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
+@@ -781,8 +781,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
  	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
  
  	/* FIXME: is this right? Shouldn't we only increment on success? */
@@ -116,10 +116,10 @@
  
  	dev_kfree_skb_any(tx_req->skb);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:43:16.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:49:04.000000000 +0300
 @@ -261,7 +261,7 @@ static void ipoib_ib_handle_rx_wc(struct
  	 * this packet and reuse the old buffer.
  	 */
@@ -263,60 +263,41 @@
  		--priv->tx_outstanding;
  		ipoib_dma_unmap_tx(priv->ca, tx_req);
  		dev_kfree_skb_any(skb);
-@@ -809,6 +812,7 @@ int ipoib_ib_dev_stop(struct net_device 
+@@ -708,8 +711,7 @@ int ipoib_ib_dev_open(struct net_device 
+ 	priv->poll_timer.function = ipoib_ib_tx_timer_func;
+ 	priv->poll_timer.data = (unsigned long)dev;
+ 
+-	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_enable(&priv->napi);
++	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+ 
+ 	return 0;
+ }
+@@ -828,8 +830,8 @@ int ipoib_ib_dev_stop(struct net_device 
+ 	struct ipoib_tx_buf *tx_req;
  	int i;
  
- 	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+-	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_disable(&priv->napi);
++	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 +	netif_poll_disable(dev);
  
  	ipoib_cm_dev_stop(dev);
  
-@@ -893,6 +897,7 @@ timeout:
- 		msleep(1);
- 	}
+@@ -903,6 +905,7 @@ timeout:
  
+ 	ipoib_ah_dev_cleanup(dev);
+ 
 +	netif_poll_enable(dev);
  	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
  
  	return 0;
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:55:35.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:58:18.000000000 +0200
-@@ -106,20 +106,16 @@ int ipoib_open(struct net_device *dev)
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:44:02.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:46:20.000000000 +0300
+@@ -585,7 +585,7 @@ static void neigh_add_path(struct sk_buf
  
- 	ipoib_dbg(priv, "bringing up interface\n");
- 
--	if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
--		napi_enable(&priv->napi);
-+	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- 
- 	if (ipoib_pkey_dev_delay_open(dev))
- 		return 0;
- 
--	if (ipoib_ib_dev_open(dev)) {
--		napi_disable(&priv->napi);
--		return -EINVAL;
--	}
-+	if (ipoib_ib_dev_open(dev))
-+  		return -EINVAL;
- 
- 	if (ipoib_ib_dev_up(dev)) {
- 		ipoib_ib_dev_stop(dev, 1);
--		napi_disable(&priv->napi);
- 		return -EINVAL;
- 	}
- 
-@@ -152,7 +148,6 @@ static int ipoib_stop(struct net_device 
- 	ipoib_dbg(priv, "stopping interface\n");
- 
- 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
--	napi_disable(&priv->napi);
- 
- 	netif_stop_queue(dev);
- 
-@@ -604,7 +599,7 @@ static void neigh_add_path(struct sk_buf
- 
  	neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
  	if (!neigh) {
 -		++dev->stats.tx_dropped;
@@ -324,7 +305,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -667,7 +662,7 @@ err_list:
+@@ -646,7 +646,7 @@ err_list:
  err_path:
  	ipoib_neigh_free(dev, neigh);
  err_drop:
@@ -333,7 +314,7 @@
  	dev_kfree_skb_any(skb);
  
  	spin_unlock_irqrestore(&priv->lock, flags);
-@@ -714,7 +709,7 @@ static void unicast_arp_send(struct sk_b
+@@ -697,7 +697,7 @@ static void unicast_arp_send(struct sk_b
  			} else
  				__path_add(dev, path);
  		} else {
@@ -342,7 +323,7 @@
  			dev_kfree_skb_any(skb);
  		}
  
-@@ -733,7 +728,7 @@ static void unicast_arp_send(struct sk_b
+@@ -716,7 +716,7 @@ static void unicast_arp_send(struct sk_b
  		skb_push(skb, sizeof *phdr);
  		__skb_queue_tail(&path->queue, skb);
  	} else {
@@ -351,7 +332,7 @@
  		dev_kfree_skb_any(skb);
  	}
  
-@@ -790,7 +785,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -773,7 +773,7 @@ static int ipoib_start_xmit(struct sk_bu
  			__skb_queue_tail(&neigh->queue, skb);
  			spin_unlock_irqrestore(&priv->lock, flags);
  		} else {
@@ -360,7 +341,7 @@
  			dev_kfree_skb_any(skb);
  		}
  	} else {
-@@ -816,7 +811,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -799,7 +799,7 @@ static int ipoib_start_xmit(struct sk_bu
  					   IPOIB_QPN(phdr->hwaddr),
  					   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
  				dev_kfree_skb_any(skb);
@@ -369,7 +350,7 @@
  				return NETDEV_TX_OK;
  			}
  
-@@ -842,7 +837,7 @@ static void ipoib_timeout(struct net_dev
+@@ -825,7 +825,7 @@ static void ipoib_timeout(struct net_dev
  static int ipoib_hard_header(struct sk_buff *skb,
  			     struct net_device *dev,
  			     unsigned short type,
@@ -378,7 +359,7 @@
  {
  	struct ipoib_header *header;
  
-@@ -931,9 +926,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
+@@ -914,9 +914,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
  void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
  {
  	struct sk_buff *skb;
@@ -390,7 +371,7 @@
  		dev_kfree_skb_any(skb);
  	}
  	if (ipoib_cm_get(neigh))
-@@ -1008,10 +1004,6 @@ void ipoib_dev_cleanup(struct net_device
+@@ -991,10 +992,6 @@ void ipoib_dev_cleanup(struct net_device
  	priv->tx_ring = NULL;
  }
  
@@ -401,7 +382,7 @@
  static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
  		       void **tcph, u64 *hdr_flags, void *priv)
  {
-@@ -1069,13 +1061,13 @@ static void ipoib_setup(struct net_devic
+@@ -1052,13 +1049,13 @@ static void ipoib_setup(struct net_devic
  	dev->change_mtu		 = ipoib_change_mtu;
  	dev->hard_start_xmit	 = ipoib_start_xmit;
  	dev->tx_timeout		 = ipoib_timeout;
@@ -418,10 +399,10 @@
  
  	dev->watchdog_timeo	 = HZ;
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:46:20.000000000 +0300
 @@ -100,7 +100,7 @@ static void ipoib_mcast_free(struct ipoi
  	}
  
@@ -448,7 +429,7 @@
  			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
  		}
  		netif_tx_unlock_bh(dev);
-@@ -667,7 +668,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -677,7 +678,7 @@ void ipoib_mcast_send(struct net_device 
  	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)		||
  	    !priv->broadcast					||
  	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
@@ -457,7 +438,7 @@
  		dev_kfree_skb_any(skb);
  		goto unlock;
  	}
-@@ -682,7 +683,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -692,7 +693,7 @@ void ipoib_mcast_send(struct net_device 
  		if (!mcast) {
  			ipoib_warn(priv, "unable to allocate memory for "
  				   "multicast structure\n");
@@ -466,7 +447,7 @@
  			dev_kfree_skb_any(skb);
  			goto out;
  		}
-@@ -697,7 +698,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -707,7 +708,7 @@ void ipoib_mcast_send(struct net_device 
  		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
  			skb_queue_tail(&mcast->pkt_queue, skb);
  		else {
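Across this ipoib backport the napi_enable()/napi_disable() pairs are mapped onto the device-wide netif_poll_enable()/netif_poll_disable() calls available on 2.6.20, and packet counters move from dev->stats to the driver-private ipoib_dev_priv statistics. A minimal sketch of the stop-path pairing used by the ipoib_ib_dev_stop() hunk above (drain_rx() is an illustrative stand-in):

    /* Sketch of the pre-napi_struct pattern: polling is gated on the
     * net_device itself rather than on an embedded napi_struct. */
    #include <linux/netdevice.h>

    static int example_stop(struct net_device *dev)
    {
            netif_poll_disable(dev);        /* counterpart of napi_disable() */
            drain_rx(dev);                  /* assumed helper: flush pending RX */
            netif_poll_enable(dev);         /* counterpart of napi_enable() */
            return 0;
    }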

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/iw_nes_050_to_2_6_24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/iw_nes_050_to_2_6_24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/iw_nes_050_to_2_6_24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:23:23.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -1118,19 +1118,19 @@ static ssize_t nes_show_wqm_quanta(struc
  static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
  					const char *buf, size_t count)

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/iw_nes_100_to_2_6_23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/iw_nes_100_to_2_6_23.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/iw_nes_100_to_2_6_23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/Kconfig nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig
---- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:38:45.000000000 -0500
 @@ -2,7 +2,6 @@ config INFINIBAND_NES
  	tristate "NetEffect RNIC Driver"
  	depends on PCI && INET && INFINIBAND
@@ -10,8 +10,8 @@
  	  This is a low-level driver for NetEffect RDMA enabled
  	  Network Interface Cards (RNIC).
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -360,10 +360,11 @@ struct ib_qp *nes_get_qp(struct ib_devic
   */
  static void nes_print_macaddr(struct net_device *netdev)
@@ -29,9 +29,9 @@
  
  /**
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_cm.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2008-11-07 15:26:56.000000000 -0600
-@@ -998,7 +998,6 @@ static int nes_addr_resolve_neigh(struct
+--- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1086,7 +1086,6 @@ static int nes_addr_resolve_neigh(struct
  	struct flowi fl;
  	struct neighbour *neigh;
  	int rc = -1;
@@ -39,7 +39,7 @@
  
  	memset(&fl, 0, sizeof fl);
  	fl.nl_u.ip4_u.daddr = htonl(dst_ip);
-@@ -1012,8 +1011,11 @@ static int nes_addr_resolve_neigh(struct
+@@ -1100,8 +1099,11 @@ static int nes_addr_resolve_neigh(struct
  	if (neigh) {
  		if (neigh->nud_state & NUD_VALID) {
  			nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -53,7 +53,7 @@
  			nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
  					     dst_ip, NES_ARP_ADD);
  			rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
-@@ -1042,7 +1044,6 @@ static struct nes_cm_node *make_cm_node(
+@@ -1130,7 +1132,6 @@ static struct nes_cm_node *make_cm_node(
  	int arpindex = 0;
  	struct nes_device *nesdev;
  	struct nes_adapter *nesadapter;
@@ -61,7 +61,7 @@
  
  	/* create an hte and cm_node for this instance */
  	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
-@@ -1111,8 +1112,11 @@ static struct nes_cm_node *make_cm_node(
+@@ -1197,8 +1198,11 @@ static struct nes_cm_node *make_cm_node(
  
  	/* copy the mac addr to node context */
  	memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
@@ -76,8 +76,8 @@
  	add_hte_node(cm_core, cm_node);
  	atomic_inc(&cm_nodes_created);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2008-11-07 15:38:18.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:38:45.000000000 -0500
 @@ -38,14 +38,9 @@
  #include <linux/ip.h>
  #include <linux/tcp.h>
@@ -90,10 +90,10 @@
 -module_param(nes_lro_max_aggr, uint, 0444);
 -MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
 -
- static u32 crit_err_count;
- u32 int_mod_timer_init;
- u32 int_mod_cq_depth_256;
-@@ -1540,25 +1535,6 @@ static void nes_rq_wqes_timeout(unsigned
+ static int wide_ppm_offset;
+ module_param(wide_ppm_offset, int, 0644);
+ MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm");
+@@ -1531,25 +1526,6 @@ static void nes_rq_wqes_timeout(unsigned
  }
  
  
@@ -119,7 +119,7 @@
  /**
   * nes_init_nic_qp
   */
-@@ -1783,14 +1759,6 @@ int nes_init_nic_qp(struct nes_device *n
+@@ -1773,14 +1749,6 @@ int nes_init_nic_qp(struct nes_device *n
  			jumbomode = 1;
  		nes_nic_init_timer_defaults(nesdev, jumbomode);
  	}
@@ -134,7 +134,7 @@
  	return 0;
  }
  
-@@ -2541,7 +2509,7 @@ static void nes_nic_napi_ce_handler(stru
+@@ -2556,7 +2524,7 @@ static void nes_nic_napi_ce_handler(stru
  {
  	struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
  
@@ -143,7 +143,7 @@
  }
  
  
-@@ -2576,13 +2544,10 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2591,13 +2559,10 @@ void nes_nic_ce_handler(struct nes_devic
  	u16 pkt_type;
  	u16 rqes_processed = 0;
  	u8 sq_cqes = 0;
@@ -157,7 +157,7 @@
  	do {
  		if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
  				NES_NIC_CQE_VALID) {
-@@ -2714,17 +2679,9 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2729,17 +2694,9 @@ void nes_nic_ce_handler(struct nes_devic
  							>> 16);
  					nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
  							nesvnic->netdev->name, vlan_tag);
@@ -178,7 +178,7 @@
  
  skip_rx_indicate0:
  				nesvnic->netdev->last_rx = jiffies;
-@@ -2755,8 +2712,6 @@ skip_rx_indicate0:
+@@ -2770,8 +2727,6 @@ skip_rx_indicate0:
  
  	} while (1);
  
@@ -188,18 +188,18 @@
  		barrier();
  		/* restart the queue if it had been stopped */
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.h nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:38:45.000000000 -0500
 @@ -33,8 +33,6 @@
  #ifndef __NES_HW_H
  #define __NES_HW_H
  
 -#include <linux/inet_lro.h>
 -
+ #define NES_PHY_TYPE_CX4       1
  #define NES_PHY_TYPE_1G        2
  #define NES_PHY_TYPE_IRIS      3
- #define NES_PHY_TYPE_ARGUS     4
-@@ -991,8 +989,6 @@ struct nes_hw_tune_timer {
+@@ -1005,8 +1003,6 @@ struct nes_hw_tune_timer {
  #define NES_TIMER_ENABLE_LIMIT      4
  #define NES_MAX_LINK_INTERRUPTS     128
  #define NES_MAX_LINK_CHECK          200
@@ -208,7 +208,7 @@
  
  struct nes_adapter {
  	u64              fw_ver;
-@@ -1168,7 +1164,6 @@ struct nes_vnic {
+@@ -1194,7 +1190,6 @@ struct nes_vnic {
  	u32               msg_enable;
  	/* u32 tx_avail; */
  	__be32            local_ipaddr;
@@ -216,7 +216,7 @@
  	spinlock_t           tx_lock;	/* could use netdev tx lock? */
  	struct timer_list    rq_wqes_timer;
  	u32                  nic_mem_size;
-@@ -1196,9 +1191,6 @@ struct nes_vnic {
+@@ -1222,9 +1217,6 @@ struct nes_vnic {
  	u8  of_device_registered;
  	u8  rdma_enabled;
  	u8  rx_checksum_disabled;
@@ -227,8 +227,8 @@
  
  struct nes_ib_device {
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
 @@ -96,35 +96,38 @@ static int nics_per_function = 1;
  /**
   * nes_netdev_poll
@@ -292,7 +292,7 @@
  	netif_stop_queue(netdev);
  	list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
  		first_nesvnic = container_of(list_pos, struct nes_vnic, list);
-@@ -797,14 +798,16 @@ static int nes_netdev_set_mac_address(st
+@@ -775,14 +776,16 @@ static int nes_netdev_set_mac_address(st
  	int i;
  	u32 macaddr_low;
  	u16 macaddr_high;
@@ -312,7 +312,7 @@
  	macaddr_high  = ((u16)netdev->dev_addr[0]) << 8;
  	macaddr_high += (u16)netdev->dev_addr[1];
  	macaddr_low   = ((u32)netdev->dev_addr[2]) << 24;
-@@ -909,11 +912,11 @@ static void nes_netdev_set_multicast_lis
+@@ -887,11 +890,11 @@ static void nes_netdev_set_multicast_lis
  			if (mc_index >= max_pft_entries_avaiable)
  				break;
  			if (multicast_addr) {
@@ -329,7 +329,7 @@
  				macaddr_high  = ((u16)multicast_addr->dmi_addr[0]) << 8;
  				macaddr_high += (u16)multicast_addr->dmi_addr[1];
  				macaddr_low   = ((u32)multicast_addr->dmi_addr[2]) << 24;
-@@ -1063,9 +1066,6 @@ static const char nes_ethtool_stringset[
+@@ -1040,9 +1043,6 @@ static const char nes_ethtool_stringset[
  	"CQ Depth 32",
  	"CQ Depth 128",
  	"CQ Depth 256",
@@ -339,17 +339,17 @@
  };
  
  #define NES_ETHTOOL_STAT_COUNT  ARRAY_SIZE(nes_ethtool_stringset)
-@@ -1263,9 +1263,6 @@ static void nes_netdev_get_ethtool_stats
- 	target_stat_values[52] = int_mod_cq_depth_32;
- 	target_stat_values[53] = int_mod_cq_depth_128;
- 	target_stat_values[54] = int_mod_cq_depth_256;
--	target_stat_values[55] = nesvnic->lro_mgr.stats.aggregated;
--	target_stat_values[56] = nesvnic->lro_mgr.stats.flushed;
--	target_stat_values[57] = nesvnic->lro_mgr.stats.no_desc;
+@@ -1240,9 +1240,6 @@ static void nes_netdev_get_ethtool_stats
+ 	target_stat_values[++index] = int_mod_cq_depth_32;
+ 	target_stat_values[++index] = int_mod_cq_depth_128;
+ 	target_stat_values[++index] = int_mod_cq_depth_256;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
  
  }
  
-@@ -1542,8 +1539,6 @@ static struct ethtool_ops nes_ethtool_op
+@@ -1525,8 +1522,6 @@ static struct ethtool_ops nes_ethtool_op
  	.set_sg = ethtool_op_set_sg,
  	.get_tso = ethtool_op_get_tso,
  	.set_tso = ethtool_op_set_tso,
@@ -358,7 +358,7 @@
  };
  
  
-@@ -1615,7 +1610,8 @@ struct net_device *nes_netdev_init(struc
+@@ -1598,7 +1593,8 @@ struct net_device *nes_netdev_init(struc
  	netdev->type = ARPHRD_ETHER;
  	netdev->features = NETIF_F_HIGHDMA;
  	netdev->ethtool_ops = &nes_ethtool_ops;
@@ -368,7 +368,7 @@
  	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
  	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
  	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
-@@ -1648,7 +1644,6 @@ struct net_device *nes_netdev_init(struc
+@@ -1630,7 +1626,6 @@ struct net_device *nes_netdev_init(struc
  
  	if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
  		netdev->features |= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/iw_nes_200_to_2_6_22.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/iw_nes_200_to_2_6_22.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/iw_nes_200_to_2_6_22.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:52:27.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -593,16 +593,22 @@ static int __devinit nes_probe(struct pc
  						nesdev->nesadapter->port_count;
  	}
@@ -35,9 +35,9 @@
  
  	tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.h nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h
---- linux-2.6/drivers/infiniband/hw/nes/nes.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2008-11-07 15:52:27.000000000 -0600
-@@ -276,14 +276,7 @@ static inline __le32 get_crc_value(struc
+--- linux-2.6/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:38:45.000000000 -0500
+@@ -274,14 +274,7 @@ static inline __le32 get_crc_value(struc
  	u32 crc_value;
  	crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad));
  

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/iw_nes_210_to_2_6_21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/iw_nes_210_to_2_6_21.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/iw_nes_210_to_2_6_21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,23 @@
+diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1549,6 +1549,11 @@ static void nes_netdev_vlan_rx_register(
+ 	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ }
+ 
++static void nes_netdev_vlan_rx_kill_vid(struct net_device *netdev, 
++					unsigned short vid)
++{
++}
++
+ 
+ /**
+  * nes_netdev_init - initialize network device
+@@ -1598,6 +1603,7 @@ struct net_device *nes_netdev_init(struc
+ 	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
+ 	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ 	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
++	netdev->vlan_rx_kill_vid = nes_netdev_vlan_rx_kill_vid;
+ 
+ 	/* Fill in the port structure */
+ 	nesvnic->netdev = netdev;
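
The new iw_nes_210_to_2_6_21.patch pairs the existing vlan_rx_register hook
with a no-op vlan_rx_kill_vid handler. On 2.6.21-class kernels both VLAN
acceleration hooks are plain function pointers on struct net_device, and
which of them the 8021q core actually calls varies by kernel version, so
backported drivers commonly install both even when no per-VID hardware
filtering is done. A minimal sketch of the hook pair, using hypothetical
example_* names rather than the actual nes code:

    static void example_vlan_rx_register(struct net_device *netdev,
                                         struct vlan_group *grp)
    {
            struct example_priv *priv = netdev_priv(netdev);

            priv->vlgrp = grp;      /* remembered for the RX completion path */
    }

    static void example_vlan_rx_kill_vid(struct net_device *netdev,
                                         unsigned short vid)
    {
            /* no per-VID hardware filter to tear down; the stub only keeps
             * the hook populated for callers that expect it */
    }

    /* wiring, done wherever the net_device is set up */
    netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
    netdev->vlan_rx_register = example_vlan_rx_register;
    netdev->vlan_rx_kill_vid = example_vlan_rx_kill_vid;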

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/mlx4_en_0099_no_multiqueue.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/mlx4_en_0099_no_multiqueue.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/mlx4_en_0099_no_multiqueue.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,73 @@
+diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
+index bce700a..a9ee0d7 100644
+--- a/drivers/net/mlx4/en_netdev.c
++++ b/drivers/net/mlx4/en_netdev.c
+@@ -963,7 +963,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	int i;
+ 	int err;
+ 
+-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
++	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+ 	if (dev == NULL) {
+ 		mlx4_err(mdev, "Net device allocation failed\n");
+ 		return -ENOMEM;
+@@ -1036,7 +1036,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	dev->open = &mlx4_en_open;
+ 	dev->stop = &mlx4_en_close;
+ 	dev->hard_start_xmit = &mlx4_en_xmit;
+-	dev->select_queue = &mlx4_en_select_queue;
+ 	dev->get_stats = &mlx4_en_get_stats;
+ 	dev->set_multicast_list = &mlx4_en_set_multicast;
+ 	dev->set_mac_address = &mlx4_en_set_mac;
+diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
+index 3d8246f..72e166f 100644
+--- a/drivers/net/mlx4/en_tx.c
++++ b/drivers/net/mlx4/en_tx.c
+@@ -392,7 +392,7 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+ 			 *   transmission on that ring would stop the queue.
+ 			 */
+ 			ring->blocked = 0;
+-			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
++			netif_wake_queue(dev);
+ 			priv->port_stats.wake_queue++;
+ 		}
+ 	}
+@@ -612,7 +612,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
+ 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ }
+ 
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
++static int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	u16 vlan_tag = 0;
+@@ -703,7 +703,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+-	tx_ind = skb->queue_mapping;
++	tx_ind = mlx4_en_select_queue(dev, skb);
+ 	ring = &priv->tx_ring[tx_ind];
+ 	if (priv->vlgrp && vlan_tx_tag_present(skb))
+ 		vlan_tag = vlan_tx_tag_get(skb);
+@@ -713,7 +713,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ 		/* every full Tx ring stops queue.
+ 		 * TODO: implement multi-queue support (per-queue stop) */
+-		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
++		netif_stop_queue(dev);
+ 		ring->blocked = 1;
+ 		priv->port_stats.queue_stopped++;
+ 
+diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
+index adef17c..995e318 100644
+--- a/drivers/net/mlx4/mlx4_en.h
++++ b/drivers/net/mlx4/mlx4_en.h
+@@ -521,7 +521,6 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq);
+ void mlx4_en_poll_tx_cq(unsigned long data);
+ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+ 
+ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+ 			   u32 size, u16 stride);
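
The mlx4_en_0099_no_multiqueue.patch added above backs mlx4_en off the
multiqueue TX API that 2.6.20-class kernels lack: alloc_etherdev_mq()
becomes alloc_etherdev(), per-ring netif_tx_{stop,wake}_queue() calls become
whole-device netif_{stop,wake}_queue(), and the TX ring is chosen by calling
the (now static) select-queue helper directly instead of reading
skb->queue_mapping. A condensed sketch of the single-queue transmit path the
backport falls back to; the example_* names and thresholds are hypothetical,
not the mlx4_en code:

    static int example_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            struct example_priv *priv = netdev_priv(dev);
            /* no skb->queue_mapping on old kernels: hash to a ring here */
            int ring = example_select_queue(dev, skb);
            struct example_tx_ring *tx = &priv->tx_ring[ring];

            if (example_ring_space(tx) < EXAMPLE_MAX_DESC_PER_SKB) {
                    /* stopping the lone device queue halts transmission on
                     * every ring, the price of the non-multiqueue fallback */
                    netif_stop_queue(dev);
                    tx->blocked = 1;
                    return NETDEV_TX_BUSY;
            }

            example_post_descriptors(tx, skb);
            return NETDEV_TX_OK;
    }

    /* on TX completion, once a blocked ring has drained: */
    if (tx->blocked && example_ring_space(tx) > EXAMPLE_WAKE_THRESHOLD) {
            tx->blocked = 0;
            netif_wake_queue(dev);
    }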

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/mlx4_en_0100_to_2.6.24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/mlx4_en_0100_to_2.6.24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/mlx4_en_0100_to_2.6.24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,33 +1,5 @@
-From 6d0a6cc6981404e95297924c9a8f169b3447a98e Mon Sep 17 00:00:00 2001
-From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
-Date: Thu, 2 Oct 2008 11:26:30 +0300
-Subject: [PATCH] mlx4_en: LRO backport patch
-
-Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
----
- drivers/net/mlx4/Makefile    |    2 +-
- drivers/net/mlx4/en_cq.c     |   23 ++-
- drivers/net/mlx4/en_lro.c    |  529 ++++++++++++++++++++++++++++++++++++++++++
- drivers/net/mlx4/en_netdev.c |    5 +-
- drivers/net/mlx4/en_params.c |   19 +--
- drivers/net/mlx4/en_rx.c     |  121 +++-------
- drivers/net/mlx4/en_tx.c     |    1 +
- drivers/net/mlx4/mlx4_en.h   |   60 +++++-
- 8 files changed, 646 insertions(+), 114 deletions(-)
- create mode 100644 drivers/net/mlx4/en_lro.c
-
-diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
-index acb1b91..e6bfdec 100644
---- a/drivers/net/mlx4/Makefile
-+++ b/drivers/net/mlx4/Makefile
-@@ -5,4 +5,4 @@ mlx4_core-y :=  alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-
- obj-$(CONFIG_MLX4_EN)                  += mlx4_en.o
-
--mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o
-+mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o en_lro.o
 diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
-index cf9c5df..2348bfc 100644
+index a4d6e46..440633a 100644
 --- a/drivers/net/mlx4/en_cq.c
 +++ b/drivers/net/mlx4/en_cq.c
 @@ -34,6 +34,7 @@
@@ -38,7 +10,7 @@
  
  #include "mlx4_en.h"
  
-@@ -100,8 +101,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -106,8 +107,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  		cq->timer.function = mlx4_en_poll_tx_cq;
  		cq->timer.data = (unsigned long) cq;
  	} else {
@@ -58,7 +30,7 @@
  	}
  
  	return 0;
-@@ -123,8 +133,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -129,8 +139,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  
  	if (cq->is_tx)
  		del_timer(&cq->timer);
@@ -74,548 +46,8 @@
  
  	mlx4_cq_free(mdev->dev, &cq->mcq);
  }
-diff --git a/drivers/net/mlx4/en_lro.c b/drivers/net/mlx4/en_lro.c
-new file mode 100644
-index 0000000..9d17376
---- /dev/null
-+++ b/drivers/net/mlx4/en_lro.c
-@@ -0,0 +1,534 @@
-+/*
-+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses.  You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ *     Redistribution and use in source and binary forms, with or
-+ *     without modification, are permitted provided that the following
-+ *     conditions are met:
-+ *
-+ *      - Redistributions of source code must retain the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer.
-+ *
-+ *      - Redistributions in binary form must reproduce the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer in the documentation and/or other materials
-+ *        provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ */
-+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <net/tcp.h>
-+#include <linux/if_vlan.h>
-+#include <linux/delay.h>
-+
-+#include "mlx4_en.h"
-+
-+/* LRO hash function - using sum of source and destination port LSBs is
-+ * good enough */
-+#define LRO_INDEX(th, size) \
-+	((*((u8*) &th->source + 1) + *((u8*) &th->dest + 1)) & (size - 1))
-+
-+/* #define CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+static void mlx4_en_lro_validate(struct mlx4_en_priv* priv, struct mlx4_en_lro *lro)
-+{
-+	int i;
-+	int size, size2;
-+	struct sk_buff *skb = lro->skb;
-+	skb_frag_t *frags;
-+	int len, len2;
-+	int cur_skb = 0;
-+
-+	/* Sum fragment sizes of first skb */
-+	len = skb->len;
-+	size = skb_headlen(skb);
-+	frags = skb_shinfo(skb)->frags;
-+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+		size += frags[i].size;
-+	}
-+
-+	/* Add in fragments of linked skb's */
-+	skb = skb_shinfo(skb)->frag_list;
-+	while (skb) {
-+		cur_skb++;
-+		len2 = skb->len;
-+		if (skb_headlen(skb)) {
-+			mlx4_err(priv->mdev, "Bad LRO format: non-zero headlen "
-+				  "in fraglist (skb:%d)\n", cur_skb);
-+			return;
-+		}
-+
-+		size2 = 0;
-+		frags = skb_shinfo(skb)->frags;
-+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+			size2 += frags[i].size;
-+		}
-+
-+		if (size2 != len2) {
-+			mlx4_err(priv->mdev, "Bad skb size:%d in LRO fraglist. "
-+			          "Expected:%d (skb:%d)\n", size2, len2, cur_skb);
-+			return;
-+		}
-+		size += size2;
-+		skb = skb->next;
-+	}
-+
-+	if (size != len)
-+		mlx4_err(priv->mdev, "Bad LRO size:%d expected:%d\n", size, len);
-+}
-+#endif /* MLX4_EN_DEBUG_LRO */
-+
-+static void mlx4_en_lro_flush_single(struct mlx4_en_priv* priv,
-+		   struct mlx4_en_rx_ring* ring, struct mlx4_en_lro *lro)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct sk_buff *skb = lro->skb;
-+	struct iphdr *iph = (struct iphdr *) skb->data;
-+	struct tcphdr *th = (struct tcphdr *)(iph + 1);
-+	unsigned int headlen = skb_headlen(skb);
-+	__wsum tcp_hdr_csum;
-+	u32 *ts;
-+
-+	/* Update IP length and checksum */
-+	iph->tot_len = htons(lro->tot_len);
-+	iph->check = 0;
-+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-+
-+	/* Update latest TCP ack, window, psh, and timestamp */
-+	th->ack_seq = lro->ack_seq;
-+	th->window = lro->window;
-+	th->psh = !!lro->psh;
-+	if (lro->has_timestamp) {
-+		ts = (u32 *) (th + 1);
-+		ts[1] = htonl(lro->tsval);
-+		ts[2] = lro->tsecr;
-+	}
-+	th->check = 0;
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, 0);
-+	lro->data_csum = csum_add(lro->data_csum, tcp_hdr_csum);
-+	th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
-+				      lro->tot_len - (iph->ihl << 2),
-+				      IPPROTO_TCP, lro->data_csum);
-+
-+	/* Update skb */
-+	skb->len = lro->tot_len;
-+	skb->data_len = lro->tot_len - headlen;
-+	skb->truesize = skb->len + sizeof(struct sk_buff);
-+	skb_shinfo(skb)->gso_size = lro->mss;
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+	mlx4_en_lro_validate(priv, lro);
-+#endif /* CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+	/* Push it up the stack */
-+	if (priv->vlgrp && lro->has_vlan)
-+		vlan_hwaccel_receive_skb(skb, priv->vlgrp,
-+					be16_to_cpu(lro->vlan_prio));
-+	else
-+		netif_receive_skb(skb);
-+	priv->dev->last_rx = jiffies;
-+
-+	/* Increment stats */
-+	priv->port_stats.lro_flushed++;
-+
-+	/* Move session back to the free list */
-+	hlist_del(&lro->node);
-+	hlist_del(&lro->flush_node);
-+	hlist_add_head(&lro->node, &ring->lro_free);
-+}
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_flush, flush_node) {
-+		if (all || time_after(jiffies, lro->expires))
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+	}
-+}
-+
-+static inline int mlx4_en_lro_append(struct mlx4_en_priv *priv,
-+				   struct mlx4_en_lro *lro,
-+				   struct mlx4_en_rx_desc *rx_desc,
-+				   struct skb_frag_struct *skb_frags,
-+				   struct mlx4_en_rx_alloc *page_alloc,
-+				   unsigned int data_len,
-+				   int hlen)
-+{
-+	struct sk_buff *skb = lro->skb_last;
-+	struct skb_shared_info *info;
-+	struct skb_frag_struct *frags_copy;
-+	int nr_frags;
-+
-+	if (skb_shinfo(skb)->nr_frags + priv->num_frags > MAX_SKB_FRAGS)
-+		return -ENOMEM;
-+
-+	info = skb_shinfo(skb);
-+
-+	/* Copy fragments from descriptor ring to skb */
-+	frags_copy = info->frags + info->nr_frags;
-+	nr_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
-+						frags_copy,
-+						page_alloc,
-+						data_len + hlen);
-+	if (!nr_frags) {
-+		mlx4_dbg(DRV, priv, "Failed completing rx desc during LRO append\n");
-+		return -ENOMEM;
-+	}
-+
-+	/* Skip over headers */
-+	frags_copy[0].page_offset += hlen;
-+
-+	if (nr_frags == 1)
-+		frags_copy[0].size = data_len;
-+	else {
-+		/* Adjust size of last fragment to match packet length.
-+		 * Note: if this fragment is also the first one, the
-+		 *       operation is completed in the next line */
-+		frags_copy[nr_frags - 1].size = hlen + data_len -
-+				priv->frag_info[nr_frags - 1].frag_prefix_size;
-+
-+		/* Adjust size of first fragment */
-+		frags_copy[0].size -= hlen;
-+	}
-+
-+	/* Update skb bookkeeping */
-+	skb->len += data_len;
-+	skb->data_len += data_len;
-+	info->nr_frags += nr_frags;
-+	return 0;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_find_session(struct mlx4_en_dev *mdev,
-+						       struct mlx4_en_rx_ring *ring,
-+						       struct iphdr *iph,
-+						       struct tcphdr *th)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node;
-+	int index = LRO_INDEX(th, mdev->profile.num_lro);
-+	struct hlist_head *list = &ring->lro_hash[index];
-+
-+	hlist_for_each_entry(lro, node, list, node) {
-+		if (lro->sport_dport == *((u32*) &th->source) &&
-+		    lro->saddr == iph->saddr &&
-+		    lro->daddr == iph->daddr)
-+			return lro;
-+	}
-+	return NULL;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_alloc_session(struct mlx4_en_priv *priv,
-+							struct mlx4_en_rx_ring *ring)
-+{
-+	return hlist_empty(&ring->lro_free) ? NULL :
-+		hlist_entry(ring->lro_free.first, struct mlx4_en_lro, node);
-+}
-+
-+static __wsum mlx4_en_lro_tcp_data_csum(struct iphdr *iph,
-+					struct tcphdr *th, int len)
-+{
-+	__wsum tcp_csum;
-+	__wsum tcp_hdr_csum;
-+	__wsum tcp_ps_hdr_csum;
-+
-+	tcp_csum = ~csum_unfold(th->check);
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, tcp_csum);
-+
-+	tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-+					     len + (th->doff << 2),
-+					     IPPROTO_TCP, 0);
-+
-+	return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
-+			tcp_ps_hdr_csum);
-+}
-+
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+					  struct mlx4_en_rx_desc *rx_desc,
-+					  struct skb_frag_struct *skb_frags,
-+					  unsigned int length,
-+					  struct mlx4_cqe *cqe)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct mlx4_en_lro *lro;
-+	struct sk_buff *skb;
-+	struct iphdr *iph;
-+	struct tcphdr *th;
-+	dma_addr_t dma;
-+	int tcp_hlen;
-+	int tcp_data_len;
-+	int hlen;
-+	u16 ip_len;
-+	void *va;
-+	u32 *ts;
-+	u32 seq;
-+	u32 tsval = (u32) ~0UL;
-+	u32 tsecr = 0;
-+	u32 ack_seq;
-+	u16 window;
-+
-+	/* This packet is eligible for LRO if it is:
-+	 * - DIX Ethernet (type interpretation)
-+	 * - TCP/IP (v4)
-+	 * - without IP options
-+	 * - not an IP fragment */
-+	if (!mlx4_en_can_lro(cqe->status))
-+			return -1;
-+
-+	/* Get pointer to TCP header. We already know that the packet is DIX Ethernet/IPv4/TCP
-+	 * with no VLAN (HW stripped it) and no IP options */
-+	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
-+	iph = va + ETH_HLEN;
-+	th = (struct tcphdr *)(iph + 1);
-+
-+	/* Synchronsize headers for processing */
-+	dma = be64_to_cpu(rx_desc->data[0].addr);
-+#define MAX_LRO_HEADER		(ETH_HLEN + \
-+				 sizeof(*iph) + \
-+				 sizeof(*th) + \
-+				 TCPOLEN_TSTAMP_ALIGNED)
-+	dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
-+				      MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+
-+	/* We only handle aligned timestamp options */
-+	tcp_hlen = (th->doff << 2);
-+	if (tcp_hlen == sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) {
-+		ts = (u32*) (th + 1);
-+		if (unlikely(*ts != htonl((TCPOPT_NOP << 24) |
-+					  (TCPOPT_NOP << 16) |
-+					  (TCPOPT_TIMESTAMP << 8) |
-+					  TCPOLEN_TIMESTAMP)))
-+			goto sync_device;
-+		tsval = ntohl(ts[1]);
-+		tsecr = ts[2];
-+	} else if (tcp_hlen != sizeof(*th))
-+		goto sync_device;
-+	
-+
-+	/* At this point we know we have a TCP packet that is likely to be
-+	 * eligible for LRO. Therefore, see now if we have an oustanding
-+	 * session that corresponds to this packet so we could flush it if
-+	 * something still prevents LRO */
-+	lro = mlx4_en_lro_find_session(mdev, ring, iph, th);
-+
-+	/* ensure no bits set besides ack or psh */
-+	if (th->fin || th->syn || th->rst || th->urg || th->ece ||
-+	    th->cwr || !th->ack) {
-+		if (lro) {
-+			/* First flush session to keep packets in-order */
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		}
-+		goto sync_device;
-+	}
-+
-+	/* Get ip length and verify that the frame is big enough */
-+	ip_len = ntohs(iph->tot_len);
-+	if (unlikely(length < ETH_HLEN + ip_len)) {
-+		mlx4_warn(mdev, "Cannot LRO - ip payload exceeds frame!\n");
-+		goto sync_device;
-+	}
-+
-+	/* Get TCP payload length */
-+	tcp_data_len = ip_len - tcp_hlen - sizeof(struct iphdr);
-+	seq = ntohl(th->seq);
-+	if (!tcp_data_len)
-+		goto flush_session;
-+
-+	if (lro) {
-+		/* Check VLAN tag */
-+		if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+			if (cqe->sl_vid != lro->vlan_prio || !lro->has_vlan) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else if (lro->has_vlan) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* Check sequence number */
-+		if (unlikely(seq != lro->next_seq)) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* If the cummulative IP length is over 64K, flush and start
-+		 * a new session */
-+		if (lro->tot_len + tcp_data_len > 0xffff) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto new_session;
-+		}
-+
-+		/* Check timestamps */
-+		if (tcp_hlen != sizeof(*th)) {
-+			if (unlikely(lro->tsval > tsval || !tsecr))
-+				goto sync_device;
-+		}
-+
-+		window = th->window;
-+		ack_seq = th->ack_seq;
-+		if (likely(tcp_data_len)) {
-+			/* Append the data! */
-+			hlen = ETH_HLEN + sizeof(struct iphdr) + tcp_hlen;
-+			if (mlx4_en_lro_append(priv, lro, rx_desc, skb_frags,
-+							ring->page_alloc,
-+							tcp_data_len, hlen)) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else {
-+			/* No data */
-+			dma_sync_single_range_for_device(&mdev->dev->pdev->dev, dma,
-+							 0, MAX_LRO_HEADER,
-+							 DMA_FROM_DEVICE);
-+		}
-+
-+		/* Update session */
-+		lro->psh |= th->psh;
-+		lro->next_seq += tcp_data_len;
-+		lro->data_csum = csum_block_add(lro->data_csum,
-+					mlx4_en_lro_tcp_data_csum(iph, th,
-+								  tcp_data_len),
-+					lro->tot_len);
-+		lro->tot_len += tcp_data_len;
-+		lro->tsval = tsval;
-+		lro->tsecr = tsecr;
-+		lro->ack_seq = ack_seq;
-+		lro->window = window;
-+		if (tcp_data_len > lro->mss)
-+			lro->mss = tcp_data_len;
-+		priv->port_stats.lro_aggregated++;
-+		if (th->psh)
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		return 0;
-+	}
-+
-+new_session:
-+	if (th->psh)
-+		goto sync_device;
-+	lro = mlx4_en_lro_alloc_session(priv, ring);
-+	if (lro) {
-+		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags, ring->page_alloc,
-+							     ETH_HLEN + ip_len);
-+		if (skb) {
-+			int index;
-+
-+			/* Add in the skb */
-+			lro->skb = skb;
-+			lro->skb_last = skb;
-+			skb->protocol = eth_type_trans(skb, priv->dev);
-+			skb->ip_summed = CHECKSUM_UNNECESSARY;
-+
-+			/* Initialize session */
-+			lro->saddr = iph->saddr;
-+			lro->daddr = iph->daddr;
-+			lro->sport_dport = *((u32*) &th->source);
-+
-+			lro->next_seq = seq + tcp_data_len;
-+			lro->tot_len = ip_len;
-+			lro->psh = th->psh;
-+			lro->ack_seq = th->ack_seq;
-+			lro->window = th->window;
-+			lro->mss = tcp_data_len;
-+			lro->data_csum = mlx4_en_lro_tcp_data_csum(iph, th,
-+						tcp_data_len);
-+
-+			/* Handle vlans */
-+			if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+				lro->vlan_prio = cqe->sl_vid;
-+				lro->has_vlan = 1;
-+			} else
-+				lro->has_vlan = 0;
-+
-+			/* Handle timestamps */
-+			if (tcp_hlen != sizeof(*th)) {
-+				lro->tsval = tsval;
-+				lro->tsecr = tsecr;
-+				lro->has_timestamp = 1;
-+			} else {
-+				lro->tsval = (u32) ~0UL;
-+				lro->has_timestamp = 0;
-+			}
-+
-+			/* Activate this session */
-+			lro->expires = jiffies + HZ / 25;
-+			hlist_del(&lro->node);
-+			index = LRO_INDEX(th, mdev->profile.num_lro);
-+
-+			hlist_add_head(&lro->node, &ring->lro_hash[index]);
-+			hlist_add_head(&lro->flush_node, &ring->lro_flush);
-+			priv->port_stats.lro_aggregated++;
-+			return 0;
-+		}
-+	} else {
-+		priv->port_stats.lro_no_desc++;
-+	}
-+
-+flush_session:
-+	if (lro)
-+		mlx4_en_lro_flush_single(priv, ring, lro);
-+sync_device:
-+	dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
-+					 MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+	return -1;
-+}
-+
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_free, node) {
-+		hlist_del(&lro->node);
-+		kfree(lro);
-+	}
-+	kfree(ring->lro_hash);
-+}
-+
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro)
-+{
-+	struct mlx4_en_lro *lro;
-+	int i;
-+
-+	INIT_HLIST_HEAD(&ring->lro_free);
-+	INIT_HLIST_HEAD(&ring->lro_flush);
-+	ring->lro_hash = kmalloc(sizeof(struct hlist_head) * num_lro,
-+				 GFP_KERNEL);
-+	if (!ring->lro_hash)
-+		return -ENOMEM;
-+
-+	for (i = 0; i < num_lro; i++) {
-+		INIT_HLIST_HEAD(&ring->lro_hash[i]);
-+		lro = kzalloc(sizeof(struct mlx4_en_lro), GFP_KERNEL);
-+		if (!lro) {
-+			mlx4_en_lro_destroy(ring);
-+			return -ENOMEM;
-+		}
-+		INIT_HLIST_NODE(&lro->node);
-+		INIT_HLIST_NODE(&lro->flush_node);
-+		hlist_add_head(&lro->node, &ring->lro_free);
-+	}
-+	return 0;
-+}
-+
-+
 diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
-index 6199f4c..92f91d2 100644
+index 2d57ae6..3b394d7 100644
 --- a/drivers/net/mlx4/en_netdev.c
 +++ b/drivers/net/mlx4/en_netdev.c
 @@ -333,7 +333,8 @@ void mlx4_en_netpoll(struct net_device *dev)
@@ -625,10 +57,10 @@
 -		napi_synchronize(&cq->napi);
 +		while (test_bit(__LINK_STATE_RX_SCHED, &cq->poll_dev->state))
 +			msleep(1);
- 		mlx4_en_process_rx_cq(dev, cq, 0);
- 		spin_unlock_irqrestore(&cq->lock, flags);
- 	}
-@@ -757,8 +758,6 @@ void mlx4_en_stop_port(struct net_device *dev)
+ 		if (priv->rx_ring[i].use_frags)
+ 			mlx4_en_process_rx_cq(dev, cq, 0);
+ 		else
+@@ -755,8 +756,6 @@ void mlx4_en_stop_port(struct net_device *dev)
  	/* Free RX Rings */
  	for (i = 0; i < priv->rx_ring_num; i++) {
  		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -637,226 +69,11 @@
  		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
  	}
  }
-diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
-index 6e96d0a..2b0ca84 100644
---- a/drivers/net/mlx4/en_params.c
-+++ b/drivers/net/mlx4/en_params.c
-@@ -142,21 +142,6 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
-  * Ethtool support
-  */
- 
--static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
--{
--	int i;
--
--	priv->port_stats.lro_aggregated = 0;
--	priv->port_stats.lro_flushed = 0;
--	priv->port_stats.lro_no_desc = 0;
--
--	for (i = 0; i < priv->rx_ring_num; i++) {
--		priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
--		priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
--		priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
--	}
--}
--
- static void
- mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
- {
-@@ -266,8 +251,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
- 
- 	spin_lock_bh(&priv->stats_lock);
- 
--	mlx4_en_update_lro_stats(priv);
--
- 	for (i = 0; i < NUM_MAIN_STATS; i++)
- 		data[index++] = ((unsigned long *) &priv->stats)[i];
- 	for (i = 0; i < NUM_PORT_STATS; i++)
-@@ -457,7 +440,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
- 	.get_rx_csum = mlx4_en_get_rx_csum,
- 	.set_rx_csum = mlx4_en_set_rx_csum,
- 	.get_tx_csum = ethtool_op_get_tx_csum,
--	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
-+	.set_tx_csum = ethtool_op_set_tx_csum,
- 	.get_strings = mlx4_en_get_strings,
- 	.get_sset_count = mlx4_en_get_sset_count,
- 	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
 diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
-index 5a95d74..16f07bc 100644
+index 9d144cd..14b5faf 100644
 --- a/drivers/net/mlx4/en_rx.c
 +++ b/drivers/net/mlx4/en_rx.c
-@@ -37,6 +37,7 @@
- #include <linux/if_ether.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/etherdevice.h>
- 
- #include "mlx4_en.h"
- 
-@@ -51,18 +52,6 @@ static void mlx4_en_srq_event(struct mlx
- 	return;
- }
- 
--static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
--				   void **ip_hdr, void **tcpudp_hdr,
--				   u64 *hdr_flags, void *priv)
--{
--	*mac_hdr = page_address(frags->page) + frags->page_offset;
--	*ip_hdr = *mac_hdr + ETH_HLEN;
--	*tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
--	*hdr_flags = LRO_IPV4 | LRO_TCP;
--
--	return 0;
--}
--
- static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
- 			      struct mlx4_en_rx_desc *rx_desc,
- 			      struct skb_frag_struct *skb_frags,
-@@ -372,23 +361,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- 	}
- 	ring->buf = ring->wqres.buf.direct.buf;
- 
--	/* Configure lro mngr */
--	memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
--	ring->lro.dev = priv->dev;
--	ring->lro.features = LRO_F_NAPI;
--	ring->lro.frag_align_pad = NET_IP_ALIGN;
--	ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
--	ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
--	ring->lro.max_desc = mdev->profile.num_lro;
--	ring->lro.max_aggr = MAX_SKB_FRAGS;
--	ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
--				    sizeof(struct net_lro_desc),
--				    GFP_KERNEL);
--	if (!ring->lro.lro_arr) {
--		mlx4_err(mdev, "Failed to allocate lro array\n");
-+	/* Allocate LRO sessions */
-+	if (mlx4_en_lro_init(ring, mdev->profile.num_lro)) {
-+		mlx4_err(mdev, "Failed allocating lro sessions\n");
- 		goto err_map;
- 	}
--	ring->lro.get_frag_header = mlx4_en_get_frag_header;
- 
- 	return 0;
- 
-@@ -501,7 +478,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 
--	kfree(ring->lro.lro_arr);
-+	mlx4_en_lro_destroy(ring);
- 	mlx4_en_unmap_buffer(&ring->wqres.buf);
- 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
- 	vfree(ring->rx_info);
-@@ -522,12 +499,12 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- 
- 
- /* Unmap a completed descriptor and free unused pages */
--static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
--				    struct mlx4_en_rx_desc *rx_desc,
--				    struct skb_frag_struct *skb_frags,
--				    struct skb_frag_struct *skb_frags_rx,
--				    struct mlx4_en_rx_alloc *page_alloc,
--				    int length)
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct mlx4_en_frag_info *frag_info;
-@@ -570,11 +547,11 @@ fail:
- }
- 
- 
--static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
--				      struct mlx4_en_rx_desc *rx_desc,
--				      struct skb_frag_struct *skb_frags,
--				      struct mlx4_en_rx_alloc *page_alloc,
--				      unsigned int length)
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct sk_buff *skb;
-@@ -663,11 +640,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 	struct mlx4_cqe *cqe;
- 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
- 	struct skb_frag_struct *skb_frags;
--	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
- 	struct mlx4_en_rx_desc *rx_desc;
- 	struct sk_buff *skb;
- 	int index;
--	int nr;
- 	unsigned int length;
- 	int polled = 0;
- 	int ip_summed;
-@@ -718,38 +693,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 			if ((cqe->status & MLX4_CQE_STATUS_IPOK) &&
- 			    (cqe->checksum == 0xffff)) {
- 				priv->port_stats.rx_chksum_good++;
--				/* This packet is eligible for LRO if it is:
--				 * - DIX Ethernet (type interpretation)
--				 * - TCP/IP (v4)
--				 * - without IP options
--				 * - not an IP fragment */
--				if (mlx4_en_can_lro(cqe->status) &&
--				    dev->features & NETIF_F_LRO) {
--
--					nr = mlx4_en_complete_rx_desc(
--						priv, rx_desc,
--						skb_frags, lro_frags,
--						ring->page_alloc, length);
--					if (!nr)
--						goto next;
--
--					if (priv->vlgrp && (cqe->vlan_my_qpn &
--						MLX4_CQE_VLAN_PRESENT_MASK)) {
--						lro_vlan_hwaccel_receive_frags(
--						       &ring->lro, lro_frags,
--						       length, length,
--						       priv->vlgrp,
--						       be16_to_cpu(cqe->sl_vid),
--						       NULL, 0);
--					} else
--						lro_receive_frags(&ring->lro,
--								  lro_frags,
--								  length,
--								  length,
--								  NULL, 0);
--
-+				if (mdev->profile.num_lro &&
-+				    !mlx4_en_lro_rx(priv, ring, rx_desc,
-+						    skb_frags, length, cqe))
- 					goto next;
--				}
- 
- 				/* LRO not possible, complete processing here */
- 				ip_summed = CHECKSUM_UNNECESSARY;
-@@ -790,13 +737,15 @@ next:
- 		if (++polled == budget) {
- 			/* We are here because we reached the NAPI budget -
- 			 * flush only pending LRO sessions */
--			lro_flush_all(&ring->lro);
-+			if (mdev->profile.num_lro)
-+				mlx4_en_lro_flush(priv, ring, 0);
- 			goto out;
- 		}
- 	}
- 
- 	/* If CQ is empty flush all LRO sessions unconditionally */
--	lro_flush_all(&ring->lro);
-+	if (mdev->profile.num_lro)
-+		mlx4_en_lro_flush(priv, ring, 1);
- 
- out:
- 	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
-@@ -820,30 +769,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
+@@ -1041,30 +1041,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
  	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
  
  	if (priv->port_up)
@@ -877,8 +94,8 @@
  	int done;
 +	int work = min(*budget, poll_dev->quota);
  
--	done = mlx4_en_process_rx_cq(dev, cq, budget);
-+	done = mlx4_en_process_rx_cq(dev, cq, work);
+-	done = cq->process_cq(dev, cq, budget);
++	done = cq->process_cq(dev, cq, work);
 +	dev->quota -= done;
 +	*budget -= done;
  
@@ -901,76 +118,11 @@
  }
  
  
-diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
-index 2297fec..59f8772 100644
---- a/drivers/net/mlx4/en_tx.c
-+++ b/drivers/net/mlx4/en_tx.c
-@@ -37,6 +37,7 @@
- #include <linux/skbuff.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/tcp.h>
- 
- #include "mlx4_en.h"
- 
 diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
-index 1011803..7c8c489 100644
+index ebaed67..456cd91 100644
 --- a/drivers/net/mlx4/mlx4_en.h
 +++ b/drivers/net/mlx4/mlx4_en.h
-@@ -38,7 +38,7 @@
- #include <linux/list.h>
- #include <linux/mutex.h>
- #include <linux/netdevice.h>
--#include <linux/inet_lro.h>
-+#include <net/checksum.h>
- 
- #include <linux/mlx4/device.h>
- #include <linux/mlx4/qp.h>
-@@ -268,11 +268,42 @@ struct mlx4_en_rx_desc {
- 	struct mlx4_wqe_data_seg data[0];
- };
- 
-+struct mlx4_en_lro {
-+	struct hlist_node node;
-+	struct hlist_node flush_node;
-+
-+	/* Id fields come first: */
-+	u32 saddr;
-+	u32 daddr;
-+	u32 sport_dport;
-+	u32 next_seq;
-+	u16 tot_len;
-+	u8 psh;
-+
-+	u32 tsval;
-+	u32 tsecr;
-+	u32 ack_seq;
-+	u16 window;
-+	__be16 vlan_prio;
-+	u16 has_vlan;
-+	u16 has_timestamp;
-+	u16 mss;
-+	__wsum  data_csum;
-+
-+	unsigned long expires;
-+	struct sk_buff *skb;
-+	struct sk_buff *skb_last;
-+};
-+
-+
- struct mlx4_en_rx_ring {
- 	struct mlx4_srq srq;
- 	struct mlx4_hwq_resources wqres;
- 	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
--	struct net_lro_mgr lro;
-+	struct mlx4_en_lro lro;
-+	struct hlist_head *lro_hash;
-+	struct hlist_head lro_free;
-+	struct hlist_head lro_flush;
- 	u32 size ;	/* number of Rx descs*/
- 	u32 actual_size;
- 	u32 size_mask;
-@@ -311,7 +342,7 @@ struct mlx4_en_cq {
+@@ -328,7 +328,7 @@ struct mlx4_en_cq {
  	int                     ring;
  	spinlock_t              lock;
  	struct net_device      *dev;
@@ -979,43 +131,12 @@
  	/* Per-core Tx cq processing support */
  	struct timer_list timer;
  	int size;
-@@ -541,7 +572,7 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- int mlx4_en_process_rx_cq(struct net_device *dev,
- 			  struct mlx4_en_cq *cq,
- 			  int budget);
+@@ -571,7 +571,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
+ int mlx4_en_process_rx_cq_skb(struct net_device *dev,
+ 			      struct mlx4_en_cq *cq,
+ 			      int budget);
 -int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
 +int mlx4_en_poll_rx_cq(struct net_device *poll_dev, int *budget);;
  void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
  			     int is_tx, int rss, int qpn, int cqn, int srqn,
  			     struct mlx4_qp_context *context);
-@@ -552,6 +583,27 @@ void mlx4_en_calc_rx_buf(struct net_device *dev);
- void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
- 				 struct mlx4_en_rss_map *rss_map,
- 				 int num_entries, int num_rings);
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all);
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+		   struct mlx4_en_rx_desc *rx_desc,
-+		   struct skb_frag_struct *skb_frags,
-+		   unsigned int length, struct mlx4_cqe *cqe);
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring);
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro);
-+
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv* priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length);
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv* priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length);
-+
- void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
- int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
- void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
--- 
-1.5.4
-
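
Besides dropping the private en_lro.c implementation that the previous
revision of mlx4_en_0100_to_2.6.24.patch carried, the refreshed patch above
still converts the RX polling routine to the pre-2.6.24 NAPI interface, in
which poll() hangs off the net_device and receives the budget by pointer.
A minimal sketch of that older polling contract, with hypothetical
example_* names:

    static int example_poll(struct net_device *poll_dev, int *budget)
    {
            struct example_priv *priv = netdev_priv(poll_dev);
            int work = min(*budget, poll_dev->quota);
            int done = example_process_rx(priv, work);

            /* both the device quota and the caller's budget are consumed */
            poll_dev->quota -= done;
            *budget -= done;

            if (done < work) {
                    /* ring drained: leave the poll list and re-arm the IRQ */
                    netif_rx_complete(poll_dev);
                    example_arm_irq(priv);
                    return 0;
            }
            return 1;       /* budget exhausted: stay on the poll list */
    }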

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/mlx4_en_0110_no_set_flags.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/mlx4_en_0110_no_set_flags.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.20/mlx4_en_0110_no_set_flags.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -7,9 +7,9 @@
 --- ofed_kernel.orig/drivers/net/mlx4/en_params.c
 +++ ofed_kernel/drivers/net/mlx4/en_params.c
 @@ -593,8 +593,6 @@ const struct ethtool_ops mlx4_en_ethtool
- 	.get_pauseparam = mlx4_en_get_pauseparam,
  	.set_pauseparam = mlx4_en_set_pauseparam,
  	.get_ringparam = mlx4_en_get_ringparam,
+ 	.set_ringparam = mlx4_en_set_ringparam,
 -	.get_flags = ethtool_op_get_flags,
 -	.set_flags = ethtool_op_set_flags,
  };

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/cxgb3_0002_undo_250.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/cxgb3_0002_undo_250.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/cxgb3_0002_undo_250.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,8 @@
-commit 204e2f98c2d13f869b8541f3c57c7314f75cab11
-Author: Divy Le Ray <divy at chelsio.com>
-Date:   Tue May 6 19:26:01 2008 -0700
-
-    cxgb3 - fix EEH
-    
-    Reset the chip when the PCI link goes down.
-    Preserve the napi structure when a sge qset's resources are freed.
-    Replay only HW initialization when the chip comes out of reset.
-    
-    Signed-off-by: Divy Le ray <divy at chelsio.com>
-    Signed-off-by: Jeff Garzik <jgarzik at redhat.com>
-
----
- drivers/net/cxgb3/common.h     |    1 -
- drivers/net/cxgb3/cxgb3_main.c |   10 ++++------
- drivers/net/cxgb3/regs.h       |    8 --------
- drivers/net/cxgb3/sge.c        |   31 ++-----------------------------
- drivers/net/cxgb3/t3_hw.c      |   28 ----------------------------
- 5 files changed, 6 insertions(+), 72 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/common.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/common.h
-+++ ofed_kernel/drivers/net/cxgb3/common.h
-@@ -700,7 +700,6 @@ void mac_prep(struct cmac *mac, struct a
+diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
+index d5e9bf7..dc2c79d 100644
+--- a/drivers/net/cxgb3/common.h
++++ b/drivers/net/cxgb3/common.h
+@@ -726,7 +726,6 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
  void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
  int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
  		    int reset);
@@ -31,11 +10,11 @@
  void t3_led_ready(struct adapter *adapter);
  void t3_fatal_err(struct adapter *adapter);
  void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -2449,6 +2449,9 @@ static pci_ers_result_t t3_io_error_dete
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 8de820e..48fbda6 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -2485,6 +2485,9 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
  	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
  		offload_close(&adapter->tdev);
  
@@ -45,7 +24,7 @@
  	adapter->flags &= ~FULL_INIT_DONE;
  
  	pci_disable_device(pdev);
-@@ -2473,12 +2476,8 @@ static pci_ers_result_t t3_io_slot_reset
+@@ -2509,12 +2512,8 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
  		goto err;
  	}
  	pci_set_master(pdev);
@@ -59,7 +38,7 @@
  		goto err;
  
  	return PCI_ERS_RESULT_RECOVERED;
-@@ -2630,7 +2629,6 @@ static int __devinit init_one(struct pci
+@@ -2666,7 +2665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
  	}
  
  	pci_set_master(pdev);
@@ -67,10 +46,10 @@
  
  	mmio_start = pci_resource_start(pdev, 0);
  	mmio_len = pci_resource_len(pdev, 0);
-Index: ofed_kernel/drivers/net/cxgb3/regs.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/regs.h
-+++ ofed_kernel/drivers/net/cxgb3/regs.h
+diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
+index a035d5c..086cd02 100644
+--- a/drivers/net/cxgb3/regs.h
++++ b/drivers/net/cxgb3/regs.h
 @@ -444,14 +444,6 @@
  
  #define A_PCIE_CFG 0x88
@@ -86,11 +65,11 @@
  #define S_PCIE_CLIDECEN    16
  #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
  #define F_PCIE_CLIDECEN    V_PCIE_CLIDECEN(1U)
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index 1b0861d..912f816 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
  }
  
  /**
@@ -124,7 +103,7 @@
   *	free_qset - free the resources of an SGE queue set
   *	@adapter: the adapter owning the queue set
   *	@q: the queue set
-@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter 
+@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -133,20 +112,11 @@
  }
  
  /**
-@@ -1429,7 +1402,7 @@ static void restart_ctrlq(unsigned long 
-  */
- int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
- {
--	int ret;
-+	int ret;
- 	local_bh_disable();
- 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
- 	local_bh_enable();
-Index: ofed_kernel/drivers/net/cxgb3/t3_hw.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/t3_hw.c
-+++ ofed_kernel/drivers/net/cxgb3/t3_hw.c
-@@ -3265,7 +3265,6 @@ static void config_pcie(struct adapter *
+diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
+index bf5c076..533fc74 100644
+--- a/drivers/net/cxgb3/t3_hw.c
++++ b/drivers/net/cxgb3/t3_hw.c
+@@ -3303,7 +3303,6 @@ static void config_pcie(struct adapter *adap)
  
  	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
  	t3_set_reg_field(adap, A_PCIE_CFG, 0,
@@ -154,15 +124,14 @@
  			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
  }
  
-@@ -3657,30 +3656,3 @@ void t3_led_ready(struct adapter *adapte
- 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+@@ -3706,31 +3705,3 @@ void t3_led_ready(struct adapter *adapter)
  			 F_GPIO0_OUT_VAL);
  }
--
+ 
 -int t3_replay_prep_adapter(struct adapter *adapter)
 -{
 -	const struct adapter_info *ai = adapter->params.info;
--	unsigned int i, j = 0;
+-	unsigned int i, j = -1;
 -	int ret;
 -
 -	early_hw_init(adapter, ai);
@@ -171,15 +140,17 @@
 -		return ret;
 -
 -	for_each_port(adapter, i) {
+-		const struct port_type_info *pti;
 -		struct port_info *p = adap2pinfo(adapter, i);
--		while (!adapter->params.vpd.port_type[j])
--			++j;
 -
--		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
--					ai->mdio_ops);
+-		while (!adapter->params.vpd.port_type[++j])
+-			;
 -
+-		pti = &port_types[adapter->params.vpd.port_type[j]];
+-		ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
+-		if (ret)
+-			return ret;
 -		p->phy.ops->power_down(&p->phy, 1);
--		++j;
 -	}
 -
 -return 0;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/cxgb3_0010_napi.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/cxgb3_0010_napi.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/cxgb3_0010_napi.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,21 @@
----
- drivers/net/cxgb3/adapter.h    |   22 +++--
- drivers/net/cxgb3/cxgb3_main.c |   94 ++++++++++++++++------
- drivers/net/cxgb3/sge.c        |  170 ++++++++++++++++++++++++-----------------
- 3 files changed, 185 insertions(+), 101 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/adapter.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/adapter.h
-+++ ofed_kernel/drivers/net/cxgb3/adapter.h
-@@ -47,13 +47,11 @@
- #include <asm/io.h>
+diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
+index 3b33ecb..21dad82 100644
+--- a/drivers/net/cxgb3/adapter.h
++++ b/drivers/net/cxgb3/adapter.h
+@@ -48,12 +48,10 @@
  
  struct vlan_group;
--struct adapter;
+ struct adapter;
 -struct sge_qset;
  
-+struct adapter;
  struct port_info {
  	struct adapter *adapter;
  	struct vlan_group *vlan_grp;
 -	struct sge_qset *qs;
- 	const struct port_type_info *port_type;
  	u8 port_id;
  	u8 rx_csum_offload;
-@@ -184,8 +182,6 @@ enum {				/* per port SGE statistics */
+ 	u8 nqsets;
+@@ -183,8 +181,6 @@ enum {				/* per port SGE statistics */
  #define T3_MAX_LRO_MAX_PKTS 64
  
  struct sge_qset {		/* an SGE queue set */
@@ -32,7 +24,7 @@
  	struct sge_rspq rspq;
  	struct sge_fl fl[SGE_RXQ_PER_SET];
  	struct sge_txq txq[SGE_TXQ_PER_SET];
-@@ -196,7 +192,7 @@ struct sge_qset {		/* an SGE queue set *
+@@ -195,7 +191,7 @@ struct sge_qset {		/* an SGE queue set */
  	int lro_enabled;
  	int lro_frag_len;
  	void *lro_va;
@@ -41,7 +33,7 @@
  	unsigned long txq_stopped;	/* which Tx queues are stopped */
  	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
  	unsigned long port_stats[SGE_PSTAT_MAX];
-@@ -241,6 +237,12 @@ struct adapter {
+@@ -240,6 +236,12 @@ struct adapter {
  	struct delayed_work adap_check_task;
  	struct work_struct ext_intr_handler_task;
  
@@ -54,7 +46,7 @@
  	struct dentry *debugfs_root;
  
  	struct mutex mdio_lock;
-@@ -267,6 +269,12 @@ static inline struct port_info *adap2pin
+@@ -266,6 +268,12 @@ static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
  	return netdev_priv(adap->port[idx]);
  }
  
@@ -67,7 +59,7 @@
  #define OFFLOAD_DEVMAP_BIT 15
  
  #define tdev2adap(d) container_of(d, struct adapter, tdev)
-@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, str
+@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -76,11 +68,11 @@
  int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
  		unsigned char *data);
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -410,17 +410,49 @@ static void setup_rss(struct adapter *ad
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 0f4c694..342d441 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -435,17 +435,49 @@ static void setup_rss(struct adapter *adap)
  		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
  }
  
@@ -98,9 +90,7 @@
 -	int i;
 +	int i, j, dummy_idx = 0;
 +	struct net_device *nd;
- 
--	for (i = 0; i < SGE_QSETS; i++) {
--		struct sge_qset *qs = &adap->sge.qs[i];
++
 +	for_each_port(adap, i) {
 +		struct net_device *dev = adap->port[i];
 +		const struct port_info *pi = netdev_priv(dev);
@@ -112,7 +102,9 @@
 +				nd = alloc_netdev(sizeof(*p), "", ether_setup);
 +				if (!nd)
 +					goto free_all;
-+
+ 
+-	for (i = 0; i < SGE_QSETS; i++) {
+-		struct sge_qset *qs = &adap->sge.qs[i];
 +				p = netdev_priv(nd);
 +				p->adapter = adap;
 +				nd->weight = 64;
@@ -137,7 +129,7 @@
  }
  
  /*
-@@ -431,18 +463,20 @@ static void init_napi(struct adapter *ad
+@@ -456,18 +488,20 @@ static void init_napi(struct adapter *adap)
  static void quiesce_rx(struct adapter *adap)
  {
  	int i;
@@ -168,7 +160,7 @@
  }
  
  /**
-@@ -455,7 +489,7 @@ static void enable_all_napi(struct adapt
+@@ -480,7 +514,7 @@ static void enable_all_napi(struct adapter *adap)
   */
  static int setup_sge_qsets(struct adapter *adap)
  {
@@ -177,7 +169,7 @@
  	unsigned int ntxq = SGE_TXQ_PER_SET;
  
  	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
-@@ -463,14 +497,15 @@ static int setup_sge_qsets(struct adapte
+@@ -488,14 +522,15 @@ static int setup_sge_qsets(struct adapter *adap)
  
  	for_each_port(adap, i) {
  		struct net_device *dev = adap->port[i];
@@ -196,7 +188,7 @@
  			if (err) {
  				t3_free_sge_resources(adap);
  				return err;
-@@ -884,6 +919,10 @@ static int cxgb_up(struct adapter *adap)
+@@ -909,6 +944,10 @@ static int cxgb_up(struct adapter *adap)
  				goto out;
  		}
  
@@ -207,7 +199,7 @@
  		err = t3_init_hw(adap, 0);
  		if (err)
  			goto out;
-@@ -896,7 +935,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -921,7 +960,6 @@ static int cxgb_up(struct adapter *adap)
  			goto out;
  
  		setup_rss(adap);
@@ -215,7 +207,7 @@
  		adap->flags |= FULL_INIT_DONE;
  	}
  
-@@ -924,7 +962,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -949,7 +987,6 @@ static int cxgb_up(struct adapter *adap)
  				      adap->name, adap)))
  		goto irq_err;
  
@@ -223,7 +215,7 @@
  	t3_sge_start(adap);
  	t3_intr_enable(adap);
  
-@@ -1061,10 +1098,8 @@ static int cxgb_open(struct net_device *
+@@ -1086,10 +1123,8 @@ static int cxgb_open(struct net_device *dev)
  	int other_ports = adapter->open_device_map & PORT_MASK;
  	int err;
  
@@ -235,7 +227,7 @@
  
  	set_bit(pi->port_id, &adapter->open_device_map);
  	if (is_offload(adapter) && !ofld_disable) {
-@@ -2700,6 +2735,7 @@ static int __devinit init_one(struct pci
+@@ -2736,6 +2771,7 @@ static int __devinit init_one(struct pci_dev *pdev,
  #ifdef CONFIG_NET_POLL_CONTROLLER
  		netdev->poll_controller = cxgb_netpoll;
  #endif
@@ -243,7 +235,7 @@
  
  		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
  	}
-@@ -2800,6 +2836,12 @@ static void __devexit remove_one(struct 
+@@ -2836,6 +2872,12 @@ static void __devexit remove_one(struct pci_dev *pdev)
  		t3_free_sge_resources(adapter);
  		cxgb_disable_msi(adapter);
  
@@ -256,11 +248,11 @@
  		for_each_port(adapter, i)
  			if (adapter->port[i])
  				free_netdev(adapter->port[i]);
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter 
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index f6bc6fe..3bbf626 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -270,7 +262,7 @@
  	memset(q, 0, sizeof(*q));
  }
  
-@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, str
+@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
  	unsigned int ndesc, pidx, credits, gen, compl;
  	const struct port_info *pi = netdev_priv(dev);
  	struct adapter *adap = pi->adapter;
@@ -279,7 +271,7 @@
  	struct sge_txq *q = &qs->txq[TXQ_ETH];
  
  	/*
-@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long 
+@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long data)
  	struct sk_buff *skb;
  	struct sge_qset *qs = (struct sge_qset *)data;
  	struct sge_txq *q = &qs->txq[TXQ_CTRL];
@@ -295,7 +287,7 @@
  
  		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
  
-@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long 
+@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long data)
  
  	spin_unlock(&q->lock);
  	wmb();
@@ -304,7 +296,7 @@
  		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
  }
  
-@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struc
+@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
  	else {
  		struct sge_qset *qs = rspq_to_qset(q);
  
@@ -314,7 +306,7 @@
  		q->rx_head = skb;
  	}
  	q->rx_tail = skb;
-@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundl
+@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev,
   *	receive handler.  Batches need to be of modest size as we do prefetches
   *	on the packets in each.
   */
@@ -357,7 +349,7 @@
  			prefetch(head->data);
  			skbs[ngathered] = head;
  			head = head->next;
-@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct 
+@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct *napi, int budget)
  		}
  		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
  	}
@@ -370,7 +362,7 @@
  }
  
  /**
-@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const
+@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const struct rsp_desc *r)
  
  /**
   *	napi_rx_handler - the NAPI handler for Rx processing
@@ -449,7 +441,7 @@
  }
  
  /**
-@@ -2448,7 +2462,8 @@ static inline int handle_responses(struc
+@@ -2448,7 +2462,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
  		return 0;
  	}
@@ -459,7 +451,7 @@
  	return 1;
  }
  
-@@ -2459,7 +2474,8 @@ static inline int handle_responses(struc
+@@ -2459,7 +2474,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -469,7 +461,7 @@
  	struct sge_rspq *q = &qs->rspq;
  
  	spin_lock(&q->lock);
-@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, vo
+@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -484,7 +476,7 @@
  		q->unhandled_irqs++;
  	spin_unlock(&q->lock);
  	return IRQ_HANDLED;
-@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, 
+@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, void *cookie)
  	return IRQ_HANDLED;
  }
  
@@ -502,7 +494,7 @@
  		return 1;
  	}
  	return 0;
-@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int 
+@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
  
  	spin_lock(&q->lock);
  
@@ -515,7 +507,7 @@
  	if (!new_packets && t3_slow_intr_handler(adap) == 0)
  		q->unhandled_irqs++;
  
-@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, voi
+@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, void *cookie)
  static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  {
  	u32 map;
@@ -527,7 +519,7 @@
  
  	t3_write_reg(adap, A_PL_CLI, 0);
  	map = t3_read_reg(adap, A_SG_DATA_INTR);
-@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq
+@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  	if (unlikely(map & F_ERRINTR))
  		t3_slow_intr_handler(adap);
  
@@ -550,7 +542,7 @@
  
  	spin_unlock(&q0->lock);
  	return IRQ_HANDLED;
-@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long d
+@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long data)
  {
  	spinlock_t *lock;
  	struct sge_qset *qs = (struct sge_qset *)data;
@@ -560,7 +552,7 @@
  
  	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
  		reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
-@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long d
+@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long data)
  		spin_unlock(&qs->txq[TXQ_OFLD].lock);
  	}
  	lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
@@ -572,7 +564,7 @@
  			u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
  
  			if (qs->fl[0].credits < qs->fl[0].size)
-@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long d
+@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long data)
   */
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
  {
@@ -586,7 +578,7 @@
  }
  
  /**
-@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_
+@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
   */
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -595,7 +587,7 @@
  {
  	int i, avail, ret = -ENOMEM;
  	struct sge_qset *q = &adapter->sge.qs[id];
-@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *ad
+@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  	}
  
  	spin_unlock_irq(&adapter->sge.reg_lock);
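
For reference, the hunks above map the upstream napi_struct interface back onto the 2.6.21-era polling model, in which the poll callback hangs off the net_device itself. A minimal sketch of that older callback shape, under stated assumptions, is below; process_rx_responses() is a hypothetical stand-in for the driver's real response loop, not a function from this patch.

#include <linux/kernel.h>
#include <linux/netdevice.h>

static int old_style_poll(struct net_device *dev, int *budget)
{
	/* Pre-2.6.24 NAPI: the quota lives on the net_device and the
	 * remaining budget is passed by reference. */
	int limit = min(*budget, dev->quota);
	int done = process_rx_responses(dev, limit);	/* hypothetical helper */

	*budget -= done;
	dev->quota -= done;

	if (done < limit) {
		/* Ring drained: leave polling mode (the driver then
		 * re-enables its interrupt). */
		netif_rx_complete(dev);
		return 0;
	}
	return 1;	/* more work pending: stay on the poll list */
}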

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,23 +3,32 @@
  drivers/infiniband/hw/ipath/ipath_user_sdma.c |    6 +++---
  2 files changed, 4 insertions(+), 4 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:37:35.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:41:22.000000000 -0800
 @@ -698,7 +698,7 @@ retry:
  
  	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
  			      tx->map_len, DMA_TO_DEVICE);
--	if (dma_mapping_error(&dd->pcidev->dev, addr)) {
-+	if (dma_mapping_error(addr)) {
- 		ret = -EIO;
- 		goto unlock;
- 	}
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+-	if (dma_mapping_error(&dd->pcidev->dev, addr))
++	if (dma_mapping_error(addr))
+ 		goto ioerr;
+ 
+ 	dwoffset = tx->map_len >> 2;
+@@ -739,7 +739,7 @@ retry:
+ 		dw = (len + 3) >> 2;
+ 		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
+ 				      DMA_TO_DEVICE);
+-		if (dma_mapping_error(&dd->pcidev->dev, addr))
++		if (dma_mapping_error(addr))
+ 			goto unmap;
+ 		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
+ 		/* SDmaUseLargeBuf has to be set in every descriptor */
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:18.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:54.000000000 -0800
 @@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(cons
  
  	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
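
The change above exists because dma_mapping_error() only gained a struct device argument in 2.6.27, while this backport targets 2.6.21. A hedged sketch of a version-gated wrapper for the two signatures is shown below; the compat_dma_mapping_error name is illustrative, not something defined in this tree.

#include <linux/version.h>
#include <linux/dma-mapping.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
/* Older kernels take only the DMA handle. */
#define compat_dma_mapping_error(dev, addr)	dma_mapping_error(addr)
#else
#define compat_dma_mapping_error(dev, addr)	dma_mapping_error(dev, addr)
#endif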

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/ipoib_0100_to_2.6.21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/ipoib_0100_to_2.6.21.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/ipoib_0100_to_2.6.21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -12,11 +12,11 @@
  drivers/infiniband/ulp/ipoib/ipoib_multicast.c |   10 ++--
  5 files changed, 61 insertions(+), 64 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:56:08.000000000 +0200
-@@ -279,8 +279,6 @@ struct ipoib_dev_priv {
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:46:20.000000000 +0300
+@@ -277,8 +277,6 @@ struct ipoib_dev_priv {
  
  	struct net_device *dev;
  
@@ -25,7 +25,7 @@
  	unsigned long flags;
  
  	struct mutex vlan_mutex;
-@@ -338,6 +336,8 @@ struct ipoib_dev_priv {
+@@ -336,6 +334,8 @@ struct ipoib_dev_priv {
  
  	struct ib_event_handler event_handler;
  
@@ -34,7 +34,7 @@
  	struct net_device *parent;
  	struct list_head child_intfs;
  	struct list_head list;
-@@ -425,7 +425,7 @@ extern struct workqueue_struct *ipoib_wo
+@@ -423,7 +423,7 @@ extern struct workqueue_struct *ipoib_wo
  
  /* functions */
  
@@ -43,10 +43,10 @@
  void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
  void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:46:20.000000000 +0300
 @@ -593,7 +593,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		ipoib_dbg(priv, "cm recv error "
  			   "(status=%d, wrid=%d vend_err %x)\n",
@@ -56,7 +56,7 @@
  		if (has_srq)
  			goto repost;
  		else {
-@@ -658,7 +658,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
+@@ -646,7 +646,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		 * this packet and reuse the old buffer.
  		 */
  		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
@@ -65,7 +65,7 @@
  		goto repost;
  	}
  
-@@ -676,8 +676,8 @@ copied:
+@@ -664,8 +664,8 @@ copied:
  	skb_pull(skb, IPOIB_ENCAP_LEN);
  
  	dev->last_rx = jiffies;
@@ -76,7 +76,7 @@
  
  	skb->dev = dev;
  	/* XXX get correct PACKET_ type here */
-@@ -726,8 +726,8 @@ void ipoib_cm_send(struct net_device *de
+@@ -714,8 +714,8 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(skb->len > tx->mtu)) {
  		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
  			   skb->len, tx->mtu);
@@ -87,7 +87,7 @@
  		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
  		return;
  	}
-@@ -746,7 +746,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -734,7 +734,7 @@ void ipoib_cm_send(struct net_device *de
  	tx_req->skb = skb;
  	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
  	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
@@ -96,7 +96,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -756,7 +756,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -744,7 +744,7 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
  			       addr, skb->len))) {
  		ipoib_warn(priv, "post_send failed\n");
@@ -105,7 +105,7 @@
  		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
  		dev_kfree_skb_any(skb);
  	} else {
-@@ -793,8 +793,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
+@@ -781,8 +781,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
  	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
  
  	/* FIXME: is this right? Shouldn't we only increment on success? */
@@ -116,10 +116,10 @@
  
  	dev_kfree_skb_any(tx_req->skb);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:43:16.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:49:04.000000000 +0300
 @@ -261,7 +261,7 @@ static void ipoib_ib_handle_rx_wc(struct
  	 * this packet and reuse the old buffer.
  	 */
@@ -263,60 +263,41 @@
  		--priv->tx_outstanding;
  		ipoib_dma_unmap_tx(priv->ca, tx_req);
  		dev_kfree_skb_any(skb);
-@@ -809,6 +812,7 @@ int ipoib_ib_dev_stop(struct net_device 
+@@ -708,8 +711,7 @@ int ipoib_ib_dev_open(struct net_device 
+ 	priv->poll_timer.function = ipoib_ib_tx_timer_func;
+ 	priv->poll_timer.data = (unsigned long)dev;
+ 
+-	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_enable(&priv->napi);
++	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+ 
+ 	return 0;
+ }
+@@ -828,8 +830,8 @@ int ipoib_ib_dev_stop(struct net_device 
+ 	struct ipoib_tx_buf *tx_req;
  	int i;
  
- 	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+-	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_disable(&priv->napi);
++	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 +	netif_poll_disable(dev);
  
  	ipoib_cm_dev_stop(dev);
  
-@@ -893,6 +897,7 @@ timeout:
- 		msleep(1);
- 	}
+@@ -903,6 +905,7 @@ timeout:
  
+ 	ipoib_ah_dev_cleanup(dev);
+ 
 +	netif_poll_enable(dev);
  	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
  
  	return 0;
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:55:35.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:58:18.000000000 +0200
-@@ -106,20 +106,16 @@ int ipoib_open(struct net_device *dev)
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:44:02.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:46:20.000000000 +0300
+@@ -585,7 +585,7 @@ static void neigh_add_path(struct sk_buf
  
- 	ipoib_dbg(priv, "bringing up interface\n");
- 
--	if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
--		napi_enable(&priv->napi);
-+	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- 
- 	if (ipoib_pkey_dev_delay_open(dev))
- 		return 0;
- 
--	if (ipoib_ib_dev_open(dev)) {
--		napi_disable(&priv->napi);
--		return -EINVAL;
--	}
-+	if (ipoib_ib_dev_open(dev))
-+  		return -EINVAL;
- 
- 	if (ipoib_ib_dev_up(dev)) {
- 		ipoib_ib_dev_stop(dev, 1);
--		napi_disable(&priv->napi);
- 		return -EINVAL;
- 	}
- 
-@@ -152,7 +148,6 @@ static int ipoib_stop(struct net_device 
- 	ipoib_dbg(priv, "stopping interface\n");
- 
- 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
--	napi_disable(&priv->napi);
- 
- 	netif_stop_queue(dev);
- 
-@@ -604,7 +599,7 @@ static void neigh_add_path(struct sk_buf
- 
  	neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
  	if (!neigh) {
 -		++dev->stats.tx_dropped;
@@ -324,7 +305,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -667,7 +662,7 @@ err_list:
+@@ -646,7 +646,7 @@ err_list:
  err_path:
  	ipoib_neigh_free(dev, neigh);
  err_drop:
@@ -333,7 +314,7 @@
  	dev_kfree_skb_any(skb);
  
  	spin_unlock_irqrestore(&priv->lock, flags);
-@@ -714,7 +709,7 @@ static void unicast_arp_send(struct sk_b
+@@ -697,7 +697,7 @@ static void unicast_arp_send(struct sk_b
  			} else
  				__path_add(dev, path);
  		} else {
@@ -342,7 +323,7 @@
  			dev_kfree_skb_any(skb);
  		}
  
-@@ -733,7 +728,7 @@ static void unicast_arp_send(struct sk_b
+@@ -716,7 +716,7 @@ static void unicast_arp_send(struct sk_b
  		skb_push(skb, sizeof *phdr);
  		__skb_queue_tail(&path->queue, skb);
  	} else {
@@ -351,7 +332,7 @@
  		dev_kfree_skb_any(skb);
  	}
  
-@@ -790,7 +785,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -773,7 +773,7 @@ static int ipoib_start_xmit(struct sk_bu
  			__skb_queue_tail(&neigh->queue, skb);
  			spin_unlock_irqrestore(&priv->lock, flags);
  		} else {
@@ -360,7 +341,7 @@
  			dev_kfree_skb_any(skb);
  		}
  	} else {
-@@ -816,7 +811,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -799,7 +799,7 @@ static int ipoib_start_xmit(struct sk_bu
  					   IPOIB_QPN(phdr->hwaddr),
  					   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
  				dev_kfree_skb_any(skb);
@@ -369,7 +350,7 @@
  				return NETDEV_TX_OK;
  			}
  
-@@ -842,7 +837,7 @@ static void ipoib_timeout(struct net_dev
+@@ -825,7 +825,7 @@ static void ipoib_timeout(struct net_dev
  static int ipoib_hard_header(struct sk_buff *skb,
  			     struct net_device *dev,
  			     unsigned short type,
@@ -378,7 +359,7 @@
  {
  	struct ipoib_header *header;
  
-@@ -931,9 +926,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
+@@ -914,9 +914,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
  void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
  {
  	struct sk_buff *skb;
@@ -390,7 +371,7 @@
  		dev_kfree_skb_any(skb);
  	}
  	if (ipoib_cm_get(neigh))
-@@ -1008,10 +1004,6 @@ void ipoib_dev_cleanup(struct net_device
+@@ -991,10 +992,6 @@ void ipoib_dev_cleanup(struct net_device
  	priv->tx_ring = NULL;
  }
  
@@ -401,7 +382,7 @@
  static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
  		       void **tcph, u64 *hdr_flags, void *priv)
  {
-@@ -1069,13 +1061,13 @@ static void ipoib_setup(struct net_devic
+@@ -1052,13 +1049,13 @@ static void ipoib_setup(struct net_devic
  	dev->change_mtu		 = ipoib_change_mtu;
  	dev->hard_start_xmit	 = ipoib_start_xmit;
  	dev->tx_timeout		 = ipoib_timeout;
@@ -418,10 +399,10 @@
  
  	dev->watchdog_timeo	 = HZ;
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:46:20.000000000 +0300
 @@ -100,7 +100,7 @@ static void ipoib_mcast_free(struct ipoi
  	}
  
@@ -448,7 +429,7 @@
  			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
  		}
  		netif_tx_unlock_bh(dev);
-@@ -667,7 +668,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -677,7 +678,7 @@ void ipoib_mcast_send(struct net_device 
  	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)		||
  	    !priv->broadcast					||
  	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
@@ -457,7 +438,7 @@
  		dev_kfree_skb_any(skb);
  		goto unlock;
  	}
-@@ -682,7 +683,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -692,7 +693,7 @@ void ipoib_mcast_send(struct net_device 
  		if (!mcast) {
  			ipoib_warn(priv, "unable to allocate memory for "
  				   "multicast structure\n");
@@ -466,7 +447,7 @@
  			dev_kfree_skb_any(skb);
  			goto out;
  		}
-@@ -697,7 +698,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -707,7 +708,7 @@ void ipoib_mcast_send(struct net_device 
  		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
  			skb_queue_tail(&mcast->pkt_queue, skb);
  		else {

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/iw_nes_050_to_2_6_24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/iw_nes_050_to_2_6_24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/iw_nes_050_to_2_6_24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:23:23.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -1118,19 +1118,19 @@ static ssize_t nes_show_wqm_quanta(struc
  static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
  					const char *buf, size_t count)

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/iw_nes_100_to_2_6_23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/iw_nes_100_to_2_6_23.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/iw_nes_100_to_2_6_23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/Kconfig nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig
---- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:38:45.000000000 -0500
 @@ -2,7 +2,6 @@ config INFINIBAND_NES
  	tristate "NetEffect RNIC Driver"
  	depends on PCI && INET && INFINIBAND
@@ -10,8 +10,8 @@
  	  This is a low-level driver for NetEffect RDMA enabled
  	  Network Interface Cards (RNIC).
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -360,10 +360,11 @@ struct ib_qp *nes_get_qp(struct ib_devic
   */
  static void nes_print_macaddr(struct net_device *netdev)
@@ -29,9 +29,9 @@
  
  /**
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_cm.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2008-11-07 15:26:56.000000000 -0600
-@@ -998,7 +998,6 @@ static int nes_addr_resolve_neigh(struct
+--- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1086,7 +1086,6 @@ static int nes_addr_resolve_neigh(struct
  	struct flowi fl;
  	struct neighbour *neigh;
  	int rc = -1;
@@ -39,7 +39,7 @@
  
  	memset(&fl, 0, sizeof fl);
  	fl.nl_u.ip4_u.daddr = htonl(dst_ip);
-@@ -1012,8 +1011,11 @@ static int nes_addr_resolve_neigh(struct
+@@ -1100,8 +1099,11 @@ static int nes_addr_resolve_neigh(struct
  	if (neigh) {
  		if (neigh->nud_state & NUD_VALID) {
  			nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -53,7 +53,7 @@
  			nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
  					     dst_ip, NES_ARP_ADD);
  			rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
-@@ -1042,7 +1044,6 @@ static struct nes_cm_node *make_cm_node(
+@@ -1130,7 +1132,6 @@ static struct nes_cm_node *make_cm_node(
  	int arpindex = 0;
  	struct nes_device *nesdev;
  	struct nes_adapter *nesadapter;
@@ -61,7 +61,7 @@
  
  	/* create an hte and cm_node for this instance */
  	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
-@@ -1111,8 +1112,11 @@ static struct nes_cm_node *make_cm_node(
+@@ -1197,8 +1198,11 @@ static struct nes_cm_node *make_cm_node(
  
  	/* copy the mac addr to node context */
  	memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
@@ -76,8 +76,8 @@
  	add_hte_node(cm_core, cm_node);
  	atomic_inc(&cm_nodes_created);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2008-11-07 15:38:18.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:38:45.000000000 -0500
 @@ -38,14 +38,9 @@
  #include <linux/ip.h>
  #include <linux/tcp.h>
@@ -90,10 +90,10 @@
 -module_param(nes_lro_max_aggr, uint, 0444);
 -MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
 -
- static u32 crit_err_count;
- u32 int_mod_timer_init;
- u32 int_mod_cq_depth_256;
-@@ -1540,25 +1535,6 @@ static void nes_rq_wqes_timeout(unsigned
+ static int wide_ppm_offset;
+ module_param(wide_ppm_offset, int, 0644);
+ MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm");
+@@ -1531,25 +1526,6 @@ static void nes_rq_wqes_timeout(unsigned
  }
  
  
@@ -119,7 +119,7 @@
  /**
   * nes_init_nic_qp
   */
-@@ -1783,14 +1759,6 @@ int nes_init_nic_qp(struct nes_device *n
+@@ -1773,14 +1749,6 @@ int nes_init_nic_qp(struct nes_device *n
  			jumbomode = 1;
  		nes_nic_init_timer_defaults(nesdev, jumbomode);
  	}
@@ -134,7 +134,7 @@
  	return 0;
  }
  
-@@ -2541,7 +2509,7 @@ static void nes_nic_napi_ce_handler(stru
+@@ -2556,7 +2524,7 @@ static void nes_nic_napi_ce_handler(stru
  {
  	struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
  
@@ -143,7 +143,7 @@
  }
  
  
-@@ -2576,13 +2544,10 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2591,13 +2559,10 @@ void nes_nic_ce_handler(struct nes_devic
  	u16 pkt_type;
  	u16 rqes_processed = 0;
  	u8 sq_cqes = 0;
@@ -157,7 +157,7 @@
  	do {
  		if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
  				NES_NIC_CQE_VALID) {
-@@ -2714,17 +2679,9 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2729,17 +2694,9 @@ void nes_nic_ce_handler(struct nes_devic
  							>> 16);
  					nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
  							nesvnic->netdev->name, vlan_tag);
@@ -178,7 +178,7 @@
  
  skip_rx_indicate0:
  				nesvnic->netdev->last_rx = jiffies;
-@@ -2755,8 +2712,6 @@ skip_rx_indicate0:
+@@ -2770,8 +2727,6 @@ skip_rx_indicate0:
  
  	} while (1);
  
@@ -188,18 +188,18 @@
  		barrier();
  		/* restart the queue if it had been stopped */
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.h nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:38:45.000000000 -0500
 @@ -33,8 +33,6 @@
  #ifndef __NES_HW_H
  #define __NES_HW_H
  
 -#include <linux/inet_lro.h>
 -
+ #define NES_PHY_TYPE_CX4       1
  #define NES_PHY_TYPE_1G        2
  #define NES_PHY_TYPE_IRIS      3
- #define NES_PHY_TYPE_ARGUS     4
-@@ -991,8 +989,6 @@ struct nes_hw_tune_timer {
+@@ -1005,8 +1003,6 @@ struct nes_hw_tune_timer {
  #define NES_TIMER_ENABLE_LIMIT      4
  #define NES_MAX_LINK_INTERRUPTS     128
  #define NES_MAX_LINK_CHECK          200
@@ -208,7 +208,7 @@
  
  struct nes_adapter {
  	u64              fw_ver;
-@@ -1168,7 +1164,6 @@ struct nes_vnic {
+@@ -1194,7 +1190,6 @@ struct nes_vnic {
  	u32               msg_enable;
  	/* u32 tx_avail; */
  	__be32            local_ipaddr;
@@ -216,7 +216,7 @@
  	spinlock_t           tx_lock;	/* could use netdev tx lock? */
  	struct timer_list    rq_wqes_timer;
  	u32                  nic_mem_size;
-@@ -1196,9 +1191,6 @@ struct nes_vnic {
+@@ -1222,9 +1217,6 @@ struct nes_vnic {
  	u8  of_device_registered;
  	u8  rdma_enabled;
  	u8  rx_checksum_disabled;
@@ -227,8 +227,8 @@
  
  struct nes_ib_device {
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
 @@ -96,35 +96,38 @@ static int nics_per_function = 1;
  /**
   * nes_netdev_poll
@@ -292,7 +292,7 @@
  	netif_stop_queue(netdev);
  	list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
  		first_nesvnic = container_of(list_pos, struct nes_vnic, list);
-@@ -797,14 +798,16 @@ static int nes_netdev_set_mac_address(st
+@@ -775,14 +776,16 @@ static int nes_netdev_set_mac_address(st
  	int i;
  	u32 macaddr_low;
  	u16 macaddr_high;
@@ -312,7 +312,7 @@
  	macaddr_high  = ((u16)netdev->dev_addr[0]) << 8;
  	macaddr_high += (u16)netdev->dev_addr[1];
  	macaddr_low   = ((u32)netdev->dev_addr[2]) << 24;
-@@ -909,11 +912,11 @@ static void nes_netdev_set_multicast_lis
+@@ -887,11 +890,11 @@ static void nes_netdev_set_multicast_lis
  			if (mc_index >= max_pft_entries_avaiable)
  				break;
  			if (multicast_addr) {
@@ -329,7 +329,7 @@
  				macaddr_high  = ((u16)multicast_addr->dmi_addr[0]) << 8;
  				macaddr_high += (u16)multicast_addr->dmi_addr[1];
  				macaddr_low   = ((u32)multicast_addr->dmi_addr[2]) << 24;
-@@ -1063,9 +1066,6 @@ static const char nes_ethtool_stringset[
+@@ -1040,9 +1043,6 @@ static const char nes_ethtool_stringset[
  	"CQ Depth 32",
  	"CQ Depth 128",
  	"CQ Depth 256",
@@ -339,17 +339,17 @@
  };
  
  #define NES_ETHTOOL_STAT_COUNT  ARRAY_SIZE(nes_ethtool_stringset)
-@@ -1263,9 +1263,6 @@ static void nes_netdev_get_ethtool_stats
- 	target_stat_values[52] = int_mod_cq_depth_32;
- 	target_stat_values[53] = int_mod_cq_depth_128;
- 	target_stat_values[54] = int_mod_cq_depth_256;
--	target_stat_values[55] = nesvnic->lro_mgr.stats.aggregated;
--	target_stat_values[56] = nesvnic->lro_mgr.stats.flushed;
--	target_stat_values[57] = nesvnic->lro_mgr.stats.no_desc;
+@@ -1240,9 +1240,6 @@ static void nes_netdev_get_ethtool_stats
+ 	target_stat_values[++index] = int_mod_cq_depth_32;
+ 	target_stat_values[++index] = int_mod_cq_depth_128;
+ 	target_stat_values[++index] = int_mod_cq_depth_256;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
  
  }
  
-@@ -1542,8 +1539,6 @@ static struct ethtool_ops nes_ethtool_op
+@@ -1525,8 +1522,6 @@ static struct ethtool_ops nes_ethtool_op
  	.set_sg = ethtool_op_set_sg,
  	.get_tso = ethtool_op_get_tso,
  	.set_tso = ethtool_op_set_tso,
@@ -358,7 +358,7 @@
  };
  
  
-@@ -1615,7 +1610,8 @@ struct net_device *nes_netdev_init(struc
+@@ -1598,7 +1593,8 @@ struct net_device *nes_netdev_init(struc
  	netdev->type = ARPHRD_ETHER;
  	netdev->features = NETIF_F_HIGHDMA;
  	netdev->ethtool_ops = &nes_ethtool_ops;
@@ -368,7 +368,7 @@
  	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
  	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
  	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
-@@ -1648,7 +1644,6 @@ struct net_device *nes_netdev_init(struc
+@@ -1630,7 +1626,6 @@ struct net_device *nes_netdev_init(struc
  
  	if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
  		netdev->features |= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/iw_nes_200_to_2_6_22.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/iw_nes_200_to_2_6_22.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/iw_nes_200_to_2_6_22.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:52:27.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -593,16 +593,22 @@ static int __devinit nes_probe(struct pc
  						nesdev->nesadapter->port_count;
  	}
@@ -35,9 +35,9 @@
  
  	tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.h nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h
---- linux-2.6/drivers/infiniband/hw/nes/nes.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2008-11-07 15:52:27.000000000 -0600
-@@ -276,14 +276,7 @@ static inline __le32 get_crc_value(struc
+--- linux-2.6/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:38:45.000000000 -0500
+@@ -274,14 +274,7 @@ static inline __le32 get_crc_value(struc
  	u32 crc_value;
  	crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad));
  

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/iw_nes_210_to_2_6_21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/iw_nes_210_to_2_6_21.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/iw_nes_210_to_2_6_21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,23 @@
+diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1549,6 +1549,11 @@ static void nes_netdev_vlan_rx_register(
+ 	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ }
+ 
++static void nes_netdev_vlan_rx_kill_vid(struct net_device *netdev, 
++					unsigned short vid)
++{
++}
++
+ 
+ /**
+  * nes_netdev_init - initialize network device
+@@ -1598,6 +1603,7 @@ struct net_device *nes_netdev_init(struc
+ 	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
+ 	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ 	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
++	netdev->vlan_rx_kill_vid = nes_netdev_vlan_rx_kill_vid;
+ 
+ 	/* Fill in the port structure */
+ 	nesvnic->netdev = netdev;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/mlx4_en_0099_no_multiqueue.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/mlx4_en_0099_no_multiqueue.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/mlx4_en_0099_no_multiqueue.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,73 @@
+diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
+index bce700a..a9ee0d7 100644
+--- a/drivers/net/mlx4/en_netdev.c
++++ b/drivers/net/mlx4/en_netdev.c
+@@ -963,7 +963,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	int i;
+ 	int err;
+ 
+-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
++	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+ 	if (dev == NULL) {
+ 		mlx4_err(mdev, "Net device allocation failed\n");
+ 		return -ENOMEM;
+@@ -1036,7 +1036,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	dev->open = &mlx4_en_open;
+ 	dev->stop = &mlx4_en_close;
+ 	dev->hard_start_xmit = &mlx4_en_xmit;
+-	dev->select_queue = &mlx4_en_select_queue;
+ 	dev->get_stats = &mlx4_en_get_stats;
+ 	dev->set_multicast_list = &mlx4_en_set_multicast;
+ 	dev->set_mac_address = &mlx4_en_set_mac;
+diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
+index 3d8246f..72e166f 100644
+--- a/drivers/net/mlx4/en_tx.c
++++ b/drivers/net/mlx4/en_tx.c
+@@ -392,7 +392,7 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+ 			 *   transmission on that ring would stop the queue.
+ 			 */
+ 			ring->blocked = 0;
+-			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
++			netif_wake_queue(dev);
+ 			priv->port_stats.wake_queue++;
+ 		}
+ 	}
+@@ -612,7 +612,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
+ 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ }
+ 
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
++static int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	u16 vlan_tag = 0;
+@@ -703,7 +703,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+-	tx_ind = skb->queue_mapping;
++	tx_ind = mlx4_en_select_queue(dev, skb);
+ 	ring = &priv->tx_ring[tx_ind];
+ 	if (priv->vlgrp && vlan_tx_tag_present(skb))
+ 		vlan_tag = vlan_tx_tag_get(skb);
+@@ -713,7 +713,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ 		/* every full Tx ring stops queue.
+ 		 * TODO: implement multi-queue support (per-queue stop) */
+-		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
++		netif_stop_queue(dev);
+ 		ring->blocked = 1;
+ 		priv->port_stats.queue_stopped++;
+ 
+diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
+index adef17c..995e318 100644
+--- a/drivers/net/mlx4/mlx4_en.h
++++ b/drivers/net/mlx4/mlx4_en.h
+@@ -521,7 +521,6 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq);
+ void mlx4_en_poll_tx_cq(unsigned long data);
+ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+ 
+ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+ 			   u32 size, u16 stride);
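
The patch above strips the multiqueue TX API, which 2.6.21 does not provide, and falls back to the single device queue. One way to express the same fallback as a version-gated compat layer is sketched below; the compat_* names are assumptions made for illustration only.

#include <linux/version.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
/* No per-ring TX queues: map everything onto the single device queue. */
#define compat_alloc_etherdev_mq(priv_sz, nq)	alloc_etherdev(priv_sz)
#define compat_tx_wake_queue(dev, ring)		netif_wake_queue(dev)
#define compat_tx_stop_queue(dev, ring)		netif_stop_queue(dev)
#else
#define compat_alloc_etherdev_mq(priv_sz, nq)	alloc_etherdev_mq(priv_sz, nq)
#define compat_tx_wake_queue(dev, ring)		\
	netif_tx_wake_queue(netdev_get_tx_queue(dev, ring))
#define compat_tx_stop_queue(dev, ring)		\
	netif_tx_stop_queue(netdev_get_tx_queue(dev, ring))
#endif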

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/mlx4_en_0100_to_2.6.24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/mlx4_en_0100_to_2.6.24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/mlx4_en_0100_to_2.6.24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,33 +1,5 @@
-From 6d0a6cc6981404e95297924c9a8f169b3447a98e Mon Sep 17 00:00:00 2001
-From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
-Date: Thu, 2 Oct 2008 11:26:30 +0300
-Subject: [PATCH] mlx4_en: LRO backport patch
-
-Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
----
- drivers/net/mlx4/Makefile    |    2 +-
- drivers/net/mlx4/en_cq.c     |   23 ++-
- drivers/net/mlx4/en_lro.c    |  529 ++++++++++++++++++++++++++++++++++++++++++
- drivers/net/mlx4/en_netdev.c |    5 +-
- drivers/net/mlx4/en_params.c |   19 +--
- drivers/net/mlx4/en_rx.c     |  121 +++-------
- drivers/net/mlx4/en_tx.c     |    1 +
- drivers/net/mlx4/mlx4_en.h   |   60 +++++-
- 8 files changed, 646 insertions(+), 114 deletions(-)
- create mode 100644 drivers/net/mlx4/en_lro.c
-
-diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
-index acb1b91..e6bfdec 100644
---- a/drivers/net/mlx4/Makefile
-+++ b/drivers/net/mlx4/Makefile
-@@ -5,4 +5,4 @@ mlx4_core-y :=  alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-
- obj-$(CONFIG_MLX4_EN)                  += mlx4_en.o
-
--mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o
-+mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o en_lro.o
 diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
-index cf9c5df..2348bfc 100644
+index a4d6e46..440633a 100644
 --- a/drivers/net/mlx4/en_cq.c
 +++ b/drivers/net/mlx4/en_cq.c
 @@ -34,6 +34,7 @@
@@ -38,7 +10,7 @@
  
  #include "mlx4_en.h"
  
-@@ -100,8 +101,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -106,8 +107,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  		cq->timer.function = mlx4_en_poll_tx_cq;
  		cq->timer.data = (unsigned long) cq;
  	} else {
@@ -58,7 +30,7 @@
  	}
  
  	return 0;
-@@ -123,8 +133,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -129,8 +139,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  
  	if (cq->is_tx)
  		del_timer(&cq->timer);
@@ -74,548 +46,8 @@
  
  	mlx4_cq_free(mdev->dev, &cq->mcq);
  }
-diff --git a/drivers/net/mlx4/en_lro.c b/drivers/net/mlx4/en_lro.c
-new file mode 100644
-index 0000000..9d17376
---- /dev/null
-+++ b/drivers/net/mlx4/en_lro.c
-@@ -0,0 +1,534 @@
-+/*
-+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses.  You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ *     Redistribution and use in source and binary forms, with or
-+ *     without modification, are permitted provided that the following
-+ *     conditions are met:
-+ *
-+ *      - Redistributions of source code must retain the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer.
-+ *
-+ *      - Redistributions in binary form must reproduce the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer in the documentation and/or other materials
-+ *        provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ */
-+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <net/tcp.h>
-+#include <linux/if_vlan.h>
-+#include <linux/delay.h>
-+
-+#include "mlx4_en.h"
-+
-+/* LRO hash function - using sum of source and destination port LSBs is
-+ * good enough */
-+#define LRO_INDEX(th, size) \
-+	((*((u8*) &th->source + 1) + *((u8*) &th->dest + 1)) & (size - 1))
-+
-+/* #define CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+static void mlx4_en_lro_validate(struct mlx4_en_priv* priv, struct mlx4_en_lro *lro)
-+{
-+	int i;
-+	int size, size2;
-+	struct sk_buff *skb = lro->skb;
-+	skb_frag_t *frags;
-+	int len, len2;
-+	int cur_skb = 0;
-+
-+	/* Sum fragment sizes of first skb */
-+	len = skb->len;
-+	size = skb_headlen(skb);
-+	frags = skb_shinfo(skb)->frags;
-+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+		size += frags[i].size;
-+	}
-+
-+	/* Add in fragments of linked skb's */
-+	skb = skb_shinfo(skb)->frag_list;
-+	while (skb) {
-+		cur_skb++;
-+		len2 = skb->len;
-+		if (skb_headlen(skb)) {
-+			mlx4_err(priv->mdev, "Bad LRO format: non-zero headlen "
-+				  "in fraglist (skb:%d)\n", cur_skb);
-+			return;
-+		}
-+
-+		size2 = 0;
-+		frags = skb_shinfo(skb)->frags;
-+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+			size2 += frags[i].size;
-+		}
-+
-+		if (size2 != len2) {
-+			mlx4_err(priv->mdev, "Bad skb size:%d in LRO fraglist. "
-+			          "Expected:%d (skb:%d)\n", size2, len2, cur_skb);
-+			return;
-+		}
-+		size += size2;
-+		skb = skb->next;
-+	}
-+
-+	if (size != len)
-+		mlx4_err(priv->mdev, "Bad LRO size:%d expected:%d\n", size, len);
-+}
-+#endif /* MLX4_EN_DEBUG_LRO */
-+
-+static void mlx4_en_lro_flush_single(struct mlx4_en_priv* priv,
-+		   struct mlx4_en_rx_ring* ring, struct mlx4_en_lro *lro)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct sk_buff *skb = lro->skb;
-+	struct iphdr *iph = (struct iphdr *) skb->data;
-+	struct tcphdr *th = (struct tcphdr *)(iph + 1);
-+	unsigned int headlen = skb_headlen(skb);
-+	__wsum tcp_hdr_csum;
-+	u32 *ts;
-+
-+	/* Update IP length and checksum */
-+	iph->tot_len = htons(lro->tot_len);
-+	iph->check = 0;
-+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-+
-+	/* Update latest TCP ack, window, psh, and timestamp */
-+	th->ack_seq = lro->ack_seq;
-+	th->window = lro->window;
-+	th->psh = !!lro->psh;
-+	if (lro->has_timestamp) {
-+		ts = (u32 *) (th + 1);
-+		ts[1] = htonl(lro->tsval);
-+		ts[2] = lro->tsecr;
-+	}
-+	th->check = 0;
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, 0);
-+	lro->data_csum = csum_add(lro->data_csum, tcp_hdr_csum);
-+	th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
-+				      lro->tot_len - (iph->ihl << 2),
-+				      IPPROTO_TCP, lro->data_csum);
-+
-+	/* Update skb */
-+	skb->len = lro->tot_len;
-+	skb->data_len = lro->tot_len - headlen;
-+	skb->truesize = skb->len + sizeof(struct sk_buff);
-+	skb_shinfo(skb)->gso_size = lro->mss;
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+	mlx4_en_lro_validate(priv, lro);
-+#endif /* CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+	/* Push it up the stack */
-+	if (priv->vlgrp && lro->has_vlan)
-+		vlan_hwaccel_receive_skb(skb, priv->vlgrp,
-+					be16_to_cpu(lro->vlan_prio));
-+	else
-+		netif_receive_skb(skb);
-+	priv->dev->last_rx = jiffies;
-+
-+	/* Increment stats */
-+	priv->port_stats.lro_flushed++;
-+
-+	/* Move session back to the free list */
-+	hlist_del(&lro->node);
-+	hlist_del(&lro->flush_node);
-+	hlist_add_head(&lro->node, &ring->lro_free);
-+}
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_flush, flush_node) {
-+		if (all || time_after(jiffies, lro->expires))
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+	}
-+}
-+
-+static inline int mlx4_en_lro_append(struct mlx4_en_priv *priv,
-+				   struct mlx4_en_lro *lro,
-+				   struct mlx4_en_rx_desc *rx_desc,
-+				   struct skb_frag_struct *skb_frags,
-+				   struct mlx4_en_rx_alloc *page_alloc,
-+				   unsigned int data_len,
-+				   int hlen)
-+{
-+	struct sk_buff *skb = lro->skb_last;
-+	struct skb_shared_info *info;
-+	struct skb_frag_struct *frags_copy;
-+	int nr_frags;
-+
-+	if (skb_shinfo(skb)->nr_frags + priv->num_frags > MAX_SKB_FRAGS)
-+		return -ENOMEM;
-+
-+	info = skb_shinfo(skb);
-+
-+	/* Copy fragments from descriptor ring to skb */
-+	frags_copy = info->frags + info->nr_frags;
-+	nr_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
-+						frags_copy,
-+						page_alloc,
-+						data_len + hlen);
-+	if (!nr_frags) {
-+		mlx4_dbg(DRV, priv, "Failed completing rx desc during LRO append\n");
-+		return -ENOMEM;
-+	}
-+
-+	/* Skip over headers */
-+	frags_copy[0].page_offset += hlen;
-+
-+	if (nr_frags == 1)
-+		frags_copy[0].size = data_len;
-+	else {
-+		/* Adjust size of last fragment to match packet length.
-+		 * Note: if this fragment is also the first one, the
-+		 *       operation is completed in the next line */
-+		frags_copy[nr_frags - 1].size = hlen + data_len -
-+				priv->frag_info[nr_frags - 1].frag_prefix_size;
-+
-+		/* Adjust size of first fragment */
-+		frags_copy[0].size -= hlen;
-+	}
-+
-+	/* Update skb bookkeeping */
-+	skb->len += data_len;
-+	skb->data_len += data_len;
-+	info->nr_frags += nr_frags;
-+	return 0;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_find_session(struct mlx4_en_dev *mdev,
-+						       struct mlx4_en_rx_ring *ring,
-+						       struct iphdr *iph,
-+						       struct tcphdr *th)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node;
-+	int index = LRO_INDEX(th, mdev->profile.num_lro);
-+	struct hlist_head *list = &ring->lro_hash[index];
-+
-+	hlist_for_each_entry(lro, node, list, node) {
-+		if (lro->sport_dport == *((u32*) &th->source) &&
-+		    lro->saddr == iph->saddr &&
-+		    lro->daddr == iph->daddr)
-+			return lro;
-+	}
-+	return NULL;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_alloc_session(struct mlx4_en_priv *priv,
-+							struct mlx4_en_rx_ring *ring)
-+{
-+	return hlist_empty(&ring->lro_free) ? NULL :
-+		hlist_entry(ring->lro_free.first, struct mlx4_en_lro, node);
-+}
-+
-+static __wsum mlx4_en_lro_tcp_data_csum(struct iphdr *iph,
-+					struct tcphdr *th, int len)
-+{
-+	__wsum tcp_csum;
-+	__wsum tcp_hdr_csum;
-+	__wsum tcp_ps_hdr_csum;
-+
-+	tcp_csum = ~csum_unfold(th->check);
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, tcp_csum);
-+
-+	tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-+					     len + (th->doff << 2),
-+					     IPPROTO_TCP, 0);
-+
-+	return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
-+			tcp_ps_hdr_csum);
-+}
-+
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+					  struct mlx4_en_rx_desc *rx_desc,
-+					  struct skb_frag_struct *skb_frags,
-+					  unsigned int length,
-+					  struct mlx4_cqe *cqe)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct mlx4_en_lro *lro;
-+	struct sk_buff *skb;
-+	struct iphdr *iph;
-+	struct tcphdr *th;
-+	dma_addr_t dma;
-+	int tcp_hlen;
-+	int tcp_data_len;
-+	int hlen;
-+	u16 ip_len;
-+	void *va;
-+	u32 *ts;
-+	u32 seq;
-+	u32 tsval = (u32) ~0UL;
-+	u32 tsecr = 0;
-+	u32 ack_seq;
-+	u16 window;
-+
-+	/* This packet is eligible for LRO if it is:
-+	 * - DIX Ethernet (type interpretation)
-+	 * - TCP/IP (v4)
-+	 * - without IP options
-+	 * - not an IP fragment */
-+	if (!mlx4_en_can_lro(cqe->status))
-+		return -1;
-+
-+	/* Get pointer to TCP header. We already know that the packet is DIX Ethernet/IPv4/TCP
-+	 * with no VLAN (HW stripped it) and no IP options */
-+	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
-+	iph = va + ETH_HLEN;
-+	th = (struct tcphdr *)(iph + 1);
-+
-+	/* Synchronize headers for processing */
-+	dma = be64_to_cpu(rx_desc->data[0].addr);
-+#define MAX_LRO_HEADER		(ETH_HLEN + \
-+				 sizeof(*iph) + \
-+				 sizeof(*th) + \
-+				 TCPOLEN_TSTAMP_ALIGNED)
-+	dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
-+				      MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+
-+	/* We only handle aligned timestamp options */
-+	tcp_hlen = (th->doff << 2);
-+	if (tcp_hlen == sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) {
-+		ts = (u32*) (th + 1);
-+		if (unlikely(*ts != htonl((TCPOPT_NOP << 24) |
-+					  (TCPOPT_NOP << 16) |
-+					  (TCPOPT_TIMESTAMP << 8) |
-+					  TCPOLEN_TIMESTAMP)))
-+			goto sync_device;
-+		tsval = ntohl(ts[1]);
-+		tsecr = ts[2];
-+	} else if (tcp_hlen != sizeof(*th))
-+		goto sync_device;
-+
-+
-+	/* At this point we know we have a TCP packet that is likely to be
-+	 * eligible for LRO. Therefore, see now if we have an outstanding
-+	 * session that corresponds to this packet so we could flush it if
-+	 * something still prevents LRO */
-+	lro = mlx4_en_lro_find_session(mdev, ring, iph, th);
-+
-+	/* ensure no bits set besides ack or psh */
-+	if (th->fin || th->syn || th->rst || th->urg || th->ece ||
-+	    th->cwr || !th->ack) {
-+		if (lro) {
-+			/* First flush session to keep packets in-order */
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		}
-+		goto sync_device;
-+	}
-+
-+	/* Get ip length and verify that the frame is big enough */
-+	ip_len = ntohs(iph->tot_len);
-+	if (unlikely(length < ETH_HLEN + ip_len)) {
-+		mlx4_warn(mdev, "Cannot LRO - ip payload exceeds frame!\n");
-+		goto sync_device;
-+	}
-+
-+	/* Get TCP payload length */
-+	tcp_data_len = ip_len - tcp_hlen - sizeof(struct iphdr);
-+	seq = ntohl(th->seq);
-+	if (!tcp_data_len)
-+		goto flush_session;
-+
-+	if (lro) {
-+		/* Check VLAN tag */
-+		if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+			if (cqe->sl_vid != lro->vlan_prio || !lro->has_vlan) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else if (lro->has_vlan) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* Check sequence number */
-+		if (unlikely(seq != lro->next_seq)) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* If the cumulative IP length is over 64K, flush and start
-+		 * a new session */
-+		if (lro->tot_len + tcp_data_len > 0xffff) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto new_session;
-+		}
-+
-+		/* Check timestamps */
-+		if (tcp_hlen != sizeof(*th)) {
-+			if (unlikely(lro->tsval > tsval || !tsecr))
-+				goto sync_device;
-+		}
-+
-+		window = th->window;
-+		ack_seq = th->ack_seq;
-+		if (likely(tcp_data_len)) {
-+			/* Append the data! */
-+			hlen = ETH_HLEN + sizeof(struct iphdr) + tcp_hlen;
-+			if (mlx4_en_lro_append(priv, lro, rx_desc, skb_frags,
-+							ring->page_alloc,
-+							tcp_data_len, hlen)) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else {
-+			/* No data */
-+			dma_sync_single_range_for_device(&mdev->dev->pdev->dev, dma,
-+							 0, MAX_LRO_HEADER,
-+							 DMA_FROM_DEVICE);
-+		}
-+
-+		/* Update session */
-+		lro->psh |= th->psh;
-+		lro->next_seq += tcp_data_len;
-+		lro->data_csum = csum_block_add(lro->data_csum,
-+					mlx4_en_lro_tcp_data_csum(iph, th,
-+								  tcp_data_len),
-+					lro->tot_len);
-+		lro->tot_len += tcp_data_len;
-+		lro->tsval = tsval;
-+		lro->tsecr = tsecr;
-+		lro->ack_seq = ack_seq;
-+		lro->window = window;
-+		if (tcp_data_len > lro->mss)
-+			lro->mss = tcp_data_len;
-+		priv->port_stats.lro_aggregated++;
-+		if (th->psh)
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		return 0;
-+	}
-+
-+new_session:
-+	if (th->psh)
-+		goto sync_device;
-+	lro = mlx4_en_lro_alloc_session(priv, ring);
-+	if (lro) {
-+		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags, ring->page_alloc,
-+							     ETH_HLEN + ip_len);
-+		if (skb) {
-+			int index;
-+
-+			/* Add in the skb */
-+			lro->skb = skb;
-+			lro->skb_last = skb;
-+			skb->protocol = eth_type_trans(skb, priv->dev);
-+			skb->ip_summed = CHECKSUM_UNNECESSARY;
-+
-+			/* Initialize session */
-+			lro->saddr = iph->saddr;
-+			lro->daddr = iph->daddr;
-+			lro->sport_dport = *((u32*) &th->source);
-+
-+			lro->next_seq = seq + tcp_data_len;
-+			lro->tot_len = ip_len;
-+			lro->psh = th->psh;
-+			lro->ack_seq = th->ack_seq;
-+			lro->window = th->window;
-+			lro->mss = tcp_data_len;
-+			lro->data_csum = mlx4_en_lro_tcp_data_csum(iph, th,
-+						tcp_data_len);
-+
-+			/* Handle vlans */
-+			if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+				lro->vlan_prio = cqe->sl_vid;
-+				lro->has_vlan = 1;
-+			} else
-+				lro->has_vlan = 0;
-+
-+			/* Handle timestamps */
-+			if (tcp_hlen != sizeof(*th)) {
-+				lro->tsval = tsval;
-+				lro->tsecr = tsecr;
-+				lro->has_timestamp = 1;
-+			} else {
-+				lro->tsval = (u32) ~0UL;
-+				lro->has_timestamp = 0;
-+			}
-+
-+			/* Activate this session */
-+			lro->expires = jiffies + HZ / 25;
-+			hlist_del(&lro->node);
-+			index = LRO_INDEX(th, mdev->profile.num_lro);
-+
-+			hlist_add_head(&lro->node, &ring->lro_hash[index]);
-+			hlist_add_head(&lro->flush_node, &ring->lro_flush);
-+			priv->port_stats.lro_aggregated++;
-+			return 0;
-+		}
-+	} else {
-+		priv->port_stats.lro_no_desc++;
-+	}
-+
-+flush_session:
-+	if (lro)
-+		mlx4_en_lro_flush_single(priv, ring, lro);
-+sync_device:
-+	dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
-+					 MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+	return -1;
-+}
-+
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_free, node) {
-+		hlist_del(&lro->node);
-+		kfree(lro);
-+	}
-+	kfree(ring->lro_hash);
-+}
-+
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro)
-+{
-+	struct mlx4_en_lro *lro;
-+	int i;
-+
-+	INIT_HLIST_HEAD(&ring->lro_free);
-+	INIT_HLIST_HEAD(&ring->lro_flush);
-+	ring->lro_hash = kmalloc(sizeof(struct hlist_head) * num_lro,
-+				 GFP_KERNEL);
-+	if (!ring->lro_hash)
-+		return -ENOMEM;
-+
-+	for (i = 0; i < num_lro; i++) {
-+		INIT_HLIST_HEAD(&ring->lro_hash[i]);
-+		lro = kzalloc(sizeof(struct mlx4_en_lro), GFP_KERNEL);
-+		if (!lro) {
-+			mlx4_en_lro_destroy(ring);
-+			return -ENOMEM;
-+		}
-+		INIT_HLIST_NODE(&lro->node);
-+		INIT_HLIST_NODE(&lro->flush_node);
-+		hlist_add_head(&lro->node, &ring->lro_free);
-+	}
-+	return 0;
-+}
-+
-+
 diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
-index 6199f4c..92f91d2 100644
+index 2d57ae6..3b394d7 100644
 --- a/drivers/net/mlx4/en_netdev.c
 +++ b/drivers/net/mlx4/en_netdev.c
 @@ -333,7 +333,8 @@ void mlx4_en_netpoll(struct net_device *dev)
@@ -625,10 +57,10 @@
 -		napi_synchronize(&cq->napi);
 +		while (test_bit(__LINK_STATE_RX_SCHED, &cq->poll_dev->state))
 +			msleep(1);
- 		mlx4_en_process_rx_cq(dev, cq, 0);
- 		spin_unlock_irqrestore(&cq->lock, flags);
- 	}
-@@ -757,8 +758,6 @@ void mlx4_en_stop_port(struct net_device *dev)
+ 		if (priv->rx_ring[i].use_frags)
+ 			mlx4_en_process_rx_cq(dev, cq, 0);
+ 		else
+@@ -755,8 +756,6 @@ void mlx4_en_stop_port(struct net_device *dev)
  	/* Free RX Rings */
  	for (i = 0; i < priv->rx_ring_num; i++) {
  		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -637,226 +69,11 @@
  		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
  	}
  }
-diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
-index 6e96d0a..2b0ca84 100644
---- a/drivers/net/mlx4/en_params.c
-+++ b/drivers/net/mlx4/en_params.c
-@@ -142,21 +142,6 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
-  * Ethtool support
-  */
- 
--static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
--{
--	int i;
--
--	priv->port_stats.lro_aggregated = 0;
--	priv->port_stats.lro_flushed = 0;
--	priv->port_stats.lro_no_desc = 0;
--
--	for (i = 0; i < priv->rx_ring_num; i++) {
--		priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
--		priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
--		priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
--	}
--}
--
- static void
- mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
- {
-@@ -266,8 +251,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
- 
- 	spin_lock_bh(&priv->stats_lock);
- 
--	mlx4_en_update_lro_stats(priv);
--
- 	for (i = 0; i < NUM_MAIN_STATS; i++)
- 		data[index++] = ((unsigned long *) &priv->stats)[i];
- 	for (i = 0; i < NUM_PORT_STATS; i++)
-@@ -457,7 +440,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
- 	.get_rx_csum = mlx4_en_get_rx_csum,
- 	.set_rx_csum = mlx4_en_set_rx_csum,
- 	.get_tx_csum = ethtool_op_get_tx_csum,
--	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
-+	.set_tx_csum = ethtool_op_set_tx_csum,
- 	.get_strings = mlx4_en_get_strings,
- 	.get_sset_count = mlx4_en_get_sset_count,
- 	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
 diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
-index 5a95d74..16f07bc 100644
+index 9d144cd..14b5faf 100644
 --- a/drivers/net/mlx4/en_rx.c
 +++ b/drivers/net/mlx4/en_rx.c
-@@ -37,6 +37,7 @@
- #include <linux/if_ether.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/etherdevice.h>
- 
- #include "mlx4_en.h"
- 
-@@ -51,18 +52,6 @@ static void mlx4_en_srq_event(struct mlx
- 	return;
- }
- 
--static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
--				   void **ip_hdr, void **tcpudp_hdr,
--				   u64 *hdr_flags, void *priv)
--{
--	*mac_hdr = page_address(frags->page) + frags->page_offset;
--	*ip_hdr = *mac_hdr + ETH_HLEN;
--	*tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
--	*hdr_flags = LRO_IPV4 | LRO_TCP;
--
--	return 0;
--}
--
- static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
- 			      struct mlx4_en_rx_desc *rx_desc,
- 			      struct skb_frag_struct *skb_frags,
-@@ -372,23 +361,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- 	}
- 	ring->buf = ring->wqres.buf.direct.buf;
- 
--	/* Configure lro mngr */
--	memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
--	ring->lro.dev = priv->dev;
--	ring->lro.features = LRO_F_NAPI;
--	ring->lro.frag_align_pad = NET_IP_ALIGN;
--	ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
--	ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
--	ring->lro.max_desc = mdev->profile.num_lro;
--	ring->lro.max_aggr = MAX_SKB_FRAGS;
--	ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
--				    sizeof(struct net_lro_desc),
--				    GFP_KERNEL);
--	if (!ring->lro.lro_arr) {
--		mlx4_err(mdev, "Failed to allocate lro array\n");
-+	/* Allocate LRO sessions */
-+	if (mlx4_en_lro_init(ring, mdev->profile.num_lro)) {
-+		mlx4_err(mdev, "Failed allocating lro sessions\n");
- 		goto err_map;
- 	}
--	ring->lro.get_frag_header = mlx4_en_get_frag_header;
- 
- 	return 0;
- 
-@@ -501,7 +478,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 
--	kfree(ring->lro.lro_arr);
-+	mlx4_en_lro_destroy(ring);
- 	mlx4_en_unmap_buffer(&ring->wqres.buf);
- 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
- 	vfree(ring->rx_info);
-@@ -522,12 +499,12 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- 
- 
- /* Unmap a completed descriptor and free unused pages */
--static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
--				    struct mlx4_en_rx_desc *rx_desc,
--				    struct skb_frag_struct *skb_frags,
--				    struct skb_frag_struct *skb_frags_rx,
--				    struct mlx4_en_rx_alloc *page_alloc,
--				    int length)
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct mlx4_en_frag_info *frag_info;
-@@ -570,11 +547,11 @@ fail:
- }
- 
- 
--static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
--				      struct mlx4_en_rx_desc *rx_desc,
--				      struct skb_frag_struct *skb_frags,
--				      struct mlx4_en_rx_alloc *page_alloc,
--				      unsigned int length)
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct sk_buff *skb;
-@@ -663,11 +640,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 	struct mlx4_cqe *cqe;
- 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
- 	struct skb_frag_struct *skb_frags;
--	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
- 	struct mlx4_en_rx_desc *rx_desc;
- 	struct sk_buff *skb;
- 	int index;
--	int nr;
- 	unsigned int length;
- 	int polled = 0;
- 	int ip_summed;
-@@ -718,38 +693,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 			if ((cqe->status & MLX4_CQE_STATUS_IPOK) &&
- 			    (cqe->checksum == 0xffff)) {
- 				priv->port_stats.rx_chksum_good++;
--				/* This packet is eligible for LRO if it is:
--				 * - DIX Ethernet (type interpretation)
--				 * - TCP/IP (v4)
--				 * - without IP options
--				 * - not an IP fragment */
--				if (mlx4_en_can_lro(cqe->status) &&
--				    dev->features & NETIF_F_LRO) {
--
--					nr = mlx4_en_complete_rx_desc(
--						priv, rx_desc,
--						skb_frags, lro_frags,
--						ring->page_alloc, length);
--					if (!nr)
--						goto next;
--
--					if (priv->vlgrp && (cqe->vlan_my_qpn &
--						MLX4_CQE_VLAN_PRESENT_MASK)) {
--						lro_vlan_hwaccel_receive_frags(
--						       &ring->lro, lro_frags,
--						       length, length,
--						       priv->vlgrp,
--						       be16_to_cpu(cqe->sl_vid),
--						       NULL, 0);
--					} else
--						lro_receive_frags(&ring->lro,
--								  lro_frags,
--								  length,
--								  length,
--								  NULL, 0);
--
-+				if (mdev->profile.num_lro &&
-+				    !mlx4_en_lro_rx(priv, ring, rx_desc,
-+						    skb_frags, length, cqe))
- 					goto next;
--				}
- 
- 				/* LRO not possible, complete processing here */
- 				ip_summed = CHECKSUM_UNNECESSARY;
-@@ -790,13 +737,15 @@ next:
- 		if (++polled == budget) {
- 			/* We are here because we reached the NAPI budget -
- 			 * flush only pending LRO sessions */
--			lro_flush_all(&ring->lro);
-+			if (mdev->profile.num_lro)
-+				mlx4_en_lro_flush(priv, ring, 0);
- 			goto out;
- 		}
- 	}
- 
- 	/* If CQ is empty flush all LRO sessions unconditionally */
--	lro_flush_all(&ring->lro);
-+	if (mdev->profile.num_lro)
-+		mlx4_en_lro_flush(priv, ring, 1);
- 
- out:
- 	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
-@@ -820,30 +769,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
+@@ -1041,30 +1041,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
  	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
  
  	if (priv->port_up)
@@ -877,8 +94,8 @@
  	int done;
 +	int work = min(*budget, poll_dev->quota);
  
--	done = mlx4_en_process_rx_cq(dev, cq, budget);
-+	done = mlx4_en_process_rx_cq(dev, cq, work);
+-	done = cq->process_cq(dev, cq, budget);
++	done = cq->process_cq(dev, cq, work);
 +	dev->quota -= done;
 +	*budget -= done;
  
@@ -901,76 +118,11 @@
  }
  
  
-diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
-index 2297fec..59f8772 100644
---- a/drivers/net/mlx4/en_tx.c
-+++ b/drivers/net/mlx4/en_tx.c
-@@ -37,6 +37,7 @@
- #include <linux/skbuff.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/tcp.h>
- 
- #include "mlx4_en.h"
- 
 diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
-index 1011803..7c8c489 100644
+index ebaed67..456cd91 100644
 --- a/drivers/net/mlx4/mlx4_en.h
 +++ b/drivers/net/mlx4/mlx4_en.h
-@@ -38,7 +38,7 @@
- #include <linux/list.h>
- #include <linux/mutex.h>
- #include <linux/netdevice.h>
--#include <linux/inet_lro.h>
-+#include <net/checksum.h>
- 
- #include <linux/mlx4/device.h>
- #include <linux/mlx4/qp.h>
-@@ -268,11 +268,42 @@ struct mlx4_en_rx_desc {
- 	struct mlx4_wqe_data_seg data[0];
- };
- 
-+struct mlx4_en_lro {
-+	struct hlist_node node;
-+	struct hlist_node flush_node;
-+
-+	/* Id fields come first: */
-+	u32 saddr;
-+	u32 daddr;
-+	u32 sport_dport;
-+	u32 next_seq;
-+	u16 tot_len;
-+	u8 psh;
-+
-+	u32 tsval;
-+	u32 tsecr;
-+	u32 ack_seq;
-+	u16 window;
-+	__be16 vlan_prio;
-+	u16 has_vlan;
-+	u16 has_timestamp;
-+	u16 mss;
-+	__wsum  data_csum;
-+
-+	unsigned long expires;
-+	struct sk_buff *skb;
-+	struct sk_buff *skb_last;
-+};
-+
-+
- struct mlx4_en_rx_ring {
- 	struct mlx4_srq srq;
- 	struct mlx4_hwq_resources wqres;
- 	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
--	struct net_lro_mgr lro;
-+	struct mlx4_en_lro lro;
-+	struct hlist_head *lro_hash;
-+	struct hlist_head lro_free;
-+	struct hlist_head lro_flush;
- 	u32 size ;	/* number of Rx descs*/
- 	u32 actual_size;
- 	u32 size_mask;
-@@ -311,7 +342,7 @@ struct mlx4_en_cq {
+@@ -328,7 +328,7 @@ struct mlx4_en_cq {
  	int                     ring;
  	spinlock_t              lock;
  	struct net_device      *dev;
@@ -979,43 +131,12 @@
  	/* Per-core Tx cq processing support */
  	struct timer_list timer;
  	int size;
-@@ -541,7 +572,7 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- int mlx4_en_process_rx_cq(struct net_device *dev,
- 			  struct mlx4_en_cq *cq,
- 			  int budget);
+@@ -571,7 +571,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
+ int mlx4_en_process_rx_cq_skb(struct net_device *dev,
+ 			      struct mlx4_en_cq *cq,
+ 			      int budget);
 -int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
 +int mlx4_en_poll_rx_cq(struct net_device *poll_dev, int *budget);
  void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
  			     int is_tx, int rss, int qpn, int cqn, int srqn,
  			     struct mlx4_qp_context *context);
-@@ -552,6 +583,27 @@ void mlx4_en_calc_rx_buf(struct net_device *dev);
- void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
- 				 struct mlx4_en_rss_map *rss_map,
- 				 int num_entries, int num_rings);
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all);
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+		   struct mlx4_en_rx_desc *rx_desc,
-+		   struct skb_frag_struct *skb_frags,
-+		   unsigned int length, struct mlx4_cqe *cqe);
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring);
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro);
-+
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv* priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length);
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv* priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length);
-+
- void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
- int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
- void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
--- 
-1.5.4
-

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/mlx4_en_0110_no_set_flags.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/mlx4_en_0110_no_set_flags.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.21/mlx4_en_0110_no_set_flags.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -7,9 +7,9 @@
 --- ofed_kernel.orig/drivers/net/mlx4/en_params.c
 +++ ofed_kernel/drivers/net/mlx4/en_params.c
 @@ -593,8 +593,6 @@ const struct ethtool_ops mlx4_en_ethtool
- 	.get_pauseparam = mlx4_en_get_pauseparam,
  	.set_pauseparam = mlx4_en_set_pauseparam,
  	.get_ringparam = mlx4_en_get_ringparam,
+ 	.set_ringparam = mlx4_en_set_ringparam,
 -	.get_flags = ethtool_op_get_flags,
 -	.set_flags = ethtool_op_set_flags,
  };

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/cxgb3_0002_undo_250.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/cxgb3_0002_undo_250.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/cxgb3_0002_undo_250.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,8 @@
-commit 204e2f98c2d13f869b8541f3c57c7314f75cab11
-Author: Divy Le Ray <divy at chelsio.com>
-Date:   Tue May 6 19:26:01 2008 -0700
-
-    cxgb3 - fix EEH
-    
-    Reset the chip when the PCI link goes down.
-    Preserve the napi structure when a sge qset's resources are freed.
-    Replay only HW initialization when the chip comes out of reset.
-    
-    Signed-off-by: Divy Le ray <divy at chelsio.com>
-    Signed-off-by: Jeff Garzik <jgarzik at redhat.com>
-
----
- drivers/net/cxgb3/common.h     |    1 -
- drivers/net/cxgb3/cxgb3_main.c |   10 ++++------
- drivers/net/cxgb3/regs.h       |    8 --------
- drivers/net/cxgb3/sge.c        |   31 ++-----------------------------
- drivers/net/cxgb3/t3_hw.c      |   28 ----------------------------
- 5 files changed, 6 insertions(+), 72 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/common.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/common.h
-+++ ofed_kernel/drivers/net/cxgb3/common.h
-@@ -700,7 +700,6 @@ void mac_prep(struct cmac *mac, struct a
+diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
+index d5e9bf7..dc2c79d 100644
+--- a/drivers/net/cxgb3/common.h
++++ b/drivers/net/cxgb3/common.h
+@@ -726,7 +726,6 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
  void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
  int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
  		    int reset);
@@ -31,11 +10,11 @@
  void t3_led_ready(struct adapter *adapter);
  void t3_fatal_err(struct adapter *adapter);
  void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -2449,6 +2449,9 @@ static pci_ers_result_t t3_io_error_dete
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 8de820e..48fbda6 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -2485,6 +2485,9 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
  	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
  		offload_close(&adapter->tdev);
  
@@ -45,7 +24,7 @@
  	adapter->flags &= ~FULL_INIT_DONE;
  
  	pci_disable_device(pdev);
-@@ -2473,12 +2476,8 @@ static pci_ers_result_t t3_io_slot_reset
+@@ -2509,12 +2512,8 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
  		goto err;
  	}
  	pci_set_master(pdev);
@@ -59,7 +38,7 @@
  		goto err;
  
  	return PCI_ERS_RESULT_RECOVERED;
-@@ -2630,7 +2629,6 @@ static int __devinit init_one(struct pci
+@@ -2666,7 +2665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
  	}
  
  	pci_set_master(pdev);
@@ -67,10 +46,10 @@
  
  	mmio_start = pci_resource_start(pdev, 0);
  	mmio_len = pci_resource_len(pdev, 0);
-Index: ofed_kernel/drivers/net/cxgb3/regs.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/regs.h
-+++ ofed_kernel/drivers/net/cxgb3/regs.h
+diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
+index a035d5c..086cd02 100644
+--- a/drivers/net/cxgb3/regs.h
++++ b/drivers/net/cxgb3/regs.h
 @@ -444,14 +444,6 @@
  
  #define A_PCIE_CFG 0x88
@@ -86,11 +65,11 @@
  #define S_PCIE_CLIDECEN    16
  #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
  #define F_PCIE_CLIDECEN    V_PCIE_CLIDECEN(1U)
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index 1b0861d..912f816 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
  }
  
  /**
@@ -124,7 +103,7 @@
   *	free_qset - free the resources of an SGE queue set
   *	@adapter: the adapter owning the queue set
   *	@q: the queue set
-@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter 
+@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -133,20 +112,11 @@
  }
  
  /**
-@@ -1429,7 +1402,7 @@ static void restart_ctrlq(unsigned long 
-  */
- int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
- {
--	int ret;
-+	int ret;
- 	local_bh_disable();
- 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
- 	local_bh_enable();
-Index: ofed_kernel/drivers/net/cxgb3/t3_hw.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/t3_hw.c
-+++ ofed_kernel/drivers/net/cxgb3/t3_hw.c
-@@ -3265,7 +3265,6 @@ static void config_pcie(struct adapter *
+diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
+index bf5c076..533fc74 100644
+--- a/drivers/net/cxgb3/t3_hw.c
++++ b/drivers/net/cxgb3/t3_hw.c
+@@ -3303,7 +3303,6 @@ static void config_pcie(struct adapter *adap)
  
  	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
  	t3_set_reg_field(adap, A_PCIE_CFG, 0,
@@ -154,15 +124,14 @@
  			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
  }
  
-@@ -3657,30 +3656,3 @@ void t3_led_ready(struct adapter *adapte
- 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+@@ -3706,31 +3705,3 @@ void t3_led_ready(struct adapter *adapter)
  			 F_GPIO0_OUT_VAL);
  }
--
+ 
 -int t3_replay_prep_adapter(struct adapter *adapter)
 -{
 -	const struct adapter_info *ai = adapter->params.info;
--	unsigned int i, j = 0;
+-	unsigned int i, j = -1;
 -	int ret;
 -
 -	early_hw_init(adapter, ai);
@@ -171,15 +140,17 @@
 -		return ret;
 -
 -	for_each_port(adapter, i) {
+-		const struct port_type_info *pti;
 -		struct port_info *p = adap2pinfo(adapter, i);
--		while (!adapter->params.vpd.port_type[j])
--			++j;
 -
--		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
--					ai->mdio_ops);
+-		while (!adapter->params.vpd.port_type[++j])
+-			;
 -
+-		pti = &port_types[adapter->params.vpd.port_type[j]];
+-		ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
+-		if (ret)
+-			return ret;
 -		p->phy.ops->power_down(&p->phy, 1);
--		++j;
 -	}
 -
 -return 0;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/cxgb3_0010_napi.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/cxgb3_0010_napi.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/cxgb3_0010_napi.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,21 @@
----
- drivers/net/cxgb3/adapter.h    |   22 +++--
- drivers/net/cxgb3/cxgb3_main.c |   94 ++++++++++++++++------
- drivers/net/cxgb3/sge.c        |  170 ++++++++++++++++++++++++-----------------
- 3 files changed, 185 insertions(+), 101 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/adapter.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/adapter.h
-+++ ofed_kernel/drivers/net/cxgb3/adapter.h
-@@ -47,13 +47,11 @@
- #include <asm/io.h>
+diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
+index 3b33ecb..21dad82 100644
+--- a/drivers/net/cxgb3/adapter.h
++++ b/drivers/net/cxgb3/adapter.h
+@@ -48,12 +48,10 @@
  
  struct vlan_group;
--struct adapter;
+ struct adapter;
 -struct sge_qset;
  
-+struct adapter;
  struct port_info {
  	struct adapter *adapter;
  	struct vlan_group *vlan_grp;
 -	struct sge_qset *qs;
- 	const struct port_type_info *port_type;
  	u8 port_id;
  	u8 rx_csum_offload;
-@@ -184,8 +182,6 @@ enum {				/* per port SGE statistics */
+ 	u8 nqsets;
+@@ -183,8 +181,6 @@ enum {				/* per port SGE statistics */
  #define T3_MAX_LRO_MAX_PKTS 64
  
  struct sge_qset {		/* an SGE queue set */
@@ -32,7 +24,7 @@
  	struct sge_rspq rspq;
  	struct sge_fl fl[SGE_RXQ_PER_SET];
  	struct sge_txq txq[SGE_TXQ_PER_SET];
-@@ -196,7 +192,7 @@ struct sge_qset {		/* an SGE queue set *
+@@ -195,7 +191,7 @@ struct sge_qset {		/* an SGE queue set */
  	int lro_enabled;
  	int lro_frag_len;
  	void *lro_va;
@@ -41,7 +33,7 @@
  	unsigned long txq_stopped;	/* which Tx queues are stopped */
  	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
  	unsigned long port_stats[SGE_PSTAT_MAX];
-@@ -241,6 +237,12 @@ struct adapter {
+@@ -240,6 +236,12 @@ struct adapter {
  	struct delayed_work adap_check_task;
  	struct work_struct ext_intr_handler_task;
  
@@ -54,7 +46,7 @@
  	struct dentry *debugfs_root;
  
  	struct mutex mdio_lock;
-@@ -267,6 +269,12 @@ static inline struct port_info *adap2pin
+@@ -266,6 +268,12 @@ static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
  	return netdev_priv(adap->port[idx]);
  }
  
@@ -67,7 +59,7 @@
  #define OFFLOAD_DEVMAP_BIT 15
  
  #define tdev2adap(d) container_of(d, struct adapter, tdev)
-@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, str
+@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -76,11 +68,11 @@
  int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
  		unsigned char *data);
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -410,17 +410,49 @@ static void setup_rss(struct adapter *ad
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 0f4c694..342d441 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -435,17 +435,49 @@ static void setup_rss(struct adapter *adap)
  		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
  }
  
@@ -98,9 +90,7 @@
 -	int i;
 +	int i, j, dummy_idx = 0;
 +	struct net_device *nd;
- 
--	for (i = 0; i < SGE_QSETS; i++) {
--		struct sge_qset *qs = &adap->sge.qs[i];
++
 +	for_each_port(adap, i) {
 +		struct net_device *dev = adap->port[i];
 +		const struct port_info *pi = netdev_priv(dev);
@@ -112,7 +102,9 @@
 +				nd = alloc_netdev(sizeof(*p), "", ether_setup);
 +				if (!nd)
 +					goto free_all;
-+
+ 
+-	for (i = 0; i < SGE_QSETS; i++) {
+-		struct sge_qset *qs = &adap->sge.qs[i];
 +				p = netdev_priv(nd);
 +				p->adapter = adap;
 +				nd->weight = 64;
@@ -137,7 +129,7 @@
  }
  
  /*
-@@ -431,18 +463,20 @@ static void init_napi(struct adapter *ad
+@@ -456,18 +488,20 @@ static void init_napi(struct adapter *adap)
  static void quiesce_rx(struct adapter *adap)
  {
  	int i;
@@ -168,7 +160,7 @@
  }
  
  /**
-@@ -455,7 +489,7 @@ static void enable_all_napi(struct adapt
+@@ -480,7 +514,7 @@ static void enable_all_napi(struct adapter *adap)
   */
  static int setup_sge_qsets(struct adapter *adap)
  {
@@ -177,7 +169,7 @@
  	unsigned int ntxq = SGE_TXQ_PER_SET;
  
  	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
-@@ -463,14 +497,15 @@ static int setup_sge_qsets(struct adapte
+@@ -488,14 +522,15 @@ static int setup_sge_qsets(struct adapter *adap)
  
  	for_each_port(adap, i) {
  		struct net_device *dev = adap->port[i];
@@ -196,7 +188,7 @@
  			if (err) {
  				t3_free_sge_resources(adap);
  				return err;
-@@ -884,6 +919,10 @@ static int cxgb_up(struct adapter *adap)
+@@ -909,6 +944,10 @@ static int cxgb_up(struct adapter *adap)
  				goto out;
  		}
  
@@ -207,7 +199,7 @@
  		err = t3_init_hw(adap, 0);
  		if (err)
  			goto out;
-@@ -896,7 +935,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -921,7 +960,6 @@ static int cxgb_up(struct adapter *adap)
  			goto out;
  
  		setup_rss(adap);
@@ -215,7 +207,7 @@
  		adap->flags |= FULL_INIT_DONE;
  	}
  
-@@ -924,7 +962,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -949,7 +987,6 @@ static int cxgb_up(struct adapter *adap)
  				      adap->name, adap)))
  		goto irq_err;
  
@@ -223,7 +215,7 @@
  	t3_sge_start(adap);
  	t3_intr_enable(adap);
  
-@@ -1061,10 +1098,8 @@ static int cxgb_open(struct net_device *
+@@ -1086,10 +1123,8 @@ static int cxgb_open(struct net_device *dev)
  	int other_ports = adapter->open_device_map & PORT_MASK;
  	int err;
  
@@ -235,7 +227,7 @@
  
  	set_bit(pi->port_id, &adapter->open_device_map);
  	if (is_offload(adapter) && !ofld_disable) {
-@@ -2700,6 +2735,7 @@ static int __devinit init_one(struct pci
+@@ -2736,6 +2771,7 @@ static int __devinit init_one(struct pci_dev *pdev,
  #ifdef CONFIG_NET_POLL_CONTROLLER
  		netdev->poll_controller = cxgb_netpoll;
  #endif
@@ -243,7 +235,7 @@
  
  		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
  	}
-@@ -2800,6 +2836,12 @@ static void __devexit remove_one(struct 
+@@ -2836,6 +2872,12 @@ static void __devexit remove_one(struct pci_dev *pdev)
  		t3_free_sge_resources(adapter);
  		cxgb_disable_msi(adapter);
  
@@ -256,11 +248,11 @@
  		for_each_port(adapter, i)
  			if (adapter->port[i])
  				free_netdev(adapter->port[i]);
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter 
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index f6bc6fe..3bbf626 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -270,7 +262,7 @@
  	memset(q, 0, sizeof(*q));
  }
  
-@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, str
+@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
  	unsigned int ndesc, pidx, credits, gen, compl;
  	const struct port_info *pi = netdev_priv(dev);
  	struct adapter *adap = pi->adapter;
@@ -279,7 +271,7 @@
  	struct sge_txq *q = &qs->txq[TXQ_ETH];
  
  	/*
-@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long 
+@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long data)
  	struct sk_buff *skb;
  	struct sge_qset *qs = (struct sge_qset *)data;
  	struct sge_txq *q = &qs->txq[TXQ_CTRL];
@@ -295,7 +287,7 @@
  
  		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
  
-@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long 
+@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long data)
  
  	spin_unlock(&q->lock);
  	wmb();
@@ -304,7 +296,7 @@
  		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
  }
  
-@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struc
+@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
  	else {
  		struct sge_qset *qs = rspq_to_qset(q);
  
@@ -314,7 +306,7 @@
  		q->rx_head = skb;
  	}
  	q->rx_tail = skb;
-@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundl
+@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev,
   *	receive handler.  Batches need to be of modest size as we do prefetches
   *	on the packets in each.
   */
@@ -357,7 +349,7 @@
  			prefetch(head->data);
  			skbs[ngathered] = head;
  			head = head->next;
-@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct 
+@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct *napi, int budget)
  		}
  		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
  	}
@@ -370,7 +362,7 @@
  }
  
  /**
-@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const
+@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const struct rsp_desc *r)
  
  /**
   *	napi_rx_handler - the NAPI handler for Rx processing
@@ -449,7 +441,7 @@
  }
  
  /**
-@@ -2448,7 +2462,8 @@ static inline int handle_responses(struc
+@@ -2448,7 +2462,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
  		return 0;
  	}
@@ -459,7 +451,7 @@
  	return 1;
  }
  
-@@ -2459,7 +2474,8 @@ static inline int handle_responses(struc
+@@ -2459,7 +2474,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -469,7 +461,7 @@
  	struct sge_rspq *q = &qs->rspq;
  
  	spin_lock(&q->lock);
-@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, vo
+@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -484,7 +476,7 @@
  		q->unhandled_irqs++;
  	spin_unlock(&q->lock);
  	return IRQ_HANDLED;
-@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, 
+@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, void *cookie)
  	return IRQ_HANDLED;
  }
  
@@ -502,7 +494,7 @@
  		return 1;
  	}
  	return 0;
-@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int 
+@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
  
  	spin_lock(&q->lock);
  
@@ -515,7 +507,7 @@
  	if (!new_packets && t3_slow_intr_handler(adap) == 0)
  		q->unhandled_irqs++;
  
-@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, voi
+@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, void *cookie)
  static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  {
  	u32 map;
@@ -527,7 +519,7 @@
  
  	t3_write_reg(adap, A_PL_CLI, 0);
  	map = t3_read_reg(adap, A_SG_DATA_INTR);
-@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq
+@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  	if (unlikely(map & F_ERRINTR))
  		t3_slow_intr_handler(adap);
  
@@ -550,7 +542,7 @@
  
  	spin_unlock(&q0->lock);
  	return IRQ_HANDLED;
-@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long d
+@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long data)
  {
  	spinlock_t *lock;
  	struct sge_qset *qs = (struct sge_qset *)data;
@@ -560,7 +552,7 @@
  
  	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
  		reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
-@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long d
+@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long data)
  		spin_unlock(&qs->txq[TXQ_OFLD].lock);
  	}
  	lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
@@ -572,7 +564,7 @@
  			u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
  
  			if (qs->fl[0].credits < qs->fl[0].size)
-@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long d
+@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long data)
   */
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
  {
@@ -586,7 +578,7 @@
  }
  
  /**
-@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_
+@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
   */
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -595,7 +587,7 @@
  {
  	int i, avail, ret = -ENOMEM;
  	struct sge_qset *q = &adapter->sge.qs[id];
-@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *ad
+@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  	}
  
  	spin_unlock_irq(&adapter->sge.reg_lock);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,23 +3,32 @@
  drivers/infiniband/hw/ipath/ipath_user_sdma.c |    6 +++---
  2 files changed, 4 insertions(+), 4 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:37:35.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:41:22.000000000 -0800
 @@ -698,7 +698,7 @@ retry:
  
  	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
  			      tx->map_len, DMA_TO_DEVICE);
--	if (dma_mapping_error(&dd->pcidev->dev, addr)) {
-+	if (dma_mapping_error(addr)) {
- 		ret = -EIO;
- 		goto unlock;
- 	}
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+-	if (dma_mapping_error(&dd->pcidev->dev, addr))
++	if (dma_mapping_error(addr))
+ 		goto ioerr;
+ 
+ 	dwoffset = tx->map_len >> 2;
+@@ -739,7 +739,7 @@ retry:
+ 		dw = (len + 3) >> 2;
+ 		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
+ 				      DMA_TO_DEVICE);
+-		if (dma_mapping_error(&dd->pcidev->dev, addr))
++		if (dma_mapping_error(addr))
+ 			goto unmap;
+ 		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
+ 		/* SDmaUseLargeBuf has to be set in every descriptor */
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:18.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:54.000000000 -0800
 @@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(cons
  
  	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/ipoib_to_2.6.23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/ipoib_to_2.6.23.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/ipoib_to_2.6.23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -10,11 +10,11 @@
  drivers/infiniband/ulp/ipoib/ipoib_main.c |   19 ++++------------
  3 files changed, 25 insertions(+), 32 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 19:00:27.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 19:30:31.000000000 +0200
-@@ -279,8 +279,6 @@ struct ipoib_dev_priv {
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-17 16:53:49.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-17 16:56:10.000000000 +0300
+@@ -277,8 +277,6 @@ struct ipoib_dev_priv {
  
  	struct net_device *dev;
  
@@ -23,7 +23,7 @@
  	unsigned long flags;
  
  	struct mutex vlan_mutex;
-@@ -425,7 +423,7 @@ extern struct workqueue_struct *ipoib_wo
+@@ -423,7 +421,7 @@ extern struct workqueue_struct *ipoib_wo
  
  /* functions */
  
@@ -32,10 +32,10 @@
  void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
  void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 19:00:27.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 19:30:31.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-17 16:53:56.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-17 16:57:35.000000000 +0300
 @@ -408,20 +408,19 @@ static int poll_tx(struct ipoib_dev_priv
  	return n == MAX_SEND_CQE;
  }
@@ -109,58 +109,40 @@
  }
  
  static void drain_tx_cq(struct net_device *dev)
-@@ -809,6 +811,7 @@ int ipoib_ib_dev_stop(struct net_device 
+@@ -708,8 +710,7 @@ int ipoib_ib_dev_open(struct net_device 
+ 	priv->poll_timer.function = ipoib_ib_tx_timer_func;
+ 	priv->poll_timer.data = (unsigned long)dev;
+ 
+-	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_enable(&priv->napi);
++	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+ 
+ 	return 0;
+ }
+@@ -828,8 +829,8 @@ int ipoib_ib_dev_stop(struct net_device 
+ 	struct ipoib_tx_buf *tx_req;
  	int i;
  
- 	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+-	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_disable(&priv->napi);
++	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 +	netif_poll_disable(dev);
  
  	ipoib_cm_dev_stop(dev);
  
-@@ -893,6 +896,7 @@ timeout:
- 		msleep(1);
- 	}
+@@ -903,6 +904,7 @@ timeout:
  
+ 	ipoib_ah_dev_cleanup(dev);
+ 
 +	netif_poll_enable(dev);
  	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
  
  	return 0;
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 19:00:28.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 19:31:25.000000000 +0200
-@@ -106,20 +106,16 @@ int ipoib_open(struct net_device *dev)
- 
- 	ipoib_dbg(priv, "bringing up interface\n");
- 
--	if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
--		napi_enable(&priv->napi);
-+	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- 
- 	if (ipoib_pkey_dev_delay_open(dev))
- 		return 0;
- 
--	if (ipoib_ib_dev_open(dev)) {
--		napi_disable(&priv->napi);
-+	if (ipoib_ib_dev_open(dev))
- 		return -EINVAL;
--	}
- 
- 	if (ipoib_ib_dev_up(dev)) {
- 		ipoib_ib_dev_stop(dev, 1);
--		napi_disable(&priv->napi);
- 		return -EINVAL;
- 	}
- 
-@@ -152,7 +148,6 @@ static int ipoib_stop(struct net_device 
- 	ipoib_dbg(priv, "stopping interface\n");
- 
- 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
--	napi_disable(&priv->napi);
- 
- 	netif_stop_queue(dev);
- 
-@@ -842,7 +837,7 @@ static void ipoib_timeout(struct net_dev
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-17 16:53:56.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-17 16:56:10.000000000 +0300
+@@ -825,7 +825,7 @@ static void ipoib_timeout(struct net_dev
  static int ipoib_hard_header(struct sk_buff *skb,
  			     struct net_device *dev,
  			     unsigned short type,
@@ -169,7 +151,7 @@
  {
  	struct ipoib_header *header;
  
-@@ -1008,10 +1003,6 @@ void ipoib_dev_cleanup(struct net_device
+@@ -991,10 +991,6 @@ void ipoib_dev_cleanup(struct net_device
  	priv->tx_ring = NULL;
  }
  
@@ -180,7 +162,7 @@
  static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
  		       void **tcph, u64 *hdr_flags, void *priv)
  {
-@@ -1069,13 +1060,13 @@ static void ipoib_setup(struct net_devic
+@@ -1052,13 +1048,13 @@ static void ipoib_setup(struct net_devic
  	dev->change_mtu		 = ipoib_change_mtu;
  	dev->hard_start_xmit	 = ipoib_start_xmit;
  	dev->tx_timeout		 = ipoib_timeout;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/iw_nes_050_to_2_6_24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/iw_nes_050_to_2_6_24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/iw_nes_050_to_2_6_24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:23:23.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -1118,19 +1118,19 @@ static ssize_t nes_show_wqm_quanta(struc
  static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
  					const char *buf, size_t count)

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/iw_nes_100_to_2_6_23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/iw_nes_100_to_2_6_23.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/iw_nes_100_to_2_6_23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/Kconfig nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig
---- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:38:45.000000000 -0500
 @@ -2,7 +2,6 @@ config INFINIBAND_NES
  	tristate "NetEffect RNIC Driver"
  	depends on PCI && INET && INFINIBAND
@@ -10,8 +10,8 @@
  	  This is a low-level driver for NetEffect RDMA enabled
  	  Network Interface Cards (RNIC).
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -360,10 +360,11 @@ struct ib_qp *nes_get_qp(struct ib_devic
   */
  static void nes_print_macaddr(struct net_device *netdev)
@@ -29,9 +29,9 @@
  
  /**
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_cm.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2008-11-07 15:26:56.000000000 -0600
-@@ -998,7 +998,6 @@ static int nes_addr_resolve_neigh(struct
+--- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1086,7 +1086,6 @@ static int nes_addr_resolve_neigh(struct
  	struct flowi fl;
  	struct neighbour *neigh;
  	int rc = -1;
@@ -39,7 +39,7 @@
  
  	memset(&fl, 0, sizeof fl);
  	fl.nl_u.ip4_u.daddr = htonl(dst_ip);
-@@ -1012,8 +1011,11 @@ static int nes_addr_resolve_neigh(struct
+@@ -1100,8 +1099,11 @@ static int nes_addr_resolve_neigh(struct
  	if (neigh) {
  		if (neigh->nud_state & NUD_VALID) {
  			nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -53,7 +53,7 @@
  			nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
  					     dst_ip, NES_ARP_ADD);
  			rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
-@@ -1042,7 +1044,6 @@ static struct nes_cm_node *make_cm_node(
+@@ -1130,7 +1132,6 @@ static struct nes_cm_node *make_cm_node(
  	int arpindex = 0;
  	struct nes_device *nesdev;
  	struct nes_adapter *nesadapter;
@@ -61,7 +61,7 @@
  
  	/* create an hte and cm_node for this instance */
  	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
-@@ -1111,8 +1112,11 @@ static struct nes_cm_node *make_cm_node(
+@@ -1197,8 +1198,11 @@ static struct nes_cm_node *make_cm_node(
  
  	/* copy the mac addr to node context */
  	memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
@@ -76,8 +76,8 @@
  	add_hte_node(cm_core, cm_node);
  	atomic_inc(&cm_nodes_created);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2008-11-07 15:38:18.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:38:45.000000000 -0500
 @@ -38,14 +38,9 @@
  #include <linux/ip.h>
  #include <linux/tcp.h>
@@ -90,10 +90,10 @@
 -module_param(nes_lro_max_aggr, uint, 0444);
 -MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
 -
- static u32 crit_err_count;
- u32 int_mod_timer_init;
- u32 int_mod_cq_depth_256;
-@@ -1540,25 +1535,6 @@ static void nes_rq_wqes_timeout(unsigned
+ static int wide_ppm_offset;
+ module_param(wide_ppm_offset, int, 0644);
+ MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm");
+@@ -1531,25 +1526,6 @@ static void nes_rq_wqes_timeout(unsigned
  }
  
  
@@ -119,7 +119,7 @@
  /**
   * nes_init_nic_qp
   */
-@@ -1783,14 +1759,6 @@ int nes_init_nic_qp(struct nes_device *n
+@@ -1773,14 +1749,6 @@ int nes_init_nic_qp(struct nes_device *n
  			jumbomode = 1;
  		nes_nic_init_timer_defaults(nesdev, jumbomode);
  	}
@@ -134,7 +134,7 @@
  	return 0;
  }
  
-@@ -2541,7 +2509,7 @@ static void nes_nic_napi_ce_handler(stru
+@@ -2556,7 +2524,7 @@ static void nes_nic_napi_ce_handler(stru
  {
  	struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
  
@@ -143,7 +143,7 @@
  }
  
  
-@@ -2576,13 +2544,10 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2591,13 +2559,10 @@ void nes_nic_ce_handler(struct nes_devic
  	u16 pkt_type;
  	u16 rqes_processed = 0;
  	u8 sq_cqes = 0;
@@ -157,7 +157,7 @@
  	do {
  		if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
  				NES_NIC_CQE_VALID) {
-@@ -2714,17 +2679,9 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2729,17 +2694,9 @@ void nes_nic_ce_handler(struct nes_devic
  							>> 16);
  					nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
  							nesvnic->netdev->name, vlan_tag);
@@ -178,7 +178,7 @@
  
  skip_rx_indicate0:
  				nesvnic->netdev->last_rx = jiffies;
-@@ -2755,8 +2712,6 @@ skip_rx_indicate0:
+@@ -2770,8 +2727,6 @@ skip_rx_indicate0:
  
  	} while (1);
  
@@ -188,18 +188,18 @@
  		barrier();
  		/* restart the queue if it had been stopped */
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.h nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:38:45.000000000 -0500
 @@ -33,8 +33,6 @@
  #ifndef __NES_HW_H
  #define __NES_HW_H
  
 -#include <linux/inet_lro.h>
 -
+ #define NES_PHY_TYPE_CX4       1
  #define NES_PHY_TYPE_1G        2
  #define NES_PHY_TYPE_IRIS      3
- #define NES_PHY_TYPE_ARGUS     4
-@@ -991,8 +989,6 @@ struct nes_hw_tune_timer {
+@@ -1005,8 +1003,6 @@ struct nes_hw_tune_timer {
  #define NES_TIMER_ENABLE_LIMIT      4
  #define NES_MAX_LINK_INTERRUPTS     128
  #define NES_MAX_LINK_CHECK          200
@@ -208,7 +208,7 @@
  
  struct nes_adapter {
  	u64              fw_ver;
-@@ -1168,7 +1164,6 @@ struct nes_vnic {
+@@ -1194,7 +1190,6 @@ struct nes_vnic {
  	u32               msg_enable;
  	/* u32 tx_avail; */
  	__be32            local_ipaddr;
@@ -216,7 +216,7 @@
  	spinlock_t           tx_lock;	/* could use netdev tx lock? */
  	struct timer_list    rq_wqes_timer;
  	u32                  nic_mem_size;
-@@ -1196,9 +1191,6 @@ struct nes_vnic {
+@@ -1222,9 +1217,6 @@ struct nes_vnic {
  	u8  of_device_registered;
  	u8  rdma_enabled;
  	u8  rx_checksum_disabled;
@@ -227,8 +227,8 @@
  
  struct nes_ib_device {
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
 @@ -96,35 +96,38 @@ static int nics_per_function = 1;
  /**
   * nes_netdev_poll
@@ -292,7 +292,7 @@
  	netif_stop_queue(netdev);
  	list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
  		first_nesvnic = container_of(list_pos, struct nes_vnic, list);
-@@ -797,14 +798,16 @@ static int nes_netdev_set_mac_address(st
+@@ -775,14 +776,16 @@ static int nes_netdev_set_mac_address(st
  	int i;
  	u32 macaddr_low;
  	u16 macaddr_high;
@@ -312,7 +312,7 @@
  	macaddr_high  = ((u16)netdev->dev_addr[0]) << 8;
  	macaddr_high += (u16)netdev->dev_addr[1];
  	macaddr_low   = ((u32)netdev->dev_addr[2]) << 24;
-@@ -909,11 +912,11 @@ static void nes_netdev_set_multicast_lis
+@@ -887,11 +890,11 @@ static void nes_netdev_set_multicast_lis
  			if (mc_index >= max_pft_entries_avaiable)
  				break;
  			if (multicast_addr) {
@@ -329,7 +329,7 @@
  				macaddr_high  = ((u16)multicast_addr->dmi_addr[0]) << 8;
  				macaddr_high += (u16)multicast_addr->dmi_addr[1];
  				macaddr_low   = ((u32)multicast_addr->dmi_addr[2]) << 24;
-@@ -1063,9 +1066,6 @@ static const char nes_ethtool_stringset[
+@@ -1040,9 +1043,6 @@ static const char nes_ethtool_stringset[
  	"CQ Depth 32",
  	"CQ Depth 128",
  	"CQ Depth 256",
@@ -339,17 +339,17 @@
  };
  
  #define NES_ETHTOOL_STAT_COUNT  ARRAY_SIZE(nes_ethtool_stringset)
-@@ -1263,9 +1263,6 @@ static void nes_netdev_get_ethtool_stats
- 	target_stat_values[52] = int_mod_cq_depth_32;
- 	target_stat_values[53] = int_mod_cq_depth_128;
- 	target_stat_values[54] = int_mod_cq_depth_256;
--	target_stat_values[55] = nesvnic->lro_mgr.stats.aggregated;
--	target_stat_values[56] = nesvnic->lro_mgr.stats.flushed;
--	target_stat_values[57] = nesvnic->lro_mgr.stats.no_desc;
+@@ -1240,9 +1240,6 @@ static void nes_netdev_get_ethtool_stats
+ 	target_stat_values[++index] = int_mod_cq_depth_32;
+ 	target_stat_values[++index] = int_mod_cq_depth_128;
+ 	target_stat_values[++index] = int_mod_cq_depth_256;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
  
  }
  
-@@ -1542,8 +1539,6 @@ static struct ethtool_ops nes_ethtool_op
+@@ -1525,8 +1522,6 @@ static struct ethtool_ops nes_ethtool_op
  	.set_sg = ethtool_op_set_sg,
  	.get_tso = ethtool_op_get_tso,
  	.set_tso = ethtool_op_set_tso,
@@ -358,7 +358,7 @@
  };
  
  
-@@ -1615,7 +1610,8 @@ struct net_device *nes_netdev_init(struc
+@@ -1598,7 +1593,8 @@ struct net_device *nes_netdev_init(struc
  	netdev->type = ARPHRD_ETHER;
  	netdev->features = NETIF_F_HIGHDMA;
  	netdev->ethtool_ops = &nes_ethtool_ops;
@@ -368,7 +368,7 @@
  	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
  	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
  	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
-@@ -1648,7 +1644,6 @@ struct net_device *nes_netdev_init(struc
+@@ -1630,7 +1626,6 @@ struct net_device *nes_netdev_init(struc
  
  	if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
  		netdev->features |= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;
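
For context: the refreshed nes.2_6_23_patch above still strips the inet_lro-based LRO support out of the nes driver (the <linux/inet_lro.h> include, the nes_lro_max_aggr module parameter, the lro_mgr bookkeeping reached through nesvnic->lro_mgr and its ethtool statistics), since kernels before 2.6.24 do not ship linux/inet_lro.h. Expressed as a compile-time guard instead of a per-kernel patch, the same fallback would look roughly like the sketch below; HAVE_INET_LRO is a hypothetical symbol, not something defined in this tree, which instead carries separate patches that delete the LRO code outright.

    /* sketch only: guard LRO support on the availability of inet_lro */
    #ifdef HAVE_INET_LRO                      /* hypothetical compat symbol */
    #include <linux/inet_lro.h>
    #define NES_LRO_FEATURES  NETIF_F_LRO     /* advertise LRO where the helper exists */
    #else
    #define NES_LRO_FEATURES  0               /* older kernels: no inet_lro, no LRO flag */
    #endif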

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/iw_nes_200_to_2_6_22.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/iw_nes_200_to_2_6_22.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/iw_nes_200_to_2_6_22.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:52:27.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -593,16 +593,22 @@ static int __devinit nes_probe(struct pc
  						nesdev->nesadapter->port_count;
  	}
@@ -35,9 +35,9 @@
  
  	tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.h nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h
---- linux-2.6/drivers/infiniband/hw/nes/nes.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2008-11-07 15:52:27.000000000 -0600
-@@ -276,14 +276,7 @@ static inline __le32 get_crc_value(struc
+--- linux-2.6/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:38:45.000000000 -0500
+@@ -274,14 +274,7 @@ static inline __le32 get_crc_value(struc
  	u32 crc_value;
  	crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad));
  

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/mlx4_en_0099_no_multiqueue.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/mlx4_en_0099_no_multiqueue.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/mlx4_en_0099_no_multiqueue.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,73 @@
+diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
+index bce700a..a9ee0d7 100644
+--- a/drivers/net/mlx4/en_netdev.c
++++ b/drivers/net/mlx4/en_netdev.c
+@@ -963,7 +963,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	int i;
+ 	int err;
+ 
+-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
++	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+ 	if (dev == NULL) {
+ 		mlx4_err(mdev, "Net device allocation failed\n");
+ 		return -ENOMEM;
+@@ -1036,7 +1036,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	dev->open = &mlx4_en_open;
+ 	dev->stop = &mlx4_en_close;
+ 	dev->hard_start_xmit = &mlx4_en_xmit;
+-	dev->select_queue = &mlx4_en_select_queue;
+ 	dev->get_stats = &mlx4_en_get_stats;
+ 	dev->set_multicast_list = &mlx4_en_set_multicast;
+ 	dev->set_mac_address = &mlx4_en_set_mac;
+diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
+index 3d8246f..72e166f 100644
+--- a/drivers/net/mlx4/en_tx.c
++++ b/drivers/net/mlx4/en_tx.c
+@@ -392,7 +392,7 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+ 			 *   transmission on that ring would stop the queue.
+ 			 */
+ 			ring->blocked = 0;
+-			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
++			netif_wake_queue(dev);
+ 			priv->port_stats.wake_queue++;
+ 		}
+ 	}
+@@ -612,7 +612,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
+ 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ }
+ 
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
++static int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	u16 vlan_tag = 0;
+@@ -703,7 +703,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+-	tx_ind = skb->queue_mapping;
++	tx_ind = mlx4_en_select_queue(dev, skb);
+ 	ring = &priv->tx_ring[tx_ind];
+ 	if (priv->vlgrp && vlan_tx_tag_present(skb))
+ 		vlan_tag = vlan_tx_tag_get(skb);
+@@ -713,7 +713,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ 		/* every full Tx ring stops queue.
+ 		 * TODO: implement multi-queue support (per-queue stop) */
+-		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
++		netif_stop_queue(dev);
+ 		ring->blocked = 1;
+ 		priv->port_stats.queue_stopped++;
+ 
+diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
+index adef17c..995e318 100644
+--- a/drivers/net/mlx4/mlx4_en.h
++++ b/drivers/net/mlx4/mlx4_en.h
+@@ -521,7 +521,6 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq);
+ void mlx4_en_poll_tx_cq(unsigned long data);
+ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+ 
+ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+ 			   u32 size, u16 stride);
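
The patch added above converts mlx4_en from multiqueue TX to a single device queue for 2.6.22, which lacks the multiqueue TX API (alloc_etherdev_mq(), netdev_get_tx_queue(), per-queue stop/wake): the netdev is allocated with plain alloc_etherdev(), the whole device queue is stopped and woken instead of one TX ring, and the ring index is obtained by calling mlx4_en_select_queue() directly in the xmit path rather than read back from skb->queue_mapping. The allocation side of that fallback, as a stand-alone sketch (the HAVE_ALLOC_ETHERDEV_MQ guard is illustrative only; this tree uses separate per-kernel patches rather than #ifdefs):

    /* sketch: allocate the net_device with or without per-ring TX queues */
    #ifdef HAVE_ALLOC_ETHERDEV_MQ             /* hypothetical compat symbol */
    	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
    #else
    	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));   /* single shared queue */
    #endif
    	if (dev == NULL)
    		return -ENOMEM;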

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/mlx4_en_0100_to_2.6.24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/mlx4_en_0100_to_2.6.24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/mlx4_en_0100_to_2.6.24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,33 +1,5 @@
-From 6d0a6cc6981404e95297924c9a8f169b3447a98e Mon Sep 17 00:00:00 2001
-From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
-Date: Thu, 2 Oct 2008 11:26:30 +0300
-Subject: [PATCH] mlx4_en: LRO backport patch
-
-Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
----
- drivers/net/mlx4/Makefile    |    2 +-
- drivers/net/mlx4/en_cq.c     |   23 ++-
- drivers/net/mlx4/en_lro.c    |  529 ++++++++++++++++++++++++++++++++++++++++++
- drivers/net/mlx4/en_netdev.c |    5 +-
- drivers/net/mlx4/en_params.c |   19 +--
- drivers/net/mlx4/en_rx.c     |  121 +++-------
- drivers/net/mlx4/en_tx.c     |    1 +
- drivers/net/mlx4/mlx4_en.h   |   60 +++++-
- 8 files changed, 646 insertions(+), 114 deletions(-)
- create mode 100644 drivers/net/mlx4/en_lro.c
-
-diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
-index acb1b91..e6bfdec 100644
---- a/drivers/net/mlx4/Makefile
-+++ b/drivers/net/mlx4/Makefile
-@@ -5,4 +5,4 @@ mlx4_core-y :=  alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-
- obj-$(CONFIG_MLX4_EN)                  += mlx4_en.o
-
--mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o
-+mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o en_lro.o
 diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
-index cf9c5df..2348bfc 100644
+index a4d6e46..440633a 100644
 --- a/drivers/net/mlx4/en_cq.c
 +++ b/drivers/net/mlx4/en_cq.c
 @@ -34,6 +34,7 @@
@@ -38,7 +10,7 @@
  
  #include "mlx4_en.h"
  
-@@ -100,8 +101,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -106,8 +107,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  		cq->timer.function = mlx4_en_poll_tx_cq;
  		cq->timer.data = (unsigned long) cq;
  	} else {
@@ -58,7 +30,7 @@
  	}
  
  	return 0;
-@@ -123,8 +133,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -129,8 +139,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  
  	if (cq->is_tx)
  		del_timer(&cq->timer);
@@ -74,548 +46,8 @@
  
  	mlx4_cq_free(mdev->dev, &cq->mcq);
  }
-diff --git a/drivers/net/mlx4/en_lro.c b/drivers/net/mlx4/en_lro.c
-new file mode 100644
-index 0000000..9d17376
---- /dev/null
-+++ b/drivers/net/mlx4/en_lro.c
-@@ -0,0 +1,534 @@
-+/*
-+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses.  You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ *     Redistribution and use in source and binary forms, with or
-+ *     without modification, are permitted provided that the following
-+ *     conditions are met:
-+ *
-+ *      - Redistributions of source code must retain the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer.
-+ *
-+ *      - Redistributions in binary form must reproduce the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer in the documentation and/or other materials
-+ *        provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ */
-+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <net/tcp.h>
-+#include <linux/if_vlan.h>
-+#include <linux/delay.h>
-+
-+#include "mlx4_en.h"
-+
-+/* LRO hash function - using sum of source and destination port LSBs is
-+ * good enough */
-+#define LRO_INDEX(th, size) \
-+	((*((u8*) &th->source + 1) + *((u8*) &th->dest + 1)) & (size - 1))
-+
-+/* #define CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+static void mlx4_en_lro_validate(struct mlx4_en_priv* priv, struct mlx4_en_lro *lro)
-+{
-+	int i;
-+	int size, size2;
-+	struct sk_buff *skb = lro->skb;
-+	skb_frag_t *frags;
-+	int len, len2;
-+	int cur_skb = 0;
-+
-+	/* Sum fragment sizes of first skb */
-+	len = skb->len;
-+	size = skb_headlen(skb);
-+	frags = skb_shinfo(skb)->frags;
-+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+		size += frags[i].size;
-+	}
-+
-+	/* Add in fragments of linked skb's */
-+	skb = skb_shinfo(skb)->frag_list;
-+	while (skb) {
-+		cur_skb++;
-+		len2 = skb->len;
-+		if (skb_headlen(skb)) {
-+			mlx4_err(priv->mdev, "Bad LRO format: non-zero headlen "
-+				  "in fraglist (skb:%d)\n", cur_skb);
-+			return;
-+		}
-+
-+		size2 = 0;
-+		frags = skb_shinfo(skb)->frags;
-+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+			size2 += frags[i].size;
-+		}
-+
-+		if (size2 != len2) {
-+			mlx4_err(priv->mdev, "Bad skb size:%d in LRO fraglist. "
-+			          "Expected:%d (skb:%d)\n", size2, len2, cur_skb);
-+			return;
-+		}
-+		size += size2;
-+		skb = skb->next;
-+	}
-+
-+	if (size != len)
-+		mlx4_err(priv->mdev, "Bad LRO size:%d expected:%d\n", size, len);
-+}
-+#endif /* MLX4_EN_DEBUG_LRO */
-+
-+static void mlx4_en_lro_flush_single(struct mlx4_en_priv* priv,
-+		   struct mlx4_en_rx_ring* ring, struct mlx4_en_lro *lro)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct sk_buff *skb = lro->skb;
-+	struct iphdr *iph = (struct iphdr *) skb->data;
-+	struct tcphdr *th = (struct tcphdr *)(iph + 1);
-+	unsigned int headlen = skb_headlen(skb);
-+	__wsum tcp_hdr_csum;
-+	u32 *ts;
-+
-+	/* Update IP length and checksum */
-+	iph->tot_len = htons(lro->tot_len);
-+	iph->check = 0;
-+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-+
-+	/* Update latest TCP ack, window, psh, and timestamp */
-+	th->ack_seq = lro->ack_seq;
-+	th->window = lro->window;
-+	th->psh = !!lro->psh;
-+	if (lro->has_timestamp) {
-+		ts = (u32 *) (th + 1);
-+		ts[1] = htonl(lro->tsval);
-+		ts[2] = lro->tsecr;
-+	}
-+	th->check = 0;
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, 0);
-+	lro->data_csum = csum_add(lro->data_csum, tcp_hdr_csum);
-+	th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
-+				      lro->tot_len - (iph->ihl << 2),
-+				      IPPROTO_TCP, lro->data_csum);
-+
-+	/* Update skb */
-+	skb->len = lro->tot_len;
-+	skb->data_len = lro->tot_len - headlen;
-+	skb->truesize = skb->len + sizeof(struct sk_buff);
-+	skb_shinfo(skb)->gso_size = lro->mss;
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+	mlx4_en_lro_validate(priv, lro);
-+#endif /* CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+	/* Push it up the stack */
-+	if (priv->vlgrp && lro->has_vlan)
-+		vlan_hwaccel_receive_skb(skb, priv->vlgrp,
-+					be16_to_cpu(lro->vlan_prio));
-+	else
-+		netif_receive_skb(skb);
-+	priv->dev->last_rx = jiffies;
-+
-+	/* Increment stats */
-+	priv->port_stats.lro_flushed++;
-+
-+	/* Move session back to the free list */
-+	hlist_del(&lro->node);
-+	hlist_del(&lro->flush_node);
-+	hlist_add_head(&lro->node, &ring->lro_free);
-+}
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_flush, flush_node) {
-+		if (all || time_after(jiffies, lro->expires))
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+	}
-+}
-+
-+static inline int mlx4_en_lro_append(struct mlx4_en_priv *priv,
-+				   struct mlx4_en_lro *lro,
-+				   struct mlx4_en_rx_desc *rx_desc,
-+				   struct skb_frag_struct *skb_frags,
-+				   struct mlx4_en_rx_alloc *page_alloc,
-+				   unsigned int data_len,
-+				   int hlen)
-+{
-+	struct sk_buff *skb = lro->skb_last;
-+	struct skb_shared_info *info;
-+	struct skb_frag_struct *frags_copy;
-+	int nr_frags;
-+
-+	if (skb_shinfo(skb)->nr_frags + priv->num_frags > MAX_SKB_FRAGS)
-+		return -ENOMEM;
-+
-+	info = skb_shinfo(skb);
-+
-+	/* Copy fragments from descriptor ring to skb */
-+	frags_copy = info->frags + info->nr_frags;
-+	nr_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
-+						frags_copy,
-+						page_alloc,
-+						data_len + hlen);
-+	if (!nr_frags) {
-+		mlx4_dbg(DRV, priv, "Failed completing rx desc during LRO append\n");
-+		return -ENOMEM;
-+	}
-+
-+	/* Skip over headers */
-+	frags_copy[0].page_offset += hlen;
-+
-+	if (nr_frags == 1)
-+		frags_copy[0].size = data_len;
-+	else {
-+		/* Adjust size of last fragment to match packet length.
-+		 * Note: if this fragment is also the first one, the
-+		 *       operation is completed in the next line */
-+		frags_copy[nr_frags - 1].size = hlen + data_len -
-+				priv->frag_info[nr_frags - 1].frag_prefix_size;
-+
-+		/* Adjust size of first fragment */
-+		frags_copy[0].size -= hlen;
-+	}
-+
-+	/* Update skb bookkeeping */
-+	skb->len += data_len;
-+	skb->data_len += data_len;
-+	info->nr_frags += nr_frags;
-+	return 0;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_find_session(struct mlx4_en_dev *mdev,
-+						       struct mlx4_en_rx_ring *ring,
-+						       struct iphdr *iph,
-+						       struct tcphdr *th)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node;
-+	int index = LRO_INDEX(th, mdev->profile.num_lro);
-+	struct hlist_head *list = &ring->lro_hash[index];
-+
-+	hlist_for_each_entry(lro, node, list, node) {
-+		if (lro->sport_dport == *((u32*) &th->source) &&
-+		    lro->saddr == iph->saddr &&
-+		    lro->daddr == iph->daddr)
-+			return lro;
-+	}
-+	return NULL;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_alloc_session(struct mlx4_en_priv *priv,
-+							struct mlx4_en_rx_ring *ring)
-+{
-+	return hlist_empty(&ring->lro_free) ? NULL :
-+		hlist_entry(ring->lro_free.first, struct mlx4_en_lro, node);
-+}
-+
-+static __wsum mlx4_en_lro_tcp_data_csum(struct iphdr *iph,
-+					struct tcphdr *th, int len)
-+{
-+	__wsum tcp_csum;
-+	__wsum tcp_hdr_csum;
-+	__wsum tcp_ps_hdr_csum;
-+
-+	tcp_csum = ~csum_unfold(th->check);
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, tcp_csum);
-+
-+	tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-+					     len + (th->doff << 2),
-+					     IPPROTO_TCP, 0);
-+
-+	return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
-+			tcp_ps_hdr_csum);
-+}
-+
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+					  struct mlx4_en_rx_desc *rx_desc,
-+					  struct skb_frag_struct *skb_frags,
-+					  unsigned int length,
-+					  struct mlx4_cqe *cqe)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct mlx4_en_lro *lro;
-+	struct sk_buff *skb;
-+	struct iphdr *iph;
-+	struct tcphdr *th;
-+	dma_addr_t dma;
-+	int tcp_hlen;
-+	int tcp_data_len;
-+	int hlen;
-+	u16 ip_len;
-+	void *va;
-+	u32 *ts;
-+	u32 seq;
-+	u32 tsval = (u32) ~0UL;
-+	u32 tsecr = 0;
-+	u32 ack_seq;
-+	u16 window;
-+
-+	/* This packet is eligible for LRO if it is:
-+	 * - DIX Ethernet (type interpretation)
-+	 * - TCP/IP (v4)
-+	 * - without IP options
-+	 * - not an IP fragment */
-+	if (!mlx4_en_can_lro(cqe->status))
-+			return -1;
-+
-+	/* Get pointer to TCP header. We already know that the packet is DIX Ethernet/IPv4/TCP
-+	 * with no VLAN (HW stripped it) and no IP options */
-+	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
-+	iph = va + ETH_HLEN;
-+	th = (struct tcphdr *)(iph + 1);
-+
-+	/* Synchronsize headers for processing */
-+	dma = be64_to_cpu(rx_desc->data[0].addr);
-+#define MAX_LRO_HEADER		(ETH_HLEN + \
-+				 sizeof(*iph) + \
-+				 sizeof(*th) + \
-+				 TCPOLEN_TSTAMP_ALIGNED)
-+	dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
-+				      MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+
-+	/* We only handle aligned timestamp options */
-+	tcp_hlen = (th->doff << 2);
-+	if (tcp_hlen == sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) {
-+		ts = (u32*) (th + 1);
-+		if (unlikely(*ts != htonl((TCPOPT_NOP << 24) |
-+					  (TCPOPT_NOP << 16) |
-+					  (TCPOPT_TIMESTAMP << 8) |
-+					  TCPOLEN_TIMESTAMP)))
-+			goto sync_device;
-+		tsval = ntohl(ts[1]);
-+		tsecr = ts[2];
-+	} else if (tcp_hlen != sizeof(*th))
-+		goto sync_device;
-+	
-+
-+	/* At this point we know we have a TCP packet that is likely to be
-+	 * eligible for LRO. Therefore, see now if we have an oustanding
-+	 * session that corresponds to this packet so we could flush it if
-+	 * something still prevents LRO */
-+	lro = mlx4_en_lro_find_session(mdev, ring, iph, th);
-+
-+	/* ensure no bits set besides ack or psh */
-+	if (th->fin || th->syn || th->rst || th->urg || th->ece ||
-+	    th->cwr || !th->ack) {
-+		if (lro) {
-+			/* First flush session to keep packets in-order */
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		}
-+		goto sync_device;
-+	}
-+
-+	/* Get ip length and verify that the frame is big enough */
-+	ip_len = ntohs(iph->tot_len);
-+	if (unlikely(length < ETH_HLEN + ip_len)) {
-+		mlx4_warn(mdev, "Cannot LRO - ip payload exceeds frame!\n");
-+		goto sync_device;
-+	}
-+
-+	/* Get TCP payload length */
-+	tcp_data_len = ip_len - tcp_hlen - sizeof(struct iphdr);
-+	seq = ntohl(th->seq);
-+	if (!tcp_data_len)
-+		goto flush_session;
-+
-+	if (lro) {
-+		/* Check VLAN tag */
-+		if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+			if (cqe->sl_vid != lro->vlan_prio || !lro->has_vlan) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else if (lro->has_vlan) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* Check sequence number */
-+		if (unlikely(seq != lro->next_seq)) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* If the cummulative IP length is over 64K, flush and start
-+		 * a new session */
-+		if (lro->tot_len + tcp_data_len > 0xffff) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto new_session;
-+		}
-+
-+		/* Check timestamps */
-+		if (tcp_hlen != sizeof(*th)) {
-+			if (unlikely(lro->tsval > tsval || !tsecr))
-+				goto sync_device;
-+		}
-+
-+		window = th->window;
-+		ack_seq = th->ack_seq;
-+		if (likely(tcp_data_len)) {
-+			/* Append the data! */
-+			hlen = ETH_HLEN + sizeof(struct iphdr) + tcp_hlen;
-+			if (mlx4_en_lro_append(priv, lro, rx_desc, skb_frags,
-+							ring->page_alloc,
-+							tcp_data_len, hlen)) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else {
-+			/* No data */
-+			dma_sync_single_range_for_device(&mdev->dev->pdev->dev, dma,
-+							 0, MAX_LRO_HEADER,
-+							 DMA_FROM_DEVICE);
-+		}
-+
-+		/* Update session */
-+		lro->psh |= th->psh;
-+		lro->next_seq += tcp_data_len;
-+		lro->data_csum = csum_block_add(lro->data_csum,
-+					mlx4_en_lro_tcp_data_csum(iph, th,
-+								  tcp_data_len),
-+					lro->tot_len);
-+		lro->tot_len += tcp_data_len;
-+		lro->tsval = tsval;
-+		lro->tsecr = tsecr;
-+		lro->ack_seq = ack_seq;
-+		lro->window = window;
-+		if (tcp_data_len > lro->mss)
-+			lro->mss = tcp_data_len;
-+		priv->port_stats.lro_aggregated++;
-+		if (th->psh)
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		return 0;
-+	}
-+
-+new_session:
-+	if (th->psh)
-+		goto sync_device;
-+	lro = mlx4_en_lro_alloc_session(priv, ring);
-+	if (lro) {
-+		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags, ring->page_alloc,
-+							     ETH_HLEN + ip_len);
-+		if (skb) {
-+			int index;
-+
-+			/* Add in the skb */
-+			lro->skb = skb;
-+			lro->skb_last = skb;
-+			skb->protocol = eth_type_trans(skb, priv->dev);
-+			skb->ip_summed = CHECKSUM_UNNECESSARY;
-+
-+			/* Initialize session */
-+			lro->saddr = iph->saddr;
-+			lro->daddr = iph->daddr;
-+			lro->sport_dport = *((u32*) &th->source);
-+
-+			lro->next_seq = seq + tcp_data_len;
-+			lro->tot_len = ip_len;
-+			lro->psh = th->psh;
-+			lro->ack_seq = th->ack_seq;
-+			lro->window = th->window;
-+			lro->mss = tcp_data_len;
-+			lro->data_csum = mlx4_en_lro_tcp_data_csum(iph, th,
-+						tcp_data_len);
-+
-+			/* Handle vlans */
-+			if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+				lro->vlan_prio = cqe->sl_vid;
-+				lro->has_vlan = 1;
-+			} else
-+				lro->has_vlan = 0;
-+
-+			/* Handle timestamps */
-+			if (tcp_hlen != sizeof(*th)) {
-+				lro->tsval = tsval;
-+				lro->tsecr = tsecr;
-+				lro->has_timestamp = 1;
-+			} else {
-+				lro->tsval = (u32) ~0UL;
-+				lro->has_timestamp = 0;
-+			}
-+
-+			/* Activate this session */
-+			lro->expires = jiffies + HZ / 25;
-+			hlist_del(&lro->node);
-+			index = LRO_INDEX(th, mdev->profile.num_lro);
-+
-+			hlist_add_head(&lro->node, &ring->lro_hash[index]);
-+			hlist_add_head(&lro->flush_node, &ring->lro_flush);
-+			priv->port_stats.lro_aggregated++;
-+			return 0;
-+		}
-+	} else {
-+		priv->port_stats.lro_no_desc++;
-+	}
-+
-+flush_session:
-+	if (lro)
-+		mlx4_en_lro_flush_single(priv, ring, lro);
-+sync_device:
-+	dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
-+					 MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+	return -1;
-+}
-+
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_free, node) {
-+		hlist_del(&lro->node);
-+		kfree(lro);
-+	}
-+	kfree(ring->lro_hash);
-+}
-+
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro)
-+{
-+	struct mlx4_en_lro *lro;
-+	int i;
-+
-+	INIT_HLIST_HEAD(&ring->lro_free);
-+	INIT_HLIST_HEAD(&ring->lro_flush);
-+	ring->lro_hash = kmalloc(sizeof(struct hlist_head) * num_lro,
-+				 GFP_KERNEL);
-+	if (!ring->lro_hash)
-+		return -ENOMEM;
-+
-+	for (i = 0; i < num_lro; i++) {
-+		INIT_HLIST_HEAD(&ring->lro_hash[i]);
-+		lro = kzalloc(sizeof(struct mlx4_en_lro), GFP_KERNEL);
-+		if (!lro) {
-+			mlx4_en_lro_destroy(ring);
-+			return -ENOMEM;
-+		}
-+		INIT_HLIST_NODE(&lro->node);
-+		INIT_HLIST_NODE(&lro->flush_node);
-+		hlist_add_head(&lro->node, &ring->lro_free);
-+	}
-+	return 0;
-+}
-+
-+
 diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
-index 6199f4c..92f91d2 100644
+index 2d57ae6..3b394d7 100644
 --- a/drivers/net/mlx4/en_netdev.c
 +++ b/drivers/net/mlx4/en_netdev.c
 @@ -333,7 +333,8 @@ void mlx4_en_netpoll(struct net_device *dev)
@@ -625,10 +57,10 @@
 -		napi_synchronize(&cq->napi);
 +		while (test_bit(__LINK_STATE_RX_SCHED, &cq->poll_dev->state))
 +			msleep(1);
- 		mlx4_en_process_rx_cq(dev, cq, 0);
- 		spin_unlock_irqrestore(&cq->lock, flags);
- 	}
-@@ -757,8 +758,6 @@ void mlx4_en_stop_port(struct net_device *dev)
+ 		if (priv->rx_ring[i].use_frags)
+ 			mlx4_en_process_rx_cq(dev, cq, 0);
+ 		else
+@@ -755,8 +756,6 @@ void mlx4_en_stop_port(struct net_device *dev)
  	/* Free RX Rings */
  	for (i = 0; i < priv->rx_ring_num; i++) {
  		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -637,226 +69,11 @@
  		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
  	}
  }
-diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
-index 6e96d0a..2b0ca84 100644
---- a/drivers/net/mlx4/en_params.c
-+++ b/drivers/net/mlx4/en_params.c
-@@ -142,21 +142,6 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
-  * Ethtool support
-  */
- 
--static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
--{
--	int i;
--
--	priv->port_stats.lro_aggregated = 0;
--	priv->port_stats.lro_flushed = 0;
--	priv->port_stats.lro_no_desc = 0;
--
--	for (i = 0; i < priv->rx_ring_num; i++) {
--		priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
--		priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
--		priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
--	}
--}
--
- static void
- mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
- {
-@@ -266,8 +251,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
- 
- 	spin_lock_bh(&priv->stats_lock);
- 
--	mlx4_en_update_lro_stats(priv);
--
- 	for (i = 0; i < NUM_MAIN_STATS; i++)
- 		data[index++] = ((unsigned long *) &priv->stats)[i];
- 	for (i = 0; i < NUM_PORT_STATS; i++)
-@@ -457,7 +440,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
- 	.get_rx_csum = mlx4_en_get_rx_csum,
- 	.set_rx_csum = mlx4_en_set_rx_csum,
- 	.get_tx_csum = ethtool_op_get_tx_csum,
--	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
-+	.set_tx_csum = ethtool_op_set_tx_csum,
- 	.get_strings = mlx4_en_get_strings,
- 	.get_sset_count = mlx4_en_get_sset_count,
- 	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
 diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
-index 5a95d74..16f07bc 100644
+index 9d144cd..14b5faf 100644
 --- a/drivers/net/mlx4/en_rx.c
 +++ b/drivers/net/mlx4/en_rx.c
-@@ -37,6 +37,7 @@
- #include <linux/if_ether.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/etherdevice.h>
- 
- #include "mlx4_en.h"
- 
-@@ -51,18 +52,6 @@ static void mlx4_en_srq_event(struct mlx
- 	return;
- }
- 
--static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
--				   void **ip_hdr, void **tcpudp_hdr,
--				   u64 *hdr_flags, void *priv)
--{
--	*mac_hdr = page_address(frags->page) + frags->page_offset;
--	*ip_hdr = *mac_hdr + ETH_HLEN;
--	*tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
--	*hdr_flags = LRO_IPV4 | LRO_TCP;
--
--	return 0;
--}
--
- static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
- 			      struct mlx4_en_rx_desc *rx_desc,
- 			      struct skb_frag_struct *skb_frags,
-@@ -372,23 +361,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- 	}
- 	ring->buf = ring->wqres.buf.direct.buf;
- 
--	/* Configure lro mngr */
--	memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
--	ring->lro.dev = priv->dev;
--	ring->lro.features = LRO_F_NAPI;
--	ring->lro.frag_align_pad = NET_IP_ALIGN;
--	ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
--	ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
--	ring->lro.max_desc = mdev->profile.num_lro;
--	ring->lro.max_aggr = MAX_SKB_FRAGS;
--	ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
--				    sizeof(struct net_lro_desc),
--				    GFP_KERNEL);
--	if (!ring->lro.lro_arr) {
--		mlx4_err(mdev, "Failed to allocate lro array\n");
-+	/* Allocate LRO sessions */
-+	if (mlx4_en_lro_init(ring, mdev->profile.num_lro)) {
-+		mlx4_err(mdev, "Failed allocating lro sessions\n");
- 		goto err_map;
- 	}
--	ring->lro.get_frag_header = mlx4_en_get_frag_header;
- 
- 	return 0;
- 
-@@ -501,7 +478,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 
--	kfree(ring->lro.lro_arr);
-+	mlx4_en_lro_destroy(ring);
- 	mlx4_en_unmap_buffer(&ring->wqres.buf);
- 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
- 	vfree(ring->rx_info);
-@@ -522,12 +499,12 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- 
- 
- /* Unmap a completed descriptor and free unused pages */
--static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
--				    struct mlx4_en_rx_desc *rx_desc,
--				    struct skb_frag_struct *skb_frags,
--				    struct skb_frag_struct *skb_frags_rx,
--				    struct mlx4_en_rx_alloc *page_alloc,
--				    int length)
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct mlx4_en_frag_info *frag_info;
-@@ -570,11 +547,11 @@ fail:
- }
- 
- 
--static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
--				      struct mlx4_en_rx_desc *rx_desc,
--				      struct skb_frag_struct *skb_frags,
--				      struct mlx4_en_rx_alloc *page_alloc,
--				      unsigned int length)
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct sk_buff *skb;
-@@ -663,11 +640,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 	struct mlx4_cqe *cqe;
- 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
- 	struct skb_frag_struct *skb_frags;
--	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
- 	struct mlx4_en_rx_desc *rx_desc;
- 	struct sk_buff *skb;
- 	int index;
--	int nr;
- 	unsigned int length;
- 	int polled = 0;
- 	int ip_summed;
-@@ -718,38 +693,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 			if ((cqe->status & MLX4_CQE_STATUS_IPOK) &&
- 			    (cqe->checksum == 0xffff)) {
- 				priv->port_stats.rx_chksum_good++;
--				/* This packet is eligible for LRO if it is:
--				 * - DIX Ethernet (type interpretation)
--				 * - TCP/IP (v4)
--				 * - without IP options
--				 * - not an IP fragment */
--				if (mlx4_en_can_lro(cqe->status) &&
--				    dev->features & NETIF_F_LRO) {
--
--					nr = mlx4_en_complete_rx_desc(
--						priv, rx_desc,
--						skb_frags, lro_frags,
--						ring->page_alloc, length);
--					if (!nr)
--						goto next;
--
--					if (priv->vlgrp && (cqe->vlan_my_qpn &
--						MLX4_CQE_VLAN_PRESENT_MASK)) {
--						lro_vlan_hwaccel_receive_frags(
--						       &ring->lro, lro_frags,
--						       length, length,
--						       priv->vlgrp,
--						       be16_to_cpu(cqe->sl_vid),
--						       NULL, 0);
--					} else
--						lro_receive_frags(&ring->lro,
--								  lro_frags,
--								  length,
--								  length,
--								  NULL, 0);
--
-+				if (mdev->profile.num_lro &&
-+				    !mlx4_en_lro_rx(priv, ring, rx_desc,
-+						    skb_frags, length, cqe))
- 					goto next;
--				}
- 
- 				/* LRO not possible, complete processing here */
- 				ip_summed = CHECKSUM_UNNECESSARY;
-@@ -790,13 +737,15 @@ next:
- 		if (++polled == budget) {
- 			/* We are here because we reached the NAPI budget -
- 			 * flush only pending LRO sessions */
--			lro_flush_all(&ring->lro);
-+			if (mdev->profile.num_lro)
-+				mlx4_en_lro_flush(priv, ring, 0);
- 			goto out;
- 		}
- 	}
- 
- 	/* If CQ is empty flush all LRO sessions unconditionally */
--	lro_flush_all(&ring->lro);
-+	if (mdev->profile.num_lro)
-+		mlx4_en_lro_flush(priv, ring, 1);
- 
- out:
- 	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
-@@ -820,30 +769,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
+@@ -1041,30 +1041,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
  	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
  
  	if (priv->port_up)
@@ -877,8 +94,8 @@
  	int done;
 +	int work = min(*budget, poll_dev->quota);
  
--	done = mlx4_en_process_rx_cq(dev, cq, budget);
-+	done = mlx4_en_process_rx_cq(dev, cq, work);
+-	done = cq->process_cq(dev, cq, budget);
++	done = cq->process_cq(dev, cq, work);
 +	dev->quota -= done;
 +	*budget -= done;
  
@@ -901,76 +118,11 @@
  }
  
  
-diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
-index 2297fec..59f8772 100644
---- a/drivers/net/mlx4/en_tx.c
-+++ b/drivers/net/mlx4/en_tx.c
-@@ -37,6 +37,7 @@
- #include <linux/skbuff.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/tcp.h>
- 
- #include "mlx4_en.h"
- 
 diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
-index 1011803..7c8c489 100644
+index ebaed67..456cd91 100644
 --- a/drivers/net/mlx4/mlx4_en.h
 +++ b/drivers/net/mlx4/mlx4_en.h
-@@ -38,7 +38,7 @@
- #include <linux/list.h>
- #include <linux/mutex.h>
- #include <linux/netdevice.h>
--#include <linux/inet_lro.h>
-+#include <net/checksum.h>
- 
- #include <linux/mlx4/device.h>
- #include <linux/mlx4/qp.h>
-@@ -268,11 +268,42 @@ struct mlx4_en_rx_desc {
- 	struct mlx4_wqe_data_seg data[0];
- };
- 
-+struct mlx4_en_lro {
-+	struct hlist_node node;
-+	struct hlist_node flush_node;
-+
-+	/* Id fields come first: */
-+	u32 saddr;
-+	u32 daddr;
-+	u32 sport_dport;
-+	u32 next_seq;
-+	u16 tot_len;
-+	u8 psh;
-+
-+	u32 tsval;
-+	u32 tsecr;
-+	u32 ack_seq;
-+	u16 window;
-+	__be16 vlan_prio;
-+	u16 has_vlan;
-+	u16 has_timestamp;
-+	u16 mss;
-+	__wsum  data_csum;
-+
-+	unsigned long expires;
-+	struct sk_buff *skb;
-+	struct sk_buff *skb_last;
-+};
-+
-+
- struct mlx4_en_rx_ring {
- 	struct mlx4_srq srq;
- 	struct mlx4_hwq_resources wqres;
- 	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
--	struct net_lro_mgr lro;
-+	struct mlx4_en_lro lro;
-+	struct hlist_head *lro_hash;
-+	struct hlist_head lro_free;
-+	struct hlist_head lro_flush;
- 	u32 size ;	/* number of Rx descs*/
- 	u32 actual_size;
- 	u32 size_mask;
-@@ -311,7 +342,7 @@ struct mlx4_en_cq {
+@@ -328,7 +328,7 @@ struct mlx4_en_cq {
  	int                     ring;
  	spinlock_t              lock;
  	struct net_device      *dev;
@@ -979,43 +131,12 @@
  	/* Per-core Tx cq processing support */
  	struct timer_list timer;
  	int size;
-@@ -541,7 +572,7 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- int mlx4_en_process_rx_cq(struct net_device *dev,
- 			  struct mlx4_en_cq *cq,
- 			  int budget);
+@@ -571,7 +571,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
+ int mlx4_en_process_rx_cq_skb(struct net_device *dev,
+ 			      struct mlx4_en_cq *cq,
+ 			      int budget);
 -int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
 +int mlx4_en_poll_rx_cq(struct net_device *poll_dev, int *budget);;
  void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
  			     int is_tx, int rss, int qpn, int cqn, int srqn,
  			     struct mlx4_qp_context *context);
-@@ -552,6 +583,27 @@ void mlx4_en_calc_rx_buf(struct net_device *dev);
- void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
- 				 struct mlx4_en_rss_map *rss_map,
- 				 int num_entries, int num_rings);
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all);
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+		   struct mlx4_en_rx_desc *rx_desc,
-+		   struct skb_frag_struct *skb_frags,
-+		   unsigned int length, struct mlx4_cqe *cqe);
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring);
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro);
-+
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv* priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length);
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv* priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length);
-+
- void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
- int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
- void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
--- 
-1.5.4
-
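
After this refresh, mlx4_en_0100_to_2.6.24.patch no longer adds its own en_lro.c; the remaining hunks mainly adapt the RX polling entry points to the old net_device-based poll interface (pre-napi_struct), where the callback receives the device plus a *budget pointer and accounts its work against dev->quota, as the mlx4_en_poll_rx_cq() hunk above shows. The general shape of such a callback, as a sketch assuming a driver-specific process_rx() helper that returns the number of completions handled (not code from this repository):

    #include <linux/netdevice.h>

    extern int process_rx(struct net_device *dev, int limit);  /* hypothetical helper */

    /* old-style (pre-2.6.24) poll callback -- sketch only */
    static int drv_poll(struct net_device *poll_dev, int *budget)
    {
    	int work = min(*budget, poll_dev->quota);
    	int done = process_rx(poll_dev, work);

    	poll_dev->quota -= done;
    	*budget -= done;

    	if (done < work) {
    		netif_rx_complete(poll_dev);     /* all work done: re-enable interrupts */
    		return 0;
    	}
    	return 1;                                /* more work pending, poll again */
    }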

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/mlx4_en_0110_no_set_flags.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/mlx4_en_0110_no_set_flags.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/mlx4_en_0110_no_set_flags.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -7,9 +7,9 @@
 --- ofed_kernel.orig/drivers/net/mlx4/en_params.c
 +++ ofed_kernel/drivers/net/mlx4/en_params.c
 @@ -593,8 +593,6 @@ const struct ethtool_ops mlx4_en_ethtool
- 	.get_pauseparam = mlx4_en_get_pauseparam,
  	.set_pauseparam = mlx4_en_set_pauseparam,
  	.get_ringparam = mlx4_en_get_ringparam,
+ 	.set_ringparam = mlx4_en_set_ringparam,
 -	.get_flags = ethtool_op_get_flags,
 -	.set_flags = ethtool_op_set_flags,
  };

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/rnfs_fs.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/rnfs_fs.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22/rnfs_fs.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,1487 @@
+diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
+index cc91227..262397b 100644
+--- a/fs/exportfs/expfs.c
++++ b/fs/exportfs/expfs.c
+@@ -361,11 +361,14 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
+ 	const struct export_operations *nop = mnt->mnt_sb->s_export_op;
+ 	struct dentry *result, *alias;
+ 	int err;
++	__u32 objp[2];
+ 
++	objp[0] = fid->i32.ino;
++	objp[1] = fid->i32.gen;
+ 	/*
+ 	 * Try to get any dentry for the given file handle from the filesystem.
+ 	 */
+-	result = nop->fh_to_dentry(mnt->mnt_sb, fid, fh_len, fileid_type);
++	result = nop->get_dentry(mnt->mnt_sb, &objp);
+ 	if (!result)
+ 		result = ERR_PTR(-ESTALE);
+ 	if (IS_ERR(result))
+@@ -417,11 +420,10 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
+ 		 * file handle.  If this fails we'll have to give up.
+ 		 */
+ 		err = -ESTALE;
+-		if (!nop->fh_to_parent)
++		if (!nop->get_parent)
+ 			goto err_result;
+ 
+-		target_dir = nop->fh_to_parent(mnt->mnt_sb, fid,
+-				fh_len, fileid_type);
++		target_dir = nop->get_parent(result);
+ 		if (!target_dir)
+ 			goto err_result;
+ 		err = PTR_ERR(target_dir);
+diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
+index cf0d5c2..696f4b9 100644
+--- a/fs/lockd/svclock.c
++++ b/fs/lockd/svclock.c
+@@ -417,11 +417,18 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
+ 			ret = nlm_granted;
+ 			goto out;
+ 		case -EAGAIN:
+-			ret = nlm_lck_denied;
+-			goto out;
++			if (wait) {
++				ret = nlm_lck_blocked;
++				break;
++			} else {
++				ret = nlm_lck_denied;
++				goto out;
++			}
+ 		case FILE_LOCK_DEFERRED:
+-			if (wait)
++			if (wait) {
++				ret = nlm_lck_blocked;
+ 				break;
++			}
+ 			/* Filesystem lock operation is in progress
+ 			   Add it to the queue waiting for callback */
+ 			ret = nlmsvc_defer_lock_rqst(rqstp, block);
+@@ -434,8 +441,6 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
+ 			goto out;
+ 	}
+ 
+-	ret = nlm_lck_blocked;
+-
+ 	/* Append to list of blocked */
+ 	nlmsvc_insert_block(block, NLM_NEVER);
+ out:
+diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
+index ac6170c..a538222 100644
+--- a/fs/nfs/Makefile
++++ b/fs/nfs/Makefile
+@@ -6,7 +6,8 @@ obj-$(CONFIG_NFS_FS) += nfs.o
+ 
+ nfs-y 			:= client.o dir.o file.o getroot.o inode.o super.o nfs2xdr.o \
+ 			   direct.o pagelist.o proc.o read.o symlink.o unlink.o \
+-			   write.o namespace.o mount_clnt.o
++			   write.o namespace.o mount_clnt.o \
++			   backport-namespace.o
+ nfs-$(CONFIG_ROOT_NFS)	+= nfsroot.o
+ nfs-$(CONFIG_NFS_V3)	+= nfs3proc.o nfs3xdr.o
+ nfs-$(CONFIG_NFS_V3_ACL)	+= nfs3acl.o
+diff --git a/fs/nfs/backport-namespace.c b/fs/nfs/backport-namespace.c
+new file mode 100644
+index 0000000..de57f8b
+--- /dev/null
++++ b/fs/nfs/backport-namespace.c
+@@ -0,0 +1 @@
++#include "src/namespace.c"
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 74f92b7..762f666 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -973,7 +973,7 @@ static int is_atomic_open(struct inode *dir, struct nameidata *nd)
+ 	if (nd->flags & LOOKUP_DIRECTORY)
+ 		return 0;
+ 	/* Are we trying to write to a read only partition? */
+-	if (__mnt_is_readonly(nd->path.mnt) &&
++	if (__mnt_is_readonly(nd->mnt) &&
+ 	    (nd->intent.open.flags & (O_CREAT|O_TRUNC|FMODE_WRITE)))
+ 		return 0;
+ 	return 1;
+@@ -1907,7 +1907,7 @@ int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags)
+ 	return nfs_do_access(inode, cred, nfs_open_permission_mask(openflags));
+ }
+ 
+-int nfs_permission(struct inode *inode, int mask)
++int nfs_permission(struct inode *inode, int mask, struct nameidata *nd)
+ {
+ 	struct rpc_cred *cred;
+ 	int res = 0;
+@@ -1917,7 +1917,7 @@ int nfs_permission(struct inode *inode, int mask)
+ 	if ((mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
+ 		goto out;
+ 	/* Is this sys_access() ? */
+-	if (mask & MAY_ACCESS)
++	if (nd != NULL && (nd->flags & LOOKUP_ACCESS))
+ 		goto force_lookup;
+ 
+ 	switch (inode->i_mode & S_IFMT) {
+@@ -1926,7 +1926,8 @@ int nfs_permission(struct inode *inode, int mask)
+ 		case S_IFREG:
+ 			/* NFSv4 has atomic_open... */
+ 			if (nfs_server_capable(inode, NFS_CAP_ATOMIC_OPEN)
+-					&& (mask & MAY_OPEN))
++					&& nd != NULL
++					&& (nd->flags & LOOKUP_OPEN))
+ 				goto out;
+ 			break;
+ 		case S_IFDIR:
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index 7846065..d062e09 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -54,7 +54,6 @@ static int  nfs_file_fsync(struct file *, struct dentry *dentry, int datasync);
+ static int nfs_check_flags(int flags);
+ static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl);
+ static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl);
+-static int nfs_setlease(struct file *file, long arg, struct file_lock **fl);
+ 
+ static struct vm_operations_struct nfs_file_vm_ops;
+ 
+@@ -77,7 +76,6 @@ const struct file_operations nfs_file_operations = {
+ 	.flock		= nfs_flock,
+ 	.splice_read	= nfs_file_splice_read,
+ 	.check_flags	= nfs_check_flags,
+-	.setlease	= nfs_setlease,
+ };
+ 
+ const struct inode_operations nfs_file_inode_operations = {
+@@ -178,8 +176,6 @@ force_reval:
+ 
+ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
+ {
+-	loff_t loff;
+-
+ 	dprintk("NFS: llseek file(%s/%s, %lld, %d)\n",
+ 			filp->f_path.dentry->d_parent->d_name.name,
+ 			filp->f_path.dentry->d_name.name,
+@@ -192,10 +188,7 @@ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
+ 		if (retval < 0)
+ 			return (loff_t)retval;
+ 	}
+-	lock_kernel();	/* BKL needed? */
+-	loff = generic_file_llseek_unlocked(filp, offset, origin);
+-	unlock_kernel();
+-	return loff;
++	return remote_llseek(filp, offset, origin);
+ }
+ 
+ /*
+@@ -337,73 +330,37 @@ nfs_file_fsync(struct file *file, struct dentry *dentry, int datasync)
+  * If the writer ends up delaying the write, the writer needs to
+  * increment the page use counts until he is done with the page.
+  */
+-static int nfs_write_begin(struct file *file, struct address_space *mapping,
+-			loff_t pos, unsigned len, unsigned flags,
+-			struct page **pagep, void **fsdata)
++static int nfs_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
+ {
+-	int ret;
+-	pgoff_t index;
+-	struct page *page;
+-	index = pos >> PAGE_CACHE_SHIFT;
+-
+-	dfprintk(PAGECACHE, "NFS: write_begin(%s/%s(%ld), %u@%lld)\n",
+-		file->f_path.dentry->d_parent->d_name.name,
+-		file->f_path.dentry->d_name.name,
+-		mapping->host->i_ino, len, (long long) pos);
+-
+-	page = __grab_cache_page(mapping, index);
+-	if (!page)
+-		return -ENOMEM;
+-	*pagep = page;
+-
+-	ret = nfs_flush_incompatible(file, page);
+-	if (ret) {
+-		unlock_page(page);
+-		page_cache_release(page);
+-	}
+-	return ret;
++	return nfs_flush_incompatible(file, page);
+ }
+ 
+-static int nfs_write_end(struct file *file, struct address_space *mapping,
+-			loff_t pos, unsigned len, unsigned copied,
+-			struct page *page, void *fsdata)
++static int nfs_commit_write(struct file *file, struct page *page, unsigned offset, unsigned to)
+ {
+-	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
+ 	int status;
+ 
+-	dfprintk(PAGECACHE, "NFS: write_end(%s/%s(%ld), %u@%lld)\n",
+-		file->f_path.dentry->d_parent->d_name.name,
+-		file->f_path.dentry->d_name.name,
+-		mapping->host->i_ino, len, (long long) pos);
+-
+ 	/*
+ 	 * Zero any uninitialised parts of the page, and then mark the page
+ 	 * as up to date if it turns out that we're extending the file.
+ 	 */
+ 	if (!PageUptodate(page)) {
+ 		unsigned pglen = nfs_page_length(page);
+-		unsigned end = offset + len;
+ 
+ 		if (pglen == 0) {
+ 			zero_user_segments(page, 0, offset,
+-					end, PAGE_CACHE_SIZE);
++					to, PAGE_CACHE_SIZE);
+ 			SetPageUptodate(page);
+-		} else if (end >= pglen) {
+-			zero_user_segment(page, end, PAGE_CACHE_SIZE);
++		} else if (to >= pglen) {
++			zero_user_segment(page, to, PAGE_CACHE_SIZE);
+ 			if (offset == 0)
+ 				SetPageUptodate(page);
+ 		} else
+ 			zero_user_segment(page, pglen, PAGE_CACHE_SIZE);
+ 	}
+ 
+-	status = nfs_updatepage(file, page, offset, copied);
+-
+-	unlock_page(page);
+-	page_cache_release(page);
++	status = nfs_updatepage(file, page, offset, to-offset);
+ 
+-	if (status < 0)
+-		return status;
+-	return copied;
++	return status;
+ }
+ 
+ static void nfs_invalidate_page(struct page *page, unsigned long offset)
+@@ -440,8 +397,8 @@ const struct address_space_operations nfs_file_aops = {
+ 	.set_page_dirty = __set_page_dirty_nobuffers,
+ 	.writepage = nfs_writepage,
+ 	.writepages = nfs_writepages,
+-	.write_begin = nfs_write_begin,
+-	.write_end = nfs_write_end,
++	.prepare_write = nfs_prepare_write,
++	.commit_write = nfs_commit_write,
+ 	.invalidatepage = nfs_invalidate_page,
+ 	.releasepage = nfs_release_page,
+ 	.direct_IO = nfs_direct_IO,
+@@ -484,7 +441,8 @@ out_unlock:
+ }
+ 
+ static struct vm_operations_struct nfs_file_vm_ops = {
+-	.fault = filemap_fault,
++	.nopage		= filemap_nopage,
++	.populate	= filemap_populate,
+ 	.page_mkwrite = nfs_vm_page_mkwrite,
+ };
+ 
+@@ -718,16 +676,3 @@ static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
+ 		return do_unlk(filp, cmd, fl);
+ 	return do_setlk(filp, cmd, fl);
+ }
+-
+-/*
+- * There is no protocol support for leases, so we have no way to implement
+- * them correctly in the face of opens by other clients.
+- */
+-static int nfs_setlease(struct file *file, long arg, struct file_lock **fl)
+-{
+-	dprintk("NFS: setlease(%s/%s, arg=%ld)\n",
+-			file->f_path.dentry->d_parent->d_name.name,
+-			file->f_path.dentry->d_name.name, arg);
+-
+-	return -EINVAL;
+-}
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 52daefa..f009da1 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -1242,7 +1242,7 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi)
+ #endif
+ }
+ 
+-static void init_once(void *foo)
++static void init_once(void *foo, struct kmem_cache *cachep, unsigned long temp)
+ {
+ 	struct nfs_inode *nfsi = (struct nfs_inode *) foo;
+ 
+diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
+index 66df08d..f22d092 100644
+--- a/fs/nfs/namespace.c
++++ b/fs/nfs/namespace.c
+@@ -107,29 +107,29 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
+ 
+ 	BUG_ON(IS_ROOT(dentry));
+ 	dprintk("%s: enter\n", __func__);
+-	dput(nd->path.dentry);
+-	nd->path.dentry = dget(dentry);
++	dput(nd->dentry);
++	nd->dentry = dget(dentry);
+ 
+ 	/* Look it up again */
+-	parent = dget_parent(nd->path.dentry);
++	parent = dget_parent(nd->dentry);
+ 	err = server->nfs_client->rpc_ops->lookup(parent->d_inode,
+-						  &nd->path.dentry->d_name,
++						  &nd->dentry->d_name,
+ 						  &fh, &fattr);
+ 	dput(parent);
+ 	if (err != 0)
+ 		goto out_err;
+ 
+ 	if (fattr.valid & NFS_ATTR_FATTR_V4_REFERRAL)
+-		mnt = nfs_do_refmount(nd->path.mnt, nd->path.dentry);
++		mnt = nfs_do_refmount(nd->mnt, nd->dentry);
+ 	else
+-		mnt = nfs_do_submount(nd->path.mnt, nd->path.dentry, &fh,
++		mnt = nfs_do_submount(nd->mnt, nd->dentry, &fh,
+ 				      &fattr);
+ 	err = PTR_ERR(mnt);
+ 	if (IS_ERR(mnt))
+ 		goto out_err;
+ 
+ 	mntget(mnt);
+-	err = do_add_mount(mnt, &nd->path, nd->path.mnt->mnt_flags|MNT_SHRINKABLE,
++	err = do_add_mount(mnt, nd, nd->mnt->mnt_flags|MNT_SHRINKABLE,
+ 			   &nfs_automount_list);
+ 	if (err < 0) {
+ 		mntput(mnt);
+@@ -137,9 +137,9 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
+ 			goto out_follow;
+ 		goto out_err;
+ 	}
+-	path_put(&nd->path);
+-	nd->path.mnt = mnt;
+-	nd->path.dentry = dget(mnt->mnt_root);
++	backport_path_put(nd);
++	nd->mnt = mnt;
++	nd->dentry = dget(mnt->mnt_root);
+ 	schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout);
+ out:
+ 	dprintk("%s: done, returned %d\n", __func__, err);
+@@ -147,11 +147,11 @@ out:
+ 	dprintk("<-- nfs_follow_mountpoint() = %d\n", err);
+ 	return ERR_PTR(err);
+ out_err:
+-	path_put(&nd->path);
++	backport_path_put(nd);
+ 	goto out;
+ out_follow:
+-	while (d_mountpoint(nd->path.dentry) &&
+-	       follow_down(&nd->path.mnt, &nd->path.dentry))
++	while (d_mountpoint(nd->dentry) &&
++	       follow_down(&nd->mnt, &nd->dentry))
+ 		;
+ 	err = 0;
+ 	goto out;
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index c910413..3e2973e 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1384,7 +1384,7 @@ struct dentry *
+ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+ {
+ 	struct path path = {
+-		.mnt = nd->path.mnt,
++		.mnt = nd->mnt,
+ 		.dentry = dentry,
+ 	};
+ 	struct dentry *parent;
+@@ -1421,8 +1421,8 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+ 	}
+ 	res = d_add_unique(dentry, igrab(state->inode));
+ 	if (res != NULL)
+-		path.dentry = res;
+-	nfs_set_verifier(path.dentry, nfs_save_change_attribute(dir));
++		dentry = res;
++	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ 	nfs_unblock_sillyrename(parent);
+ 	nfs4_intent_set_file(nd, &path, state);
+ 	return res;
+@@ -1432,7 +1432,7 @@ int
+ nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, struct nameidata *nd)
+ {
+ 	struct path path = {
+-		.mnt = nd->path.mnt,
++		.mnt = nd->mnt,
+ 		.dentry = dentry,
+ 	};
+ 	struct rpc_cred *cred;
+@@ -1880,7 +1880,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
+                  int flags, struct nameidata *nd)
+ {
+ 	struct path path = {
+-		.mnt = nd->path.mnt,
++		.mnt = nd->mnt,
+ 		.dentry = dentry,
+ 	};
+ 	struct nfs4_state *state;
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index e9b2017..ef60329 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -201,7 +201,7 @@ static match_table_t nfs_secflavor_tokens = {
+ };
+ 
+ 
+-static void nfs_umount_begin(struct super_block *);
++static void nfs_umount_begin(struct vfsmount *vfsmnt, int flags);
+ static int  nfs_statfs(struct dentry *, struct kstatfs *);
+ static int  nfs_show_options(struct seq_file *, struct vfsmount *);
+ static int  nfs_show_stats(struct seq_file *, struct vfsmount *);
+@@ -287,10 +287,7 @@ static const struct super_operations nfs4_sops = {
+ };
+ #endif
+ 
+-static struct shrinker acl_shrinker = {
+-	.shrink		= nfs_access_cache_shrinker,
+-	.seeks		= DEFAULT_SEEKS,
+-};
++static struct shrinker *acl_shrinker;
+ 
+ /*
+  * Register the NFS filesystems
+@@ -299,7 +296,7 @@ int __init register_nfs_fs(void)
+ {
+ 	int ret;
+ 
+-        ret = register_filesystem(&nfs_fs_type);
++	ret = register_filesystem(&nfs_fs_type);
+ 	if (ret < 0)
+ 		goto error_0;
+ 
+@@ -311,7 +308,10 @@ int __init register_nfs_fs(void)
+ 	if (ret < 0)
+ 		goto error_2;
+ #endif
+-	register_shrinker(&acl_shrinker);
++	ret = init_mnt_writers();
++	if (ret)
++		printk(KERN_WARNING "Couldn't init mnt_writers\n");
++	acl_shrinker = set_shrinker(DEFAULT_SEEKS, nfs_access_cache_shrinker);
+ 	return 0;
+ 
+ #ifdef CONFIG_NFS_V4
+@@ -329,7 +329,8 @@ error_0:
+  */
+ void __exit unregister_nfs_fs(void)
+ {
+-	unregister_shrinker(&acl_shrinker);
++	if (acl_shrinker != NULL)
++		remove_shrinker(acl_shrinker);
+ #ifdef CONFIG_NFS_V4
+ 	unregister_filesystem(&nfs4_fs_type);
+ #endif
+@@ -649,11 +650,13 @@ static int nfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
+  * Begin unmount by attempting to remove all automounted mountpoints we added
+  * in response to xdev traversals and referrals
+  */
+-static void nfs_umount_begin(struct super_block *sb)
++static void nfs_umount_begin(struct vfsmount *vfsmnt, int flags)
+ {
+-	struct nfs_server *server = NFS_SB(sb);
++	struct nfs_server *server = NFS_SB(vfsmnt->mnt_sb);
+ 	struct rpc_clnt *rpc;
+ 
++	if (!(flags & MNT_FORCE))
++		return;
+ 	/* -EIO all pending I/O */
+ 	rpc = server->client_acl;
+ 	if (!IS_ERR(rpc))
+diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
+index 9dc036f..860d944 100644
+--- a/fs/nfsd/export.c
++++ b/fs/nfsd/export.c
+@@ -168,14 +168,15 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
+ 			goto out;
+ 
+ 		dprintk("Found the path %s\n", buf);
+-		key.ek_path = nd.path;
++		key.ek_path.dentry = nd.dentry;
++		key.ek_path.mnt = nd.mnt;
+ 
+ 		ek = svc_expkey_update(&key, ek);
+ 		if (ek)
+ 			cache_put(&ek->h, &svc_expkey_cache);
+ 		else
+ 			err = -ENOMEM;
+-		path_put(&nd.path);
++		backport_path_put(&nd);
+ 	}
+ 	cache_flush();
+  out:
+@@ -204,7 +205,7 @@ static int expkey_show(struct seq_file *m,
+ 	if (test_bit(CACHE_VALID, &h->flags) && 
+ 	    !test_bit(CACHE_NEGATIVE, &h->flags)) {
+ 		seq_printf(m, " ");
+-		seq_path(m, &ek->ek_path, "\\ \t\n");
++		seq_path(m, ek->ek_path.mnt, ek->ek_path.dentry, "\\ \t\n");
+ 	}
+ 	seq_printf(m, "\n");
+ 	return 0;
+@@ -346,7 +347,7 @@ static void svc_export_request(struct cache_detail *cd,
+ 	char *pth;
+ 
+ 	qword_add(bpp, blen, exp->ex_client->name);
+-	pth = d_path(&exp->ex_path, *bpp, *blen);
++	pth = d_path(exp->ex_path.dentry, exp->ex_path.mnt, *bpp, *blen);
+ 	if (IS_ERR(pth)) {
+ 		/* is this correct? */
+ 		(*bpp)[0] = '\n';
+@@ -385,7 +386,7 @@ static int check_export(struct inode *inode, int flags, unsigned char *uuid)
+ 	}
+ 
+ 	if (!inode->i_sb->s_export_op ||
+-	    !inode->i_sb->s_export_op->fh_to_dentry) {
++	    !inode->i_sb->s_export_op->get_dentry) {
+ 		dprintk("exp_export: export of invalid fs type.\n");
+ 		return -EINVAL;
+ 	}
+@@ -504,7 +505,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
+ 	struct svc_export exp, *expp;
+ 	int an_int;
+ 
+-	nd.path.dentry = NULL;
++	nd.dentry = NULL;
+ 	exp.ex_pathname = NULL;
+ 
+ 	/* fs locations */
+@@ -544,8 +545,8 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
+ 
+ 	exp.h.flags = 0;
+ 	exp.ex_client = dom;
+-	exp.ex_path.mnt = nd.path.mnt;
+-	exp.ex_path.dentry = nd.path.dentry;
++	exp.ex_path.mnt = nd.mnt;
++	exp.ex_path.dentry = nd.dentry;
+ 	exp.ex_pathname = kstrdup(buf, GFP_KERNEL);
+ 	err = -ENOMEM;
+ 	if (!exp.ex_pathname)
+@@ -607,7 +608,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
+ 				goto out;
+ 		}
+ 
+-		err = check_export(nd.path.dentry->d_inode, exp.ex_flags,
++		err = check_export(nd.dentry->d_inode, exp.ex_flags,
+ 				   exp.ex_uuid);
+ 		if (err) goto out;
+ 	}
+@@ -626,8 +627,8 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
+ 	nfsd4_fslocs_free(&exp.ex_fslocs);
+ 	kfree(exp.ex_uuid);
+ 	kfree(exp.ex_pathname);
+-	if (nd.path.dentry)
+-		path_put(&nd.path);
++	if (nd.dentry)
++		backport_path_put(&nd);
+  out_no_path:
+ 	if (dom)
+ 		auth_domain_put(dom);
+@@ -650,7 +651,7 @@ static int svc_export_show(struct seq_file *m,
+ 		return 0;
+ 	}
+ 	exp = container_of(h, struct svc_export, h);
+-	seq_path(m, &exp->ex_path, " \t\n\\");
++	seq_path(m, exp->ex_path.mnt, exp->ex_path.dentry, " \t\n\\");
+ 	seq_putc(m, '\t');
+ 	seq_escape(m, exp->ex_client->name, " \t\n\\");
+ 	seq_putc(m, '(');
+@@ -672,6 +673,7 @@ static int svc_export_show(struct seq_file *m,
+ 	seq_puts(m, ")\n");
+ 	return 0;
+ }
++
+ static int svc_export_match(struct cache_head *a, struct cache_head *b)
+ {
+ 	struct svc_export *orig = container_of(a, struct svc_export, h);
+@@ -1026,7 +1028,7 @@ exp_export(struct nfsctl_export *nxp)
+ 		goto out_put_clp;
+ 	err = -EINVAL;
+ 
+-	exp = exp_get_by_name(clp, nd.path.mnt, nd.path.dentry, NULL);
++	exp = exp_get_by_name(clp, nd.mnt, nd.dentry, NULL);
+ 
+ 	memset(&new, 0, sizeof(new));
+ 
+@@ -1034,8 +1036,8 @@ exp_export(struct nfsctl_export *nxp)
+ 	if ((nxp->ex_flags & NFSEXP_FSID) &&
+ 	    (!IS_ERR(fsid_key = exp_get_fsid_key(clp, nxp->ex_dev))) &&
+ 	    fsid_key->ek_path.mnt &&
+-	    (fsid_key->ek_path.mnt != nd.path.mnt ||
+-	     fsid_key->ek_path.dentry != nd.path.dentry))
++	    (fsid_key->ek_path.mnt != nd.mnt ||
++	     fsid_key->ek_path.dentry != nd.dentry))
+ 		goto finish;
+ 
+ 	if (!IS_ERR(exp)) {
+@@ -1051,7 +1053,7 @@ exp_export(struct nfsctl_export *nxp)
+ 		goto finish;
+ 	}
+ 
+-	err = check_export(nd.path.dentry->d_inode, nxp->ex_flags, NULL);
++	err = check_export(nd.dentry->d_inode, nxp->ex_flags, NULL);
+ 	if (err) goto finish;
+ 
+ 	err = -ENOMEM;
+@@ -1064,7 +1066,8 @@ exp_export(struct nfsctl_export *nxp)
+ 	if (!new.ex_pathname)
+ 		goto finish;
+ 	new.ex_client = clp;
+-	new.ex_path = nd.path;
++	new.ex_path.mnt = nd.mnt;
++	new.ex_path.dentry = nd.dentry;
+ 	new.ex_flags = nxp->ex_flags;
+ 	new.ex_anon_uid = nxp->ex_anon_uid;
+ 	new.ex_anon_gid = nxp->ex_anon_gid;
+@@ -1090,7 +1093,7 @@ finish:
+ 		exp_put(exp);
+ 	if (fsid_key && !IS_ERR(fsid_key))
+ 		cache_put(&fsid_key->h, &svc_expkey_cache);
+-	path_put(&nd.path);
++	backport_path_put(&nd);
+ out_put_clp:
+ 	auth_domain_put(clp);
+ out_unlock:
+@@ -1143,8 +1146,8 @@ exp_unexport(struct nfsctl_export *nxp)
+ 		goto out_domain;
+ 
+ 	err = -EINVAL;
+-	exp = exp_get_by_name(dom, nd.path.mnt, nd.path.dentry, NULL);
+-	path_put(&nd.path);
++	exp = exp_get_by_name(dom, nd.mnt, nd.dentry, NULL);
++	backport_path_put(&nd);
+ 	if (IS_ERR(exp))
+ 		goto out_domain;
+ 
+@@ -1180,12 +1183,12 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
+ 		printk("nfsd: exp_rootfh path not found %s", path);
+ 		return err;
+ 	}
+-	inode = nd.path.dentry->d_inode;
++	inode = nd.dentry->d_inode;
+ 
+ 	dprintk("nfsd: exp_rootfh(%s [%p] %s:%s/%ld)\n",
+-		 path, nd.path.dentry, clp->name,
++		 path, nd.dentry, clp->name,
+ 		 inode->i_sb->s_id, inode->i_ino);
+-	exp = exp_parent(clp, nd.path.mnt, nd.path.dentry, NULL);
++	exp = exp_parent(clp, nd.mnt, nd.dentry, NULL);
+ 	if (IS_ERR(exp)) {
+ 		err = PTR_ERR(exp);
+ 		goto out;
+@@ -1195,7 +1198,7 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
+ 	 * fh must be initialized before calling fh_compose
+ 	 */
+ 	fh_init(&fh, maxsize);
+-	if (fh_compose(&fh, exp, nd.path.dentry, NULL))
++	if (fh_compose(&fh, exp, nd.dentry, NULL))
+ 		err = -EINVAL;
+ 	else
+ 		err = 0;
+@@ -1203,7 +1206,7 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
+ 	fh_put(&fh);
+ 	exp_put(exp);
+ out:
+-	path_put(&nd.path);
++	backport_path_put(&nd);
+ 	return err;
+ }
+ 
+diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
+index 145b3c8..ad22c29 100644
+--- a/fs/nfsd/nfs4recover.c
++++ b/fs/nfsd/nfs4recover.c
+@@ -121,9 +121,9 @@ out_no_tfm:
+ static void
+ nfsd4_sync_rec_dir(void)
+ {
+-	mutex_lock(&rec_dir.path.dentry->d_inode->i_mutex);
+-	nfsd_sync_dir(rec_dir.path.dentry);
+-	mutex_unlock(&rec_dir.path.dentry->d_inode->i_mutex);
++	mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
++	nfsd_sync_dir(rec_dir.dentry);
++	mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
+ }
+ 
+ int
+@@ -143,9 +143,9 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
+ 	nfs4_save_user(&uid, &gid);
+ 
+ 	/* lock the parent */
+-	mutex_lock(&rec_dir.path.dentry->d_inode->i_mutex);
++	mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
+ 
+-	dentry = lookup_one_len(dname, rec_dir.path.dentry, HEXDIR_LEN-1);
++	dentry = lookup_one_len(dname, rec_dir.dentry, HEXDIR_LEN-1);
+ 	if (IS_ERR(dentry)) {
+ 		status = PTR_ERR(dentry);
+ 		goto out_unlock;
+@@ -155,15 +155,15 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
+ 		dprintk("NFSD: nfsd4_create_clid_dir: DIRECTORY EXISTS\n");
+ 		goto out_put;
+ 	}
+-	status = mnt_want_write(rec_dir.path.mnt);
++	status = mnt_want_write(rec_dir.mnt);
+ 	if (status)
+ 		goto out_put;
+-	status = vfs_mkdir(rec_dir.path.dentry->d_inode, dentry, S_IRWXU);
+-	mnt_drop_write(rec_dir.path.mnt);
++	status = vfs_mkdir(rec_dir.dentry->d_inode, dentry, S_IRWXU);
++	mnt_drop_write(rec_dir.mnt);
+ out_put:
+ 	dput(dentry);
+ out_unlock:
+-	mutex_unlock(&rec_dir.path.dentry->d_inode->i_mutex);
++	mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
+ 	if (status == 0) {
+ 		clp->cl_firststate = 1;
+ 		nfsd4_sync_rec_dir();
+@@ -226,7 +226,7 @@ nfsd4_list_rec_dir(struct dentry *dir, recdir_func *f)
+ 
+ 	nfs4_save_user(&uid, &gid);
+ 
+-	filp = dentry_open(dget(dir), mntget(rec_dir.path.mnt), O_RDONLY);
++	filp = dentry_open(dget(dir), mntget(rec_dir.mnt), O_RDONLY);
+ 	status = PTR_ERR(filp);
+ 	if (IS_ERR(filp))
+ 		goto out;
+@@ -291,9 +291,9 @@ nfsd4_unlink_clid_dir(char *name, int namlen)
+ 
+ 	dprintk("NFSD: nfsd4_unlink_clid_dir. name %.*s\n", namlen, name);
+ 
+-	mutex_lock(&rec_dir.path.dentry->d_inode->i_mutex);
+-	dentry = lookup_one_len(name, rec_dir.path.dentry, namlen);
+-	mutex_unlock(&rec_dir.path.dentry->d_inode->i_mutex);
++	mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
++	dentry = lookup_one_len(name, rec_dir.dentry, namlen);
++	mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
+ 	if (IS_ERR(dentry)) {
+ 		status = PTR_ERR(dentry);
+ 		return status;
+@@ -302,7 +302,7 @@ nfsd4_unlink_clid_dir(char *name, int namlen)
+ 	if (!dentry->d_inode)
+ 		goto out;
+ 
+-	status = nfsd4_clear_clid_dir(rec_dir.path.dentry, dentry);
++	status = nfsd4_clear_clid_dir(rec_dir.dentry, dentry);
+ out:
+ 	dput(dentry);
+ 	return status;
+@@ -318,7 +318,7 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp)
+ 	if (!rec_dir_init || !clp->cl_firststate)
+ 		return;
+ 
+-	status = mnt_want_write(rec_dir.path.mnt);
++	status = mnt_want_write(rec_dir.mnt);
+ 	if (status)
+ 		goto out;
+ 	clp->cl_firststate = 0;
+@@ -327,7 +327,7 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp)
+ 	nfs4_reset_user(uid, gid);
+ 	if (status == 0)
+ 		nfsd4_sync_rec_dir();
+-	mnt_drop_write(rec_dir.path.mnt);
++	mnt_drop_write(rec_dir.mnt);
+ out:
+ 	if (status)
+ 		printk("NFSD: Failed to remove expired client state directory"
+@@ -357,17 +357,17 @@ nfsd4_recdir_purge_old(void) {
+ 
+ 	if (!rec_dir_init)
+ 		return;
+-	status = mnt_want_write(rec_dir.path.mnt);
++	status = mnt_want_write(rec_dir.mnt);
+ 	if (status)
+ 		goto out;
+-	status = nfsd4_list_rec_dir(rec_dir.path.dentry, purge_old);
++	status = nfsd4_list_rec_dir(rec_dir.dentry, purge_old);
+ 	if (status == 0)
+ 		nfsd4_sync_rec_dir();
+-	mnt_drop_write(rec_dir.path.mnt);
++	mnt_drop_write(rec_dir.mnt);
+ out:
+ 	if (status)
+ 		printk("nfsd4: failed to purge old clients from recovery"
+-			" directory %s\n", rec_dir.path.dentry->d_name.name);
++			" directory %s\n", rec_dir.dentry->d_name.name);
+ }
+ 
+ static int
+@@ -387,10 +387,10 @@ int
+ nfsd4_recdir_load(void) {
+ 	int status;
+ 
+-	status = nfsd4_list_rec_dir(rec_dir.path.dentry, load_recdir);
++	status = nfsd4_list_rec_dir(rec_dir.dentry, load_recdir);
+ 	if (status)
+ 		printk("nfsd4: failed loading clients from recovery"
+-			" directory %s\n", rec_dir.path.dentry->d_name.name);
++			" directory %s\n", rec_dir.dentry->d_name.name);
+ 	return status;
+ }
+ 
+@@ -429,5 +429,5 @@ nfsd4_shutdown_recdir(void)
+ 	if (!rec_dir_init)
+ 		return;
+ 	rec_dir_init = 0;
+-	path_put(&rec_dir.path);
++	backport_path_put(&rec_dir);
+ }
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 1578d7a..f72b403 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -3294,11 +3294,11 @@ nfs4_reset_recoverydir(char *recdir)
+ 	if (status)
+ 		return status;
+ 	status = -ENOTDIR;
+-	if (S_ISDIR(nd.path.dentry->d_inode->i_mode)) {
++	if (S_ISDIR(nd.dentry->d_inode->i_mode)) {
+ 		nfs4_set_recdir(recdir);
+ 		status = 0;
+ 	}
+-	path_put(&nd.path);
++	backport_path_put(&nd);
+ 	return status;
+ }
+ 
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index c53e65f..fc2871b 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -121,7 +121,7 @@ static ssize_t (*write_op[])(struct file *, char *, size_t) = {
+ 
+ static ssize_t nfsctl_transaction_write(struct file *file, const char __user *buf, size_t size, loff_t *pos)
+ {
+-	ino_t ino =  file->f_path.dentry->d_inode->i_ino;
++	ino_t ino = file->f_dentry->d_inode->i_ino;
+ 	char *data;
+ 	ssize_t rv;
+ 
+@@ -360,9 +360,9 @@ static ssize_t failover_unlock_fs(struct file *file, char *buf, size_t size)
+ 	if (error)
+ 		return error;
+ 
+-	error = nlmsvc_unlock_all_by_sb(nd.path.mnt->mnt_sb);
++	error = nlmsvc_unlock_all_by_sb(nd.mnt->mnt_sb);
+ 
+-	path_put(&nd.path);
++	backport_path_put(&nd);
+ 	return error;
+ }
+ 
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 18060be..36c4d71 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -23,7 +23,6 @@
+ #include <linux/file.h>
+ #include <linux/mount.h>
+ #include <linux/major.h>
+-#include <linux/splice.h>
+ #include <linux/proc_fs.h>
+ #include <linux/stat.h>
+ #include <linux/fcntl.h>
+@@ -828,53 +827,33 @@ found:
+ 	return ra;
+ }
+ 
+-/*
+- * Grab and keep cached pages associated with a file in the svc_rqst
+- * so that they can be passed to the network sendmsg/sendpage routines
+- * directly. They will be released after the sending has completed.
+- */
+ static int
+-nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
+-		  struct splice_desc *sd)
++nfsd_read_actor(read_descriptor_t *desc, struct page *page, unsigned long offset, unsigned long size)
+ {
+-	struct svc_rqst *rqstp = sd->u.data;
+-	struct page **pp = rqstp->rq_respages + rqstp->rq_resused;
+-	struct page *page = buf->page;
+-	size_t size;
+-	int ret;
++	unsigned long count = desc->count;
++	struct svc_rqst *rqstp = desc->arg.data;
+ 
+-	ret = buf->ops->confirm(pipe, buf);
+-	if (unlikely(ret))
+-		return ret;
+-
+-	size = sd->len;
++	if (size > count)
++		size = count;
+ 
+ 	if (rqstp->rq_res.page_len == 0) {
+ 		get_page(page);
+-		put_page(*pp);
+-		*pp = page;
+-		rqstp->rq_resused++;
+-		rqstp->rq_res.page_base = buf->offset;
++		rqstp->rq_respages[rqstp->rq_resused++] = page;
++		rqstp->rq_res.page_base = offset;
+ 		rqstp->rq_res.page_len = size;
+-	} else if (page != pp[-1]) {
++	} else if (page != rqstp->rq_respages[rqstp->rq_resused-1]) {
+ 		get_page(page);
+-		if (*pp)
+-			put_page(*pp);
+-		*pp = page;
+-		rqstp->rq_resused++;
++		rqstp->rq_respages[rqstp->rq_resused++] = page;
+ 		rqstp->rq_res.page_len += size;
+-	} else
++	} else {
+ 		rqstp->rq_res.page_len += size;
++	}
+ 
++	desc->count = count - size;
++	desc->written += size;
+ 	return size;
+ }
+ 
+-static int nfsd_direct_splice_actor(struct pipe_inode_info *pipe,
+-				    struct splice_desc *sd)
+-{
+-	return __splice_from_pipe(pipe, sd, nfsd_splice_actor);
+-}
+-
+ static inline int svc_msnfs(struct svc_fh *ffhp)
+ {
+ #ifdef MSNFS
+@@ -906,16 +885,9 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+ 	if (ra && ra->p_set)
+ 		file->f_ra = ra->p_ra;
+ 
+-	if (file->f_op->splice_read && rqstp->rq_splice_ok) {
+-		struct splice_desc sd = {
+-			.len		= 0,
+-			.total_len	= *count,
+-			.pos		= offset,
+-			.u.data		= rqstp,
+-		};
+-
++	if (file->f_op->sendfile && rqstp->rq_sendfile_ok) {
+ 		rqstp->rq_resused = 1;
+-		host_err = splice_direct_to_actor(file, &sd, nfsd_direct_splice_actor);
++		host_err = file->f_op->sendfile(file, &offset, *count, nfsd_read_actor, rqstp);
+ 	} else {
+ 		oldfs = get_fs();
+ 		set_fs(KERNEL_DS);
+diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
+index 27e772c..d932fb1 100644
+--- a/include/linux/exportfs.h
++++ b/include/linux/exportfs.h
+@@ -89,85 +89,9 @@ struct fid {
+ 	};
+ };
+ 
+-/**
+- * struct export_operations - for nfsd to communicate with file systems
+- * @encode_fh:      encode a file handle fragment from a dentry
+- * @fh_to_dentry:   find the implied object and get a dentry for it
+- * @fh_to_parent:   find the implied object's parent and get a dentry for it
+- * @get_name:       find the name for a given inode in a given directory
+- * @get_parent:     find the parent of a given directory
+- *
+- * See Documentation/filesystems/Exporting for details on how to use
+- * this interface correctly.
+- *
+- * encode_fh:
+- *    @encode_fh should store in the file handle fragment @fh (using at most
+- *    @max_len bytes) information that can be used by @decode_fh to recover the
+- *    file refered to by the &struct dentry @de.  If the @connectable flag is
+- *    set, the encode_fh() should store sufficient information so that a good
+- *    attempt can be made to find not only the file but also it's place in the
+- *    filesystem.   This typically means storing a reference to de->d_parent in
+- *    the filehandle fragment.  encode_fh() should return the number of bytes
+- *    stored or a negative error code such as %-ENOSPC
+- *
+- * fh_to_dentry:
+- *    @fh_to_dentry is given a &struct super_block (@sb) and a file handle
+- *    fragment (@fh, @fh_len). It should return a &struct dentry which refers
+- *    to the same file that the file handle fragment refers to.  If it cannot,
+- *    it should return a %NULL pointer if the file was found but no acceptable
+- *    &dentries were available, or an %ERR_PTR error code indicating why it
+- *    couldn't be found (e.g. %ENOENT or %ENOMEM).  Any suitable dentry can be
+- *    returned including, if necessary, a new dentry created with d_alloc_root.
+- *    The caller can then find any other extant dentries by following the
+- *    d_alias links.
+- *
+- * fh_to_parent:
+- *    Same as @fh_to_dentry, except that it returns a pointer to the parent
+- *    dentry if it was encoded into the filehandle fragment by @encode_fh.
+- *
+- * get_name:
+- *    @get_name should find a name for the given @child in the given @parent
+- *    directory.  The name should be stored in the @name (with the
+- *    understanding that it is already pointing to a a %NAME_MAX+1 sized
+- *    buffer.   get_name() should return %0 on success, a negative error code
+- *    or error.  @get_name will be called without @parent->i_mutex held.
+- *
+- * get_parent:
+- *    @get_parent should find the parent directory for the given @child which
+- *    is also a directory.  In the event that it cannot be found, or storage
+- *    space cannot be allocated, a %ERR_PTR should be returned.
+- *
+- * Locking rules:
+- *    get_parent is called with child->d_inode->i_mutex down
+- *    get_name is not (which is possibly inconsistent)
+- */
+-
+-struct export_operations {
+-	int (*encode_fh)(struct dentry *de, __u32 *fh, int *max_len,
+-			int connectable);
+-	struct dentry * (*fh_to_dentry)(struct super_block *sb, struct fid *fid,
+-			int fh_len, int fh_type);
+-	struct dentry * (*fh_to_parent)(struct super_block *sb, struct fid *fid,
+-			int fh_len, int fh_type);
+-	int (*get_name)(struct dentry *parent, char *name,
+-			struct dentry *child);
+-	struct dentry * (*get_parent)(struct dentry *child);
+-};
+-
+ extern int exportfs_encode_fh(struct dentry *dentry, struct fid *fid,
+ 	int *max_len, int connectable);
+ extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
+ 	int fh_len, int fileid_type, int (*acceptable)(void *, struct dentry *),
+ 	void *context);
+-
+-/*
+- * Generic helpers for filesystems.
+- */
+-extern struct dentry *generic_fh_to_dentry(struct super_block *sb,
+-	struct fid *fid, int fh_len, int fh_type,
+-	struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
+-extern struct dentry *generic_fh_to_parent(struct super_block *sb,
+-	struct fid *fid, int fh_len, int fh_type,
+-	struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
+-
+ #endif /* LINUX_EXPORTFS_H */
+diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
+index 78a5922..3370498 100644
+--- a/include/linux/nfs_fs.h
++++ b/include/linux/nfs_fs.h
+@@ -331,7 +331,7 @@ extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *);
+ extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr);
+ extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr);
+ extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
+-extern int nfs_permission(struct inode *, int);
++extern int nfs_permission(struct inode *, int, struct nameidata *);
+ extern int nfs_open(struct inode *, struct file *);
+ extern int nfs_release(struct inode *, struct file *);
+ extern int nfs_attribute_timeout(struct inode *inode);
+diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
+deleted file mode 100644
+index 8e41202..0000000
+--- a/include/linux/pipe_fs_i.h
++++ /dev/null
+@@ -1,151 +0,0 @@
+-#ifndef _LINUX_PIPE_FS_I_H
+-#define _LINUX_PIPE_FS_I_H
+-
+-#define PIPEFS_MAGIC 0x50495045
+-
+-#define PIPE_BUFFERS (16)
+-
+-#define PIPE_BUF_FLAG_LRU	0x01	/* page is on the LRU */
+-#define PIPE_BUF_FLAG_ATOMIC	0x02	/* was atomically mapped */
+-#define PIPE_BUF_FLAG_GIFT	0x04	/* page is a gift */
+-
+-/**
+- *	struct pipe_buffer - a linux kernel pipe buffer
+- *	@page: the page containing the data for the pipe buffer
+- *	@offset: offset of data inside the @page
+- *	@len: length of data inside the @page
+- *	@ops: operations associated with this buffer. See @pipe_buf_operations.
+- *	@flags: pipe buffer flags. See above.
+- *	@private: private data owned by the ops.
+- **/
+-struct pipe_buffer {
+-	struct page *page;
+-	unsigned int offset, len;
+-	const struct pipe_buf_operations *ops;
+-	unsigned int flags;
+-	unsigned long private;
+-};
+-
+-/**
+- *	struct pipe_inode_info - a linux kernel pipe
+- *	@wait: reader/writer wait point in case of empty/full pipe
+- *	@nrbufs: the number of non-empty pipe buffers in this pipe
+- *	@curbuf: the current pipe buffer entry
+- *	@tmp_page: cached released page
+- *	@readers: number of current readers of this pipe
+- *	@writers: number of current writers of this pipe
+- *	@waiting_writers: number of writers blocked waiting for room
+- *	@r_counter: reader counter
+- *	@w_counter: writer counter
+- *	@fasync_readers: reader side fasync
+- *	@fasync_writers: writer side fasync
+- *	@inode: inode this pipe is attached to
+- *	@bufs: the circular array of pipe buffers
+- **/
+-struct pipe_inode_info {
+-	wait_queue_head_t wait;
+-	unsigned int nrbufs, curbuf;
+-	struct page *tmp_page;
+-	unsigned int readers;
+-	unsigned int writers;
+-	unsigned int waiting_writers;
+-	unsigned int r_counter;
+-	unsigned int w_counter;
+-	struct fasync_struct *fasync_readers;
+-	struct fasync_struct *fasync_writers;
+-	struct inode *inode;
+-	struct pipe_buffer bufs[PIPE_BUFFERS];
+-};
+-
+-/*
+- * Note on the nesting of these functions:
+- *
+- * ->confirm()
+- *	->steal()
+- *	...
+- *	->map()
+- *	...
+- *	->unmap()
+- *
+- * That is, ->map() must be called on a confirmed buffer,
+- * same goes for ->steal(). See below for the meaning of each
+- * operation. Also see kerneldoc in fs/pipe.c for the pipe
+- * and generic variants of these hooks.
+- */
+-struct pipe_buf_operations {
+-	/*
+-	 * This is set to 1, if the generic pipe read/write may coalesce
+-	 * data into an existing buffer. If this is set to 0, a new pipe
+-	 * page segment is always used for new data.
+-	 */
+-	int can_merge;
+-
+-	/*
+-	 * ->map() returns a virtual address mapping of the pipe buffer.
+-	 * The last integer flag reflects whether this should be an atomic
+-	 * mapping or not. The atomic map is faster, however you can't take
+-	 * page faults before calling ->unmap() again. So if you need to eg
+-	 * access user data through copy_to/from_user(), then you must get
+-	 * a non-atomic map. ->map() uses the KM_USER0 atomic slot for
+-	 * atomic maps, so you can't map more than one pipe_buffer at once
+-	 * and you have to be careful if mapping another page as source
+-	 * or destination for a copy (IOW, it has to use something else
+-	 * than KM_USER0).
+-	 */
+-	void * (*map)(struct pipe_inode_info *, struct pipe_buffer *, int);
+-
+-	/*
+-	 * Undoes ->map(), finishes the virtual mapping of the pipe buffer.
+-	 */
+-	void (*unmap)(struct pipe_inode_info *, struct pipe_buffer *, void *);
+-
+-	/*
+-	 * ->confirm() verifies that the data in the pipe buffer is there
+-	 * and that the contents are good. If the pages in the pipe belong
+-	 * to a file system, we may need to wait for IO completion in this
+-	 * hook. Returns 0 for good, or a negative error value in case of
+-	 * error.
+-	 */
+-	int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *);
+-
+-	/*
+-	 * When the contents of this pipe buffer has been completely
+-	 * consumed by a reader, ->release() is called.
+-	 */
+-	void (*release)(struct pipe_inode_info *, struct pipe_buffer *);
+-
+-	/*
+-	 * Attempt to take ownership of the pipe buffer and its contents.
+-	 * ->steal() returns 0 for success, in which case the contents
+-	 * of the pipe (the buf->page) is locked and now completely owned
+-	 * by the caller. The page may then be transferred to a different
+-	 * mapping, the most often used case is insertion into different
+-	 * file address space cache.
+-	 */
+-	int (*steal)(struct pipe_inode_info *, struct pipe_buffer *);
+-
+-	/*
+-	 * Get a reference to the pipe buffer.
+-	 */
+-	void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
+-};
+-
+-/* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual
+-   memory allocation, whereas PIPE_BUF makes atomicity guarantees.  */
+-#define PIPE_SIZE		PAGE_SIZE
+-
+-/* Drop the inode semaphore and wait for a pipe event, atomically */
+-void pipe_wait(struct pipe_inode_info *pipe);
+-
+-struct pipe_inode_info * alloc_pipe_info(struct inode * inode);
+-void free_pipe_info(struct inode * inode);
+-void __free_pipe_info(struct pipe_inode_info *);
+-
+-/* Generic pipe buffer ops functions */
+-void *generic_pipe_buf_map(struct pipe_inode_info *, struct pipe_buffer *, int);
+-void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void *);
+-void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
+-int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
+-int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+-
+-#endif
+diff --git a/include/linux/splice.h b/include/linux/splice.h
+deleted file mode 100644
+index 528dcb9..0000000
+--- a/include/linux/splice.h
++++ /dev/null
+@@ -1,74 +0,0 @@
+-/*
+- * Function declerations and data structures related to the splice
+- * implementation.
+- *
+- * Copyright (C) 2007 Jens Axboe <jens.axboe at oracle.com>
+- *
+- */
+-#ifndef SPLICE_H
+-#define SPLICE_H
+-
+-#include <linux/pipe_fs_i.h>
+-
+-/*
+- * splice is tied to pipes as a transport (at least for now), so we'll just
+- * add the splice flags here.
+- */
+-#define SPLICE_F_MOVE	(0x01)	/* move pages instead of copying */
+-#define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */
+-				 /* we may still block on the fd we splice */
+-				 /* from/to, of course */
+-#define SPLICE_F_MORE	(0x04)	/* expect more data */
+-#define SPLICE_F_GIFT	(0x08)	/* pages passed in are a gift */
+-
+-/*
+- * Passed to the actors
+- */
+-struct splice_desc {
+-	unsigned int len, total_len;	/* current and remaining length */
+-	unsigned int flags;		/* splice flags */
+-	/*
+-	 * actor() private data
+-	 */
+-	union {
+-		void __user *userptr;	/* memory to write to */
+-		struct file *file;	/* file to read/write */
+-		void *data;		/* cookie */
+-	} u;
+-	loff_t pos;			/* file position */
+-};
+-
+-struct partial_page {
+-	unsigned int offset;
+-	unsigned int len;
+-	unsigned long private;
+-};
+-
+-/*
+- * Passed to splice_to_pipe
+- */
+-struct splice_pipe_desc {
+-	struct page **pages;		/* page map */
+-	struct partial_page *partial;	/* pages[] may not be contig */
+-	int nr_pages;			/* number of pages in map */
+-	unsigned int flags;		/* splice flags */
+-	const struct pipe_buf_operations *ops;/* ops associated with output pipe */
+-	void (*spd_release)(struct splice_pipe_desc *, unsigned int);
+-};
+-
+-typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
+-			   struct splice_desc *);
+-typedef int (splice_direct_actor)(struct pipe_inode_info *,
+-				  struct splice_desc *);
+-
+-extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *,
+-				loff_t *, size_t, unsigned int,
+-				splice_actor *);
+-extern ssize_t __splice_from_pipe(struct pipe_inode_info *,
+-				  struct splice_desc *, splice_actor *);
+-extern ssize_t splice_to_pipe(struct pipe_inode_info *,
+-			      struct splice_pipe_desc *);
+-extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
+-				      splice_direct_actor *);
+-
+-#endif
+diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
+index dc69068..3a0f48f 100644
+--- a/include/linux/sunrpc/svc.h
++++ b/include/linux/sunrpc/svc.h
+@@ -255,7 +255,7 @@ struct svc_rqst {
+ 						 * determine what device number
+ 						 * to report (real or virtual)
+ 						 */
+-	int			rq_splice_ok;   /* turned off in gss privacy
++	int			rq_sendfile_ok;   /* turned off in gss privacy
+ 						 * to prevent encrypting page
+ 						 * cache pages */
+ 	wait_queue_head_t	rq_wait;	/* synchronization */
+diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
+index 6bfea9e..f0a110d 100644
+--- a/net/sunrpc/auth.c
++++ b/net/sunrpc/auth.c
+@@ -566,19 +566,16 @@ rpcauth_uptodatecred(struct rpc_task *task)
+ 		test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0;
+ }
+ 
+-static struct shrinker rpc_cred_shrinker = {
+-	.shrink = rpcauth_cache_shrinker,
+-	.seeks = DEFAULT_SEEKS,
+-};
++static struct shrinker *rpc_cred_shrinker;
+ 
+ void __init rpcauth_init_module(void)
+ {
+ 	rpc_init_authunix();
+ 	rpc_init_generic_auth();
+-	register_shrinker(&rpc_cred_shrinker);
++	rpc_cred_shrinker = set_shrinker(DEFAULT_SEEKS, rpcauth_cache_shrinker);
+ }
+ 
+ void __exit rpcauth_remove_module(void)
+ {
+-	unregister_shrinker(&rpc_cred_shrinker);
++	remove_shrinker(rpc_cred_shrinker);
+ }
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index 81ae3d6..acfb1d1 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -859,7 +859,7 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
+ 	u32 priv_len, maj_stat;
+ 	int pad, saved_len, remaining_len, offset;
+ 
+-	rqstp->rq_splice_ok = 0;
++	rqstp->rq_sendfile_ok = 0;
+ 
+ 	priv_len = svc_getnl(&buf->head[0]);
+ 	if (rqstp->rq_deferred) {
+diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
+index 50b049c..5053a5f 100644
+--- a/net/sunrpc/stats.c
++++ b/net/sunrpc/stats.c
+@@ -264,7 +264,7 @@ rpc_proc_init(void)
+ 	dprintk("RPC:       registering /proc/net/rpc\n");
+ 	if (!proc_net_rpc) {
+ 		struct proc_dir_entry *ent;
+-		ent = proc_mkdir("rpc", init_net.proc_net);
++		ent = proc_mkdir("rpc", proc_net);
+ 		if (ent) {
+ 			ent->owner = THIS_MODULE;
+ 			proc_net_rpc = ent;
+@@ -278,7 +278,7 @@ rpc_proc_exit(void)
+ 	dprintk("RPC:       unregistering /proc/net/rpc\n");
+ 	if (proc_net_rpc) {
+ 		proc_net_rpc = NULL;
+-		remove_proc_entry("rpc", init_net.proc_net);
++		remove_proc_entry("rpc", proc_net);
+ 	}
+ }
+ 
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index 5a32cb7..61a5616 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -310,13 +310,12 @@ svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
+ 	switch (m->mode) {
+ 	case SVC_POOL_PERCPU:
+ 	{
+-		set_cpus_allowed_ptr(task, &cpumask_of_cpu(node));
++		set_cpus_allowed(task, cpumask_of_cpu(node));
+ 		break;
+ 	}
+ 	case SVC_POOL_PERNODE:
+ 	{
+-		node_to_cpumask_ptr(nodecpumask, node);
+-		set_cpus_allowed_ptr(task, nodecpumask);
++		set_cpus_allowed(task, node_to_cpumask(node));
+ 		break;
+ 	}
+ 	}
+@@ -831,7 +830,7 @@ svc_process(struct svc_rqst *rqstp)
+ 	rqstp->rq_res.tail[0].iov_base = NULL;
+ 	rqstp->rq_res.tail[0].iov_len = 0;
+ 	/* Will be turned off only in gss privacy case: */
+-	rqstp->rq_splice_ok = 1;
++	rqstp->rq_sendfile_ok = 1;
+ 
+ 	/* Setup reply header */
+ 	rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
+diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
+index 23a2b8f..0f4423e 100644
+--- a/net/sunrpc/rpc_pipe.c
++++ b/net/sunrpc/rpc_pipe.c
+@@ -14,7 +14,7 @@
+ #include <linux/pagemap.h>
+ #include <linux/mount.h>
+ #include <linux/namei.h>
+-#include <linux/fsnotify.h>
++#include <linux/dnotify.h>
+ #include <linux/kernel.h>
+ 
+ #include <asm/ioctls.h>
+@@ -495,7 +495,7 @@ rpc_lookup_parent(char *path, struct nameidata *nd)
+ static void
+ rpc_release_path(struct nameidata *nd)
+ {
+-	path_put(&nd->path);
++	backport_path_put(nd);
+ 	rpc_put_mount();
+ }
+ 
+@@ -597,7 +597,6 @@ rpc_populate(struct dentry *parent,
+ 		if (S_ISDIR(mode))
+ 			inc_nlink(dir);
+ 		d_add(dentry, inode);
+-		fsnotify_create(dir, dentry);
+ 	}
+ 	mutex_unlock(&dir->i_mutex);
+ 	return 0;
+@@ -619,7 +618,7 @@ __rpc_mkdir(struct inode *dir, struct dentry *dentry)
+ 	inode->i_ino = iunique(dir->i_sb, 100);
+ 	d_instantiate(dentry, inode);
+ 	inc_nlink(dir);
+-	fsnotify_mkdir(dir, dentry);
++	inode_dir_notify(dir, DN_CREATE);
+ 	return 0;
+ out_err:
+ 	printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
+@@ -668,7 +667,7 @@ rpc_lookup_negative(char *path, struct nameidata *nd)
+ 
+ 	if ((error = rpc_lookup_parent(path, nd)) != 0)
+ 		return ERR_PTR(error);
+-	dentry = rpc_lookup_create(nd->path.dentry, nd->last.name, nd->last.len,
++	dentry = rpc_lookup_create(nd->dentry, nd->last.name, nd->last.len,
+ 				   1);
+ 	if (IS_ERR(dentry))
+ 		rpc_release_path(nd);
+@@ -696,7 +695,7 @@ rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
+ 	dentry = rpc_lookup_negative(path, &nd);
+ 	if (IS_ERR(dentry))
+ 		return dentry;
+-	dir = nd.path.dentry->d_inode;
++	dir = nd.dentry->d_inode;
+ 	if ((error = __rpc_mkdir(dir, dentry)) != 0)
+ 		goto err_dput;
+ 	RPC_I(dentry->d_inode)->private = rpc_client;
+@@ -795,7 +794,7 @@ rpc_mkpipe(struct dentry *parent, const char *name, void *private, struct rpc_pi
+ 	rpci->flags = flags;
+ 	rpci->ops = ops;
+ 	rpci->nkern_readwriters = 1;
+-	fsnotify_create(dir, dentry);
++	inode_dir_notify(dir, DN_CREATE);
+ 	dget(dentry);
+ out:
+ 	mutex_unlock(&dir->i_mutex);
+@@ -897,7 +896,7 @@ static struct file_system_type rpc_pipe_fs_type = {
+ };
+ 
+ static void
+-init_once(void *foo)
++init_once(void *foo, struct kmem_cache *cachep, unsigned long temp)
+ {
+ 	struct rpc_inode *rpci = (struct rpc_inode *) foo;
+ 

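A recurring pattern in the hunks above is mapping the 2.6.23+ struct-based shrinker registration back onto the older set_shrinker()/remove_shrinker() interface. The snippet below is only an illustrative sketch of that pattern, not part of the patch; my_cache_shrinker, my_shrinker, my_module_init and my_module_exit are placeholder names:

	/* Old (pre-2.6.23) shrinker API: set_shrinker() allocates and registers
	 * a struct shrinker around a plain callback; remove_shrinker() frees it.
	 * With nr_to_scan == 0 the callback is expected to report how many
	 * cacheable objects it could free.
	 */
	static int my_cache_shrinker(int nr_to_scan, gfp_t gfp_mask)
	{
		if (nr_to_scan == 0)
			return 0;	/* nothing cached in this sketch */
		return 0;
	}

	static struct shrinker *my_shrinker;	/* pointer, not an embedded struct */

	static int __init my_module_init(void)
	{
		my_shrinker = set_shrinker(DEFAULT_SEEKS, my_cache_shrinker);
		return 0;
	}

	static void __exit my_module_exit(void)
	{
		if (my_shrinker != NULL)
			remove_shrinker(my_shrinker);
	}
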
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/cxgb3_0002_undo_250.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/cxgb3_0002_undo_250.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/cxgb3_0002_undo_250.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,8 @@
-commit 204e2f98c2d13f869b8541f3c57c7314f75cab11
-Author: Divy Le Ray <divy at chelsio.com>
-Date:   Tue May 6 19:26:01 2008 -0700
-
-    cxgb3 - fix EEH
-    
-    Reset the chip when the PCI link goes down.
-    Preserve the napi structure when a sge qset's resources are freed.
-    Replay only HW initialization when the chip comes out of reset.
-    
-    Signed-off-by: Divy Le ray <divy at chelsio.com>
-    Signed-off-by: Jeff Garzik <jgarzik at redhat.com>
-
----
- drivers/net/cxgb3/common.h     |    1 -
- drivers/net/cxgb3/cxgb3_main.c |   10 ++++------
- drivers/net/cxgb3/regs.h       |    8 --------
- drivers/net/cxgb3/sge.c        |   31 ++-----------------------------
- drivers/net/cxgb3/t3_hw.c      |   28 ----------------------------
- 5 files changed, 6 insertions(+), 72 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/common.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/common.h
-+++ ofed_kernel/drivers/net/cxgb3/common.h
-@@ -700,7 +700,6 @@ void mac_prep(struct cmac *mac, struct a
+diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
+index d5e9bf7..dc2c79d 100644
+--- a/drivers/net/cxgb3/common.h
++++ b/drivers/net/cxgb3/common.h
+@@ -726,7 +726,6 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
  void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
  int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
  		    int reset);
@@ -31,11 +10,11 @@
  void t3_led_ready(struct adapter *adapter);
  void t3_fatal_err(struct adapter *adapter);
  void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -2449,6 +2449,9 @@ static pci_ers_result_t t3_io_error_dete
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 8de820e..48fbda6 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -2485,6 +2485,9 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
  	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
  		offload_close(&adapter->tdev);
  
@@ -45,7 +24,7 @@
  	adapter->flags &= ~FULL_INIT_DONE;
  
  	pci_disable_device(pdev);
-@@ -2473,12 +2476,8 @@ static pci_ers_result_t t3_io_slot_reset
+@@ -2509,12 +2512,8 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
  		goto err;
  	}
  	pci_set_master(pdev);
@@ -59,7 +38,7 @@
  		goto err;
  
  	return PCI_ERS_RESULT_RECOVERED;
-@@ -2630,7 +2629,6 @@ static int __devinit init_one(struct pci
+@@ -2666,7 +2665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
  	}
  
  	pci_set_master(pdev);
@@ -67,10 +46,10 @@
  
  	mmio_start = pci_resource_start(pdev, 0);
  	mmio_len = pci_resource_len(pdev, 0);
-Index: ofed_kernel/drivers/net/cxgb3/regs.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/regs.h
-+++ ofed_kernel/drivers/net/cxgb3/regs.h
+diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
+index a035d5c..086cd02 100644
+--- a/drivers/net/cxgb3/regs.h
++++ b/drivers/net/cxgb3/regs.h
 @@ -444,14 +444,6 @@
  
  #define A_PCIE_CFG 0x88
@@ -86,11 +65,11 @@
  #define S_PCIE_CLIDECEN    16
  #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
  #define F_PCIE_CLIDECEN    V_PCIE_CLIDECEN(1U)
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index 1b0861d..912f816 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
  }
  
  /**
@@ -124,7 +103,7 @@
   *	free_qset - free the resources of an SGE queue set
   *	@adapter: the adapter owning the queue set
   *	@q: the queue set
-@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter 
+@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -133,20 +112,11 @@
  }
  
  /**
-@@ -1429,7 +1402,7 @@ static void restart_ctrlq(unsigned long 
-  */
- int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
- {
--	int ret;
-+	int ret;
- 	local_bh_disable();
- 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
- 	local_bh_enable();
-Index: ofed_kernel/drivers/net/cxgb3/t3_hw.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/t3_hw.c
-+++ ofed_kernel/drivers/net/cxgb3/t3_hw.c
-@@ -3265,7 +3265,6 @@ static void config_pcie(struct adapter *
+diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
+index bf5c076..533fc74 100644
+--- a/drivers/net/cxgb3/t3_hw.c
++++ b/drivers/net/cxgb3/t3_hw.c
+@@ -3303,7 +3303,6 @@ static void config_pcie(struct adapter *adap)
  
  	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
  	t3_set_reg_field(adap, A_PCIE_CFG, 0,
@@ -154,15 +124,14 @@
  			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
  }
  
-@@ -3657,30 +3656,3 @@ void t3_led_ready(struct adapter *adapte
- 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+@@ -3706,31 +3705,3 @@ void t3_led_ready(struct adapter *adapter)
  			 F_GPIO0_OUT_VAL);
  }
--
+ 
 -int t3_replay_prep_adapter(struct adapter *adapter)
 -{
 -	const struct adapter_info *ai = adapter->params.info;
--	unsigned int i, j = 0;
+-	unsigned int i, j = -1;
 -	int ret;
 -
 -	early_hw_init(adapter, ai);
@@ -171,15 +140,17 @@
 -		return ret;
 -
 -	for_each_port(adapter, i) {
+-		const struct port_type_info *pti;
 -		struct port_info *p = adap2pinfo(adapter, i);
--		while (!adapter->params.vpd.port_type[j])
--			++j;
 -
--		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
--					ai->mdio_ops);
+-		while (!adapter->params.vpd.port_type[++j])
+-			;
 -
+-		pti = &port_types[adapter->params.vpd.port_type[j]];
+-		ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
+-		if (ret)
+-			return ret;
 -		p->phy.ops->power_down(&p->phy, 1);
--		++j;
 -	}
 -
 -return 0;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/cxgb3_0010_napi.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/cxgb3_0010_napi.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/cxgb3_0010_napi.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,21 @@
----
- drivers/net/cxgb3/adapter.h    |   22 +++--
- drivers/net/cxgb3/cxgb3_main.c |   94 ++++++++++++++++------
- drivers/net/cxgb3/sge.c        |  170 ++++++++++++++++++++++++-----------------
- 3 files changed, 185 insertions(+), 101 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/adapter.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/adapter.h
-+++ ofed_kernel/drivers/net/cxgb3/adapter.h
-@@ -47,13 +47,11 @@
- #include <asm/io.h>
+diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
+index 3b33ecb..21dad82 100644
+--- a/drivers/net/cxgb3/adapter.h
++++ b/drivers/net/cxgb3/adapter.h
+@@ -48,12 +48,10 @@
  
  struct vlan_group;
--struct adapter;
+ struct adapter;
 -struct sge_qset;
  
-+struct adapter;
  struct port_info {
  	struct adapter *adapter;
  	struct vlan_group *vlan_grp;
 -	struct sge_qset *qs;
- 	const struct port_type_info *port_type;
  	u8 port_id;
  	u8 rx_csum_offload;
-@@ -184,8 +182,6 @@ enum {				/* per port SGE statistics */
+ 	u8 nqsets;
+@@ -183,8 +181,6 @@ enum {				/* per port SGE statistics */
  #define T3_MAX_LRO_MAX_PKTS 64
  
  struct sge_qset {		/* an SGE queue set */
@@ -32,7 +24,7 @@
  	struct sge_rspq rspq;
  	struct sge_fl fl[SGE_RXQ_PER_SET];
  	struct sge_txq txq[SGE_TXQ_PER_SET];
-@@ -196,7 +192,7 @@ struct sge_qset {		/* an SGE queue set *
+@@ -195,7 +191,7 @@ struct sge_qset {		/* an SGE queue set */
  	int lro_enabled;
  	int lro_frag_len;
  	void *lro_va;
@@ -41,7 +33,7 @@
  	unsigned long txq_stopped;	/* which Tx queues are stopped */
  	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
  	unsigned long port_stats[SGE_PSTAT_MAX];
-@@ -241,6 +237,12 @@ struct adapter {
+@@ -240,6 +236,12 @@ struct adapter {
  	struct delayed_work adap_check_task;
  	struct work_struct ext_intr_handler_task;
  
@@ -54,7 +46,7 @@
  	struct dentry *debugfs_root;
  
  	struct mutex mdio_lock;
-@@ -267,6 +269,12 @@ static inline struct port_info *adap2pin
+@@ -266,6 +268,12 @@ static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
  	return netdev_priv(adap->port[idx]);
  }
  
@@ -67,7 +59,7 @@
  #define OFFLOAD_DEVMAP_BIT 15
  
  #define tdev2adap(d) container_of(d, struct adapter, tdev)
-@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, str
+@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -76,11 +68,11 @@
  int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
  		unsigned char *data);
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -410,17 +410,49 @@ static void setup_rss(struct adapter *ad
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 0f4c694..342d441 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -435,17 +435,49 @@ static void setup_rss(struct adapter *adap)
  		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
  }
  
@@ -98,9 +90,7 @@
 -	int i;
 +	int i, j, dummy_idx = 0;
 +	struct net_device *nd;
- 
--	for (i = 0; i < SGE_QSETS; i++) {
--		struct sge_qset *qs = &adap->sge.qs[i];
++
 +	for_each_port(adap, i) {
 +		struct net_device *dev = adap->port[i];
 +		const struct port_info *pi = netdev_priv(dev);
@@ -112,7 +102,9 @@
 +				nd = alloc_netdev(sizeof(*p), "", ether_setup);
 +				if (!nd)
 +					goto free_all;
-+
+ 
+-	for (i = 0; i < SGE_QSETS; i++) {
+-		struct sge_qset *qs = &adap->sge.qs[i];
 +				p = netdev_priv(nd);
 +				p->adapter = adap;
 +				nd->weight = 64;
@@ -137,7 +129,7 @@
  }
  
  /*
-@@ -431,18 +463,20 @@ static void init_napi(struct adapter *ad
+@@ -456,18 +488,20 @@ static void init_napi(struct adapter *adap)
  static void quiesce_rx(struct adapter *adap)
  {
  	int i;
@@ -168,7 +160,7 @@
  }
  
  /**
-@@ -455,7 +489,7 @@ static void enable_all_napi(struct adapt
+@@ -480,7 +514,7 @@ static void enable_all_napi(struct adapter *adap)
   */
  static int setup_sge_qsets(struct adapter *adap)
  {
@@ -177,7 +169,7 @@
  	unsigned int ntxq = SGE_TXQ_PER_SET;
  
  	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
-@@ -463,14 +497,15 @@ static int setup_sge_qsets(struct adapte
+@@ -488,14 +522,15 @@ static int setup_sge_qsets(struct adapter *adap)
  
  	for_each_port(adap, i) {
  		struct net_device *dev = adap->port[i];
@@ -196,7 +188,7 @@
  			if (err) {
  				t3_free_sge_resources(adap);
  				return err;
-@@ -884,6 +919,10 @@ static int cxgb_up(struct adapter *adap)
+@@ -909,6 +944,10 @@ static int cxgb_up(struct adapter *adap)
  				goto out;
  		}
  
@@ -207,7 +199,7 @@
  		err = t3_init_hw(adap, 0);
  		if (err)
  			goto out;
-@@ -896,7 +935,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -921,7 +960,6 @@ static int cxgb_up(struct adapter *adap)
  			goto out;
  
  		setup_rss(adap);
@@ -215,7 +207,7 @@
  		adap->flags |= FULL_INIT_DONE;
  	}
  
-@@ -924,7 +962,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -949,7 +987,6 @@ static int cxgb_up(struct adapter *adap)
  				      adap->name, adap)))
  		goto irq_err;
  
@@ -223,7 +215,7 @@
  	t3_sge_start(adap);
  	t3_intr_enable(adap);
  
-@@ -1061,10 +1098,8 @@ static int cxgb_open(struct net_device *
+@@ -1086,10 +1123,8 @@ static int cxgb_open(struct net_device *dev)
  	int other_ports = adapter->open_device_map & PORT_MASK;
  	int err;
  
@@ -235,7 +227,7 @@
  
  	set_bit(pi->port_id, &adapter->open_device_map);
  	if (is_offload(adapter) && !ofld_disable) {
-@@ -2700,6 +2735,7 @@ static int __devinit init_one(struct pci
+@@ -2736,6 +2771,7 @@ static int __devinit init_one(struct pci_dev *pdev,
  #ifdef CONFIG_NET_POLL_CONTROLLER
  		netdev->poll_controller = cxgb_netpoll;
  #endif
@@ -243,7 +235,7 @@
  
  		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
  	}
-@@ -2800,6 +2836,12 @@ static void __devexit remove_one(struct 
+@@ -2836,6 +2872,12 @@ static void __devexit remove_one(struct pci_dev *pdev)
  		t3_free_sge_resources(adapter);
  		cxgb_disable_msi(adapter);
  
@@ -256,11 +248,11 @@
  		for_each_port(adapter, i)
  			if (adapter->port[i])
  				free_netdev(adapter->port[i]);
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter 
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index f6bc6fe..3bbf626 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -270,7 +262,7 @@
  	memset(q, 0, sizeof(*q));
  }
  
-@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, str
+@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
  	unsigned int ndesc, pidx, credits, gen, compl;
  	const struct port_info *pi = netdev_priv(dev);
  	struct adapter *adap = pi->adapter;
@@ -279,7 +271,7 @@
  	struct sge_txq *q = &qs->txq[TXQ_ETH];
  
  	/*
-@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long 
+@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long data)
  	struct sk_buff *skb;
  	struct sge_qset *qs = (struct sge_qset *)data;
  	struct sge_txq *q = &qs->txq[TXQ_CTRL];
@@ -295,7 +287,7 @@
  
  		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
  
-@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long 
+@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long data)
  
  	spin_unlock(&q->lock);
  	wmb();
@@ -304,7 +296,7 @@
  		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
  }
  
-@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struc
+@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
  	else {
  		struct sge_qset *qs = rspq_to_qset(q);
  
@@ -314,7 +306,7 @@
  		q->rx_head = skb;
  	}
  	q->rx_tail = skb;
-@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundl
+@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev,
   *	receive handler.  Batches need to be of modest size as we do prefetches
   *	on the packets in each.
   */
@@ -357,7 +349,7 @@
  			prefetch(head->data);
  			skbs[ngathered] = head;
  			head = head->next;
-@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct 
+@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct *napi, int budget)
  		}
  		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
  	}
@@ -370,7 +362,7 @@
  }
  
  /**
-@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const
+@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const struct rsp_desc *r)
  
  /**
   *	napi_rx_handler - the NAPI handler for Rx processing
@@ -449,7 +441,7 @@
  }
  
  /**
-@@ -2448,7 +2462,8 @@ static inline int handle_responses(struc
+@@ -2448,7 +2462,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
  		return 0;
  	}
@@ -459,7 +451,7 @@
  	return 1;
  }
  
-@@ -2459,7 +2474,8 @@ static inline int handle_responses(struc
+@@ -2459,7 +2474,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -469,7 +461,7 @@
  	struct sge_rspq *q = &qs->rspq;
  
  	spin_lock(&q->lock);
-@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, vo
+@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -484,7 +476,7 @@
  		q->unhandled_irqs++;
  	spin_unlock(&q->lock);
  	return IRQ_HANDLED;
-@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, 
+@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, void *cookie)
  	return IRQ_HANDLED;
  }
  
@@ -502,7 +494,7 @@
  		return 1;
  	}
  	return 0;
-@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int 
+@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
  
  	spin_lock(&q->lock);
  
@@ -515,7 +507,7 @@
  	if (!new_packets && t3_slow_intr_handler(adap) == 0)
  		q->unhandled_irqs++;
  
-@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, voi
+@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, void *cookie)
  static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  {
  	u32 map;
@@ -527,7 +519,7 @@
  
  	t3_write_reg(adap, A_PL_CLI, 0);
  	map = t3_read_reg(adap, A_SG_DATA_INTR);
-@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq
+@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  	if (unlikely(map & F_ERRINTR))
  		t3_slow_intr_handler(adap);
  
@@ -550,7 +542,7 @@
  
  	spin_unlock(&q0->lock);
  	return IRQ_HANDLED;
-@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long d
+@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long data)
  {
  	spinlock_t *lock;
  	struct sge_qset *qs = (struct sge_qset *)data;
@@ -560,7 +552,7 @@
  
  	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
  		reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
-@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long d
+@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long data)
  		spin_unlock(&qs->txq[TXQ_OFLD].lock);
  	}
  	lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
@@ -572,7 +564,7 @@
  			u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
  
  			if (qs->fl[0].credits < qs->fl[0].size)
-@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long d
+@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long data)
   */
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
  {
@@ -586,7 +578,7 @@
  }
  
  /**
-@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_
+@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
   */
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -595,7 +587,7 @@
  {
  	int i, avail, ret = -ENOMEM;
  	struct sge_qset *q = &adapter->sge.qs[id];
-@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *ad
+@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  	}
  
  	spin_unlock_irq(&adapter->sge.reg_lock);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,23 +3,32 @@
  drivers/infiniband/hw/ipath/ipath_user_sdma.c |    6 +++---
  2 files changed, 4 insertions(+), 4 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:37:35.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:41:22.000000000 -0800
 @@ -698,7 +698,7 @@ retry:
  
  	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
  			      tx->map_len, DMA_TO_DEVICE);
--	if (dma_mapping_error(&dd->pcidev->dev, addr)) {
-+	if (dma_mapping_error(addr)) {
- 		ret = -EIO;
- 		goto unlock;
- 	}
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+-	if (dma_mapping_error(&dd->pcidev->dev, addr))
++	if (dma_mapping_error(addr))
+ 		goto ioerr;
+ 
+ 	dwoffset = tx->map_len >> 2;
+@@ -739,7 +739,7 @@ retry:
+ 		dw = (len + 3) >> 2;
+ 		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
+ 				      DMA_TO_DEVICE);
+-		if (dma_mapping_error(&dd->pcidev->dev, addr))
++		if (dma_mapping_error(addr))
+ 			goto unmap;
+ 		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
+ 		/* SDmaUseLargeBuf has to be set in every descriptor */
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:18.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:54.000000000 -0800
 @@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(cons
  
  	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/ipoib_to_2.6.23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/ipoib_to_2.6.23.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/ipoib_to_2.6.23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -10,11 +10,11 @@
  drivers/infiniband/ulp/ipoib/ipoib_main.c |   19 ++++------------
  3 files changed, 25 insertions(+), 32 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 19:00:27.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 19:30:31.000000000 +0200
-@@ -279,8 +279,6 @@ struct ipoib_dev_priv {
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-17 16:53:49.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-17 16:56:10.000000000 +0300
+@@ -277,8 +277,6 @@ struct ipoib_dev_priv {
  
  	struct net_device *dev;
  
@@ -23,7 +23,7 @@
  	unsigned long flags;
  
  	struct mutex vlan_mutex;
-@@ -425,7 +423,7 @@ extern struct workqueue_struct *ipoib_wo
+@@ -423,7 +421,7 @@ extern struct workqueue_struct *ipoib_wo
  
  /* functions */
  
@@ -32,10 +32,10 @@
  void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
  void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 19:00:27.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 19:30:31.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-17 16:53:56.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-17 16:57:35.000000000 +0300
 @@ -408,20 +408,19 @@ static int poll_tx(struct ipoib_dev_priv
  	return n == MAX_SEND_CQE;
  }
@@ -109,58 +109,40 @@
  }
  
  static void drain_tx_cq(struct net_device *dev)
-@@ -809,6 +811,7 @@ int ipoib_ib_dev_stop(struct net_device 
+@@ -708,8 +710,7 @@ int ipoib_ib_dev_open(struct net_device 
+ 	priv->poll_timer.function = ipoib_ib_tx_timer_func;
+ 	priv->poll_timer.data = (unsigned long)dev;
+ 
+-	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_enable(&priv->napi);
++	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+ 
+ 	return 0;
+ }
+@@ -828,8 +829,8 @@ int ipoib_ib_dev_stop(struct net_device 
+ 	struct ipoib_tx_buf *tx_req;
  	int i;
  
- 	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+-	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_disable(&priv->napi);
++	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 +	netif_poll_disable(dev);
  
  	ipoib_cm_dev_stop(dev);
  
-@@ -893,6 +896,7 @@ timeout:
- 		msleep(1);
- 	}
+@@ -903,6 +904,7 @@ timeout:
  
+ 	ipoib_ah_dev_cleanup(dev);
+ 
 +	netif_poll_enable(dev);
  	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
  
  	return 0;
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 19:00:28.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 19:31:25.000000000 +0200
-@@ -106,20 +106,16 @@ int ipoib_open(struct net_device *dev)
- 
- 	ipoib_dbg(priv, "bringing up interface\n");
- 
--	if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
--		napi_enable(&priv->napi);
-+	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- 
- 	if (ipoib_pkey_dev_delay_open(dev))
- 		return 0;
- 
--	if (ipoib_ib_dev_open(dev)) {
--		napi_disable(&priv->napi);
-+	if (ipoib_ib_dev_open(dev))
- 		return -EINVAL;
--	}
- 
- 	if (ipoib_ib_dev_up(dev)) {
- 		ipoib_ib_dev_stop(dev, 1);
--		napi_disable(&priv->napi);
- 		return -EINVAL;
- 	}
- 
-@@ -152,7 +148,6 @@ static int ipoib_stop(struct net_device 
- 	ipoib_dbg(priv, "stopping interface\n");
- 
- 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
--	napi_disable(&priv->napi);
- 
- 	netif_stop_queue(dev);
- 
-@@ -842,7 +837,7 @@ static void ipoib_timeout(struct net_dev
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-17 16:53:56.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-17 16:56:10.000000000 +0300
+@@ -825,7 +825,7 @@ static void ipoib_timeout(struct net_dev
  static int ipoib_hard_header(struct sk_buff *skb,
  			     struct net_device *dev,
  			     unsigned short type,
@@ -169,7 +151,7 @@
  {
  	struct ipoib_header *header;
  
-@@ -1008,10 +1003,6 @@ void ipoib_dev_cleanup(struct net_device
+@@ -991,10 +991,6 @@ void ipoib_dev_cleanup(struct net_device
  	priv->tx_ring = NULL;
  }
  
@@ -180,7 +162,7 @@
  static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
  		       void **tcph, u64 *hdr_flags, void *priv)
  {
-@@ -1069,13 +1060,13 @@ static void ipoib_setup(struct net_devic
+@@ -1052,13 +1048,13 @@ static void ipoib_setup(struct net_devic
  	dev->change_mtu		 = ipoib_change_mtu;
  	dev->hard_start_xmit	 = ipoib_start_xmit;
  	dev->tx_timeout		 = ipoib_timeout;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/iw_nes_050_to_2_6_24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/iw_nes_050_to_2_6_24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/iw_nes_050_to_2_6_24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:23:23.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -1118,19 +1118,19 @@ static ssize_t nes_show_wqm_quanta(struc
  static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
  					const char *buf, size_t count)

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/iw_nes_100_to_2_6_23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/iw_nes_100_to_2_6_23.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/iw_nes_100_to_2_6_23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/Kconfig nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig
---- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:38:45.000000000 -0500
 @@ -2,7 +2,6 @@ config INFINIBAND_NES
  	tristate "NetEffect RNIC Driver"
  	depends on PCI && INET && INFINIBAND
@@ -10,8 +10,8 @@
  	  This is a low-level driver for NetEffect RDMA enabled
  	  Network Interface Cards (RNIC).
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -360,10 +360,11 @@ struct ib_qp *nes_get_qp(struct ib_devic
   */
  static void nes_print_macaddr(struct net_device *netdev)
@@ -29,9 +29,9 @@
  
  /**
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_cm.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2008-11-07 15:26:56.000000000 -0600
-@@ -998,7 +998,6 @@ static int nes_addr_resolve_neigh(struct
+--- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1086,7 +1086,6 @@ static int nes_addr_resolve_neigh(struct
  	struct flowi fl;
  	struct neighbour *neigh;
  	int rc = -1;
@@ -39,7 +39,7 @@
  
  	memset(&fl, 0, sizeof fl);
  	fl.nl_u.ip4_u.daddr = htonl(dst_ip);
-@@ -1012,8 +1011,11 @@ static int nes_addr_resolve_neigh(struct
+@@ -1100,8 +1099,11 @@ static int nes_addr_resolve_neigh(struct
  	if (neigh) {
  		if (neigh->nud_state & NUD_VALID) {
  			nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -53,7 +53,7 @@
  			nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
  					     dst_ip, NES_ARP_ADD);
  			rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
-@@ -1042,7 +1044,6 @@ static struct nes_cm_node *make_cm_node(
+@@ -1130,7 +1132,6 @@ static struct nes_cm_node *make_cm_node(
  	int arpindex = 0;
  	struct nes_device *nesdev;
  	struct nes_adapter *nesadapter;
@@ -61,7 +61,7 @@
  
  	/* create an hte and cm_node for this instance */
  	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
-@@ -1111,8 +1112,11 @@ static struct nes_cm_node *make_cm_node(
+@@ -1197,8 +1198,11 @@ static struct nes_cm_node *make_cm_node(
  
  	/* copy the mac addr to node context */
  	memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
@@ -76,8 +76,8 @@
  	add_hte_node(cm_core, cm_node);
  	atomic_inc(&cm_nodes_created);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2008-11-07 15:38:18.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:38:45.000000000 -0500
 @@ -38,14 +38,9 @@
  #include <linux/ip.h>
  #include <linux/tcp.h>
@@ -90,10 +90,10 @@
 -module_param(nes_lro_max_aggr, uint, 0444);
 -MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
 -
- static u32 crit_err_count;
- u32 int_mod_timer_init;
- u32 int_mod_cq_depth_256;
-@@ -1540,25 +1535,6 @@ static void nes_rq_wqes_timeout(unsigned
+ static int wide_ppm_offset;
+ module_param(wide_ppm_offset, int, 0644);
+ MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm");
+@@ -1531,25 +1526,6 @@ static void nes_rq_wqes_timeout(unsigned
  }
  
  
@@ -119,7 +119,7 @@
  /**
   * nes_init_nic_qp
   */
-@@ -1783,14 +1759,6 @@ int nes_init_nic_qp(struct nes_device *n
+@@ -1773,14 +1749,6 @@ int nes_init_nic_qp(struct nes_device *n
  			jumbomode = 1;
  		nes_nic_init_timer_defaults(nesdev, jumbomode);
  	}
@@ -134,7 +134,7 @@
  	return 0;
  }
  
-@@ -2541,7 +2509,7 @@ static void nes_nic_napi_ce_handler(stru
+@@ -2556,7 +2524,7 @@ static void nes_nic_napi_ce_handler(stru
  {
  	struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
  
@@ -143,7 +143,7 @@
  }
  
  
-@@ -2576,13 +2544,10 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2591,13 +2559,10 @@ void nes_nic_ce_handler(struct nes_devic
  	u16 pkt_type;
  	u16 rqes_processed = 0;
  	u8 sq_cqes = 0;
@@ -157,7 +157,7 @@
  	do {
  		if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
  				NES_NIC_CQE_VALID) {
-@@ -2714,17 +2679,9 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2729,17 +2694,9 @@ void nes_nic_ce_handler(struct nes_devic
  							>> 16);
  					nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
  							nesvnic->netdev->name, vlan_tag);
@@ -178,7 +178,7 @@
  
  skip_rx_indicate0:
  				nesvnic->netdev->last_rx = jiffies;
-@@ -2755,8 +2712,6 @@ skip_rx_indicate0:
+@@ -2770,8 +2727,6 @@ skip_rx_indicate0:
  
  	} while (1);
  
@@ -188,18 +188,18 @@
  		barrier();
  		/* restart the queue if it had been stopped */
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.h nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:38:45.000000000 -0500
 @@ -33,8 +33,6 @@
  #ifndef __NES_HW_H
  #define __NES_HW_H
  
 -#include <linux/inet_lro.h>
 -
+ #define NES_PHY_TYPE_CX4       1
  #define NES_PHY_TYPE_1G        2
  #define NES_PHY_TYPE_IRIS      3
- #define NES_PHY_TYPE_ARGUS     4
-@@ -991,8 +989,6 @@ struct nes_hw_tune_timer {
+@@ -1005,8 +1003,6 @@ struct nes_hw_tune_timer {
  #define NES_TIMER_ENABLE_LIMIT      4
  #define NES_MAX_LINK_INTERRUPTS     128
  #define NES_MAX_LINK_CHECK          200
@@ -208,7 +208,7 @@
  
  struct nes_adapter {
  	u64              fw_ver;
-@@ -1168,7 +1164,6 @@ struct nes_vnic {
+@@ -1194,7 +1190,6 @@ struct nes_vnic {
  	u32               msg_enable;
  	/* u32 tx_avail; */
  	__be32            local_ipaddr;
@@ -216,7 +216,7 @@
  	spinlock_t           tx_lock;	/* could use netdev tx lock? */
  	struct timer_list    rq_wqes_timer;
  	u32                  nic_mem_size;
-@@ -1196,9 +1191,6 @@ struct nes_vnic {
+@@ -1222,9 +1217,6 @@ struct nes_vnic {
  	u8  of_device_registered;
  	u8  rdma_enabled;
  	u8  rx_checksum_disabled;
@@ -227,8 +227,8 @@
  
  struct nes_ib_device {
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
 @@ -96,35 +96,38 @@ static int nics_per_function = 1;
  /**
   * nes_netdev_poll
@@ -292,7 +292,7 @@
  	netif_stop_queue(netdev);
  	list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
  		first_nesvnic = container_of(list_pos, struct nes_vnic, list);
-@@ -797,14 +798,16 @@ static int nes_netdev_set_mac_address(st
+@@ -775,14 +776,16 @@ static int nes_netdev_set_mac_address(st
  	int i;
  	u32 macaddr_low;
  	u16 macaddr_high;
@@ -312,7 +312,7 @@
  	macaddr_high  = ((u16)netdev->dev_addr[0]) << 8;
  	macaddr_high += (u16)netdev->dev_addr[1];
  	macaddr_low   = ((u32)netdev->dev_addr[2]) << 24;
-@@ -909,11 +912,11 @@ static void nes_netdev_set_multicast_lis
+@@ -887,11 +890,11 @@ static void nes_netdev_set_multicast_lis
  			if (mc_index >= max_pft_entries_avaiable)
  				break;
  			if (multicast_addr) {
@@ -329,7 +329,7 @@
  				macaddr_high  = ((u16)multicast_addr->dmi_addr[0]) << 8;
  				macaddr_high += (u16)multicast_addr->dmi_addr[1];
  				macaddr_low   = ((u32)multicast_addr->dmi_addr[2]) << 24;
-@@ -1063,9 +1066,6 @@ static const char nes_ethtool_stringset[
+@@ -1040,9 +1043,6 @@ static const char nes_ethtool_stringset[
  	"CQ Depth 32",
  	"CQ Depth 128",
  	"CQ Depth 256",
@@ -339,17 +339,17 @@
  };
  
  #define NES_ETHTOOL_STAT_COUNT  ARRAY_SIZE(nes_ethtool_stringset)
-@@ -1263,9 +1263,6 @@ static void nes_netdev_get_ethtool_stats
- 	target_stat_values[52] = int_mod_cq_depth_32;
- 	target_stat_values[53] = int_mod_cq_depth_128;
- 	target_stat_values[54] = int_mod_cq_depth_256;
--	target_stat_values[55] = nesvnic->lro_mgr.stats.aggregated;
--	target_stat_values[56] = nesvnic->lro_mgr.stats.flushed;
--	target_stat_values[57] = nesvnic->lro_mgr.stats.no_desc;
+@@ -1240,9 +1240,6 @@ static void nes_netdev_get_ethtool_stats
+ 	target_stat_values[++index] = int_mod_cq_depth_32;
+ 	target_stat_values[++index] = int_mod_cq_depth_128;
+ 	target_stat_values[++index] = int_mod_cq_depth_256;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
  
  }
  
-@@ -1542,8 +1539,6 @@ static struct ethtool_ops nes_ethtool_op
+@@ -1525,8 +1522,6 @@ static struct ethtool_ops nes_ethtool_op
  	.set_sg = ethtool_op_set_sg,
  	.get_tso = ethtool_op_get_tso,
  	.set_tso = ethtool_op_set_tso,
@@ -358,7 +358,7 @@
  };
  
  
-@@ -1615,7 +1610,8 @@ struct net_device *nes_netdev_init(struc
+@@ -1598,7 +1593,8 @@ struct net_device *nes_netdev_init(struc
  	netdev->type = ARPHRD_ETHER;
  	netdev->features = NETIF_F_HIGHDMA;
  	netdev->ethtool_ops = &nes_ethtool_ops;
@@ -368,7 +368,7 @@
  	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
  	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
  	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
-@@ -1648,7 +1644,6 @@ struct net_device *nes_netdev_init(struc
+@@ -1630,7 +1626,6 @@ struct net_device *nes_netdev_init(struc
  
  	if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
  		netdev->features |= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/iw_nes_200_to_2_6_22.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/iw_nes_200_to_2_6_22.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/iw_nes_200_to_2_6_22.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:52:27.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -593,16 +593,22 @@ static int __devinit nes_probe(struct pc
  						nesdev->nesadapter->port_count;
  	}
@@ -35,9 +35,9 @@
  
  	tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.h nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h
---- linux-2.6/drivers/infiniband/hw/nes/nes.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2008-11-07 15:52:27.000000000 -0600
-@@ -276,14 +276,7 @@ static inline __le32 get_crc_value(struc
+--- linux-2.6/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:38:45.000000000 -0500
+@@ -274,14 +274,7 @@ static inline __le32 get_crc_value(struc
  	u32 crc_value;
  	crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad));
  

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/mlx4_en_0099_no_multiqueue.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/mlx4_en_0099_no_multiqueue.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/mlx4_en_0099_no_multiqueue.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,73 @@
+diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
+index bce700a..a9ee0d7 100644
+--- a/drivers/net/mlx4/en_netdev.c
++++ b/drivers/net/mlx4/en_netdev.c
+@@ -963,7 +963,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	int i;
+ 	int err;
+ 
+-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
++	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+ 	if (dev == NULL) {
+ 		mlx4_err(mdev, "Net device allocation failed\n");
+ 		return -ENOMEM;
+@@ -1036,7 +1036,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	dev->open = &mlx4_en_open;
+ 	dev->stop = &mlx4_en_close;
+ 	dev->hard_start_xmit = &mlx4_en_xmit;
+-	dev->select_queue = &mlx4_en_select_queue;
+ 	dev->get_stats = &mlx4_en_get_stats;
+ 	dev->set_multicast_list = &mlx4_en_set_multicast;
+ 	dev->set_mac_address = &mlx4_en_set_mac;
+diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
+index 3d8246f..72e166f 100644
+--- a/drivers/net/mlx4/en_tx.c
++++ b/drivers/net/mlx4/en_tx.c
+@@ -392,7 +392,7 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+ 			 *   transmission on that ring would stop the queue.
+ 			 */
+ 			ring->blocked = 0;
+-			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
++			netif_wake_queue(dev);
+ 			priv->port_stats.wake_queue++;
+ 		}
+ 	}
+@@ -612,7 +612,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
+ 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ }
+ 
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
++static int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	u16 vlan_tag = 0;
+@@ -703,7 +703,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+-	tx_ind = skb->queue_mapping;
++	tx_ind = mlx4_en_select_queue(dev, skb);
+ 	ring = &priv->tx_ring[tx_ind];
+ 	if (priv->vlgrp && vlan_tx_tag_present(skb))
+ 		vlan_tag = vlan_tx_tag_get(skb);
+@@ -713,7 +713,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ 		/* every full Tx ring stops queue.
+ 		 * TODO: implement multi-queue support (per-queue stop) */
+-		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
++		netif_stop_queue(dev);
+ 		ring->blocked = 1;
+ 		priv->port_stats.queue_stopped++;
+ 
+diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
+index adef17c..995e318 100644
+--- a/drivers/net/mlx4/mlx4_en.h
++++ b/drivers/net/mlx4/mlx4_en.h
+@@ -521,7 +521,6 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq);
+ void mlx4_en_poll_tx_cq(unsigned long data);
+ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+ 
+ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+ 			   u32 size, u16 stride);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/mlx4_en_0100_to_2.6.24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/mlx4_en_0100_to_2.6.24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/mlx4_en_0100_to_2.6.24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,33 +1,5 @@
-From 6d0a6cc6981404e95297924c9a8f169b3447a98e Mon Sep 17 00:00:00 2001
-From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
-Date: Thu, 2 Oct 2008 11:26:30 +0300
-Subject: [PATCH] mlx4_en: LRO backport patch
-
-Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
----
- drivers/net/mlx4/Makefile    |    2 +-
- drivers/net/mlx4/en_cq.c     |   23 ++-
- drivers/net/mlx4/en_lro.c    |  529 ++++++++++++++++++++++++++++++++++++++++++
- drivers/net/mlx4/en_netdev.c |    5 +-
- drivers/net/mlx4/en_params.c |   19 +--
- drivers/net/mlx4/en_rx.c     |  121 +++-------
- drivers/net/mlx4/en_tx.c     |    1 +
- drivers/net/mlx4/mlx4_en.h   |   60 +++++-
- 8 files changed, 646 insertions(+), 114 deletions(-)
- create mode 100644 drivers/net/mlx4/en_lro.c
-
-diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
-index acb1b91..e6bfdec 100644
---- a/drivers/net/mlx4/Makefile
-+++ b/drivers/net/mlx4/Makefile
-@@ -5,4 +5,4 @@ mlx4_core-y :=  alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-
- obj-$(CONFIG_MLX4_EN)                  += mlx4_en.o
-
--mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o
-+mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o en_lro.o
 diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
-index cf9c5df..2348bfc 100644
+index a4d6e46..440633a 100644
 --- a/drivers/net/mlx4/en_cq.c
 +++ b/drivers/net/mlx4/en_cq.c
 @@ -34,6 +34,7 @@
@@ -38,7 +10,7 @@
  
  #include "mlx4_en.h"
  
-@@ -100,8 +101,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -106,8 +107,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  		cq->timer.function = mlx4_en_poll_tx_cq;
  		cq->timer.data = (unsigned long) cq;
  	} else {
@@ -58,7 +30,7 @@
  	}
  
  	return 0;
-@@ -123,8 +133,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -129,8 +139,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  
  	if (cq->is_tx)
  		del_timer(&cq->timer);
@@ -74,548 +46,8 @@
  
  	mlx4_cq_free(mdev->dev, &cq->mcq);
  }
-diff --git a/drivers/net/mlx4/en_lro.c b/drivers/net/mlx4/en_lro.c
-new file mode 100644
-index 0000000..9d17376
---- /dev/null
-+++ b/drivers/net/mlx4/en_lro.c
-@@ -0,0 +1,534 @@
-+/*
-+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses.  You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ *     Redistribution and use in source and binary forms, with or
-+ *     without modification, are permitted provided that the following
-+ *     conditions are met:
-+ *
-+ *      - Redistributions of source code must retain the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer.
-+ *
-+ *      - Redistributions in binary form must reproduce the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer in the documentation and/or other materials
-+ *        provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ */
-+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <net/tcp.h>
-+#include <linux/if_vlan.h>
-+#include <linux/delay.h>
-+
-+#include "mlx4_en.h"
-+
-+/* LRO hash function - using sum of source and destination port LSBs is
-+ * good enough */
-+#define LRO_INDEX(th, size) \
-+	((*((u8*) &th->source + 1) + *((u8*) &th->dest + 1)) & (size - 1))
-+
-+/* #define CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+static void mlx4_en_lro_validate(struct mlx4_en_priv* priv, struct mlx4_en_lro *lro)
-+{
-+	int i;
-+	int size, size2;
-+	struct sk_buff *skb = lro->skb;
-+	skb_frag_t *frags;
-+	int len, len2;
-+	int cur_skb = 0;
-+
-+	/* Sum fragment sizes of first skb */
-+	len = skb->len;
-+	size = skb_headlen(skb);
-+	frags = skb_shinfo(skb)->frags;
-+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+		size += frags[i].size;
-+	}
-+
-+	/* Add in fragments of linked skb's */
-+	skb = skb_shinfo(skb)->frag_list;
-+	while (skb) {
-+		cur_skb++;
-+		len2 = skb->len;
-+		if (skb_headlen(skb)) {
-+			mlx4_err(priv->mdev, "Bad LRO format: non-zero headlen "
-+				  "in fraglist (skb:%d)\n", cur_skb);
-+			return;
-+		}
-+
-+		size2 = 0;
-+		frags = skb_shinfo(skb)->frags;
-+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+			size2 += frags[i].size;
-+		}
-+
-+		if (size2 != len2) {
-+			mlx4_err(priv->mdev, "Bad skb size:%d in LRO fraglist. "
-+			          "Expected:%d (skb:%d)\n", size2, len2, cur_skb);
-+			return;
-+		}
-+		size += size2;
-+		skb = skb->next;
-+	}
-+
-+	if (size != len)
-+		mlx4_err(priv->mdev, "Bad LRO size:%d expected:%d\n", size, len);
-+}
-+#endif /* MLX4_EN_DEBUG_LRO */
-+
-+static void mlx4_en_lro_flush_single(struct mlx4_en_priv* priv,
-+		   struct mlx4_en_rx_ring* ring, struct mlx4_en_lro *lro)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct sk_buff *skb = lro->skb;
-+	struct iphdr *iph = (struct iphdr *) skb->data;
-+	struct tcphdr *th = (struct tcphdr *)(iph + 1);
-+	unsigned int headlen = skb_headlen(skb);
-+	__wsum tcp_hdr_csum;
-+	u32 *ts;
-+
-+	/* Update IP length and checksum */
-+	iph->tot_len = htons(lro->tot_len);
-+	iph->check = 0;
-+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-+
-+	/* Update latest TCP ack, window, psh, and timestamp */
-+	th->ack_seq = lro->ack_seq;
-+	th->window = lro->window;
-+	th->psh = !!lro->psh;
-+	if (lro->has_timestamp) {
-+		ts = (u32 *) (th + 1);
-+		ts[1] = htonl(lro->tsval);
-+		ts[2] = lro->tsecr;
-+	}
-+	th->check = 0;
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, 0);
-+	lro->data_csum = csum_add(lro->data_csum, tcp_hdr_csum);
-+	th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
-+				      lro->tot_len - (iph->ihl << 2),
-+				      IPPROTO_TCP, lro->data_csum);
-+
-+	/* Update skb */
-+	skb->len = lro->tot_len;
-+	skb->data_len = lro->tot_len - headlen;
-+	skb->truesize = skb->len + sizeof(struct sk_buff);
-+	skb_shinfo(skb)->gso_size = lro->mss;
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+	mlx4_en_lro_validate(priv, lro);
-+#endif /* CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+	/* Push it up the stack */
-+	if (priv->vlgrp && lro->has_vlan)
-+		vlan_hwaccel_receive_skb(skb, priv->vlgrp,
-+					be16_to_cpu(lro->vlan_prio));
-+	else
-+		netif_receive_skb(skb);
-+	priv->dev->last_rx = jiffies;
-+
-+	/* Increment stats */
-+	priv->port_stats.lro_flushed++;
-+
-+	/* Move session back to the free list */
-+	hlist_del(&lro->node);
-+	hlist_del(&lro->flush_node);
-+	hlist_add_head(&lro->node, &ring->lro_free);
-+}
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_flush, flush_node) {
-+		if (all || time_after(jiffies, lro->expires))
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+	}
-+}
-+
-+static inline int mlx4_en_lro_append(struct mlx4_en_priv *priv,
-+				   struct mlx4_en_lro *lro,
-+				   struct mlx4_en_rx_desc *rx_desc,
-+				   struct skb_frag_struct *skb_frags,
-+				   struct mlx4_en_rx_alloc *page_alloc,
-+				   unsigned int data_len,
-+				   int hlen)
-+{
-+	struct sk_buff *skb = lro->skb_last;
-+	struct skb_shared_info *info;
-+	struct skb_frag_struct *frags_copy;
-+	int nr_frags;
-+
-+	if (skb_shinfo(skb)->nr_frags + priv->num_frags > MAX_SKB_FRAGS)
-+		return -ENOMEM;
-+
-+	info = skb_shinfo(skb);
-+
-+	/* Copy fragments from descriptor ring to skb */
-+	frags_copy = info->frags + info->nr_frags;
-+	nr_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
-+						frags_copy,
-+						page_alloc,
-+						data_len + hlen);
-+	if (!nr_frags) {
-+		mlx4_dbg(DRV, priv, "Failed completing rx desc during LRO append\n");
-+		return -ENOMEM;
-+	}
-+
-+	/* Skip over headers */
-+	frags_copy[0].page_offset += hlen;
-+
-+	if (nr_frags == 1)
-+		frags_copy[0].size = data_len;
-+	else {
-+		/* Adjust size of last fragment to match packet length.
-+		 * Note: if this fragment is also the first one, the
-+		 *       operation is completed in the next line */
-+		frags_copy[nr_frags - 1].size = hlen + data_len -
-+				priv->frag_info[nr_frags - 1].frag_prefix_size;
-+
-+		/* Adjust size of first fragment */
-+		frags_copy[0].size -= hlen;
-+	}
-+
-+	/* Update skb bookkeeping */
-+	skb->len += data_len;
-+	skb->data_len += data_len;
-+	info->nr_frags += nr_frags;
-+	return 0;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_find_session(struct mlx4_en_dev *mdev,
-+						       struct mlx4_en_rx_ring *ring,
-+						       struct iphdr *iph,
-+						       struct tcphdr *th)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node;
-+	int index = LRO_INDEX(th, mdev->profile.num_lro);
-+	struct hlist_head *list = &ring->lro_hash[index];
-+
-+	hlist_for_each_entry(lro, node, list, node) {
-+		if (lro->sport_dport == *((u32*) &th->source) &&
-+		    lro->saddr == iph->saddr &&
-+		    lro->daddr == iph->daddr)
-+			return lro;
-+	}
-+	return NULL;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_alloc_session(struct mlx4_en_priv *priv,
-+							struct mlx4_en_rx_ring *ring)
-+{
-+	return hlist_empty(&ring->lro_free) ? NULL :
-+		hlist_entry(ring->lro_free.first, struct mlx4_en_lro, node);
-+}
-+
-+static __wsum mlx4_en_lro_tcp_data_csum(struct iphdr *iph,
-+					struct tcphdr *th, int len)
-+{
-+	__wsum tcp_csum;
-+	__wsum tcp_hdr_csum;
-+	__wsum tcp_ps_hdr_csum;
-+
-+	tcp_csum = ~csum_unfold(th->check);
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, tcp_csum);
-+
-+	tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-+					     len + (th->doff << 2),
-+					     IPPROTO_TCP, 0);
-+
-+	return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
-+			tcp_ps_hdr_csum);
-+}
-+
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+					  struct mlx4_en_rx_desc *rx_desc,
-+					  struct skb_frag_struct *skb_frags,
-+					  unsigned int length,
-+					  struct mlx4_cqe *cqe)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct mlx4_en_lro *lro;
-+	struct sk_buff *skb;
-+	struct iphdr *iph;
-+	struct tcphdr *th;
-+	dma_addr_t dma;
-+	int tcp_hlen;
-+	int tcp_data_len;
-+	int hlen;
-+	u16 ip_len;
-+	void *va;
-+	u32 *ts;
-+	u32 seq;
-+	u32 tsval = (u32) ~0UL;
-+	u32 tsecr = 0;
-+	u32 ack_seq;
-+	u16 window;
-+
-+	/* This packet is eligible for LRO if it is:
-+	 * - DIX Ethernet (type interpretation)
-+	 * - TCP/IP (v4)
-+	 * - without IP options
-+	 * - not an IP fragment */
-+	if (!mlx4_en_can_lro(cqe->status))
-+			return -1;
-+
-+	/* Get pointer to TCP header. We already know that the packet is DIX Ethernet/IPv4/TCP
-+	 * with no VLAN (HW stripped it) and no IP options */
-+	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
-+	iph = va + ETH_HLEN;
-+	th = (struct tcphdr *)(iph + 1);
-+
-+	/* Synchronsize headers for processing */
-+	dma = be64_to_cpu(rx_desc->data[0].addr);
-+#define MAX_LRO_HEADER		(ETH_HLEN + \
-+				 sizeof(*iph) + \
-+				 sizeof(*th) + \
-+				 TCPOLEN_TSTAMP_ALIGNED)
-+	dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
-+				      MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+
-+	/* We only handle aligned timestamp options */
-+	tcp_hlen = (th->doff << 2);
-+	if (tcp_hlen == sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) {
-+		ts = (u32*) (th + 1);
-+		if (unlikely(*ts != htonl((TCPOPT_NOP << 24) |
-+					  (TCPOPT_NOP << 16) |
-+					  (TCPOPT_TIMESTAMP << 8) |
-+					  TCPOLEN_TIMESTAMP)))
-+			goto sync_device;
-+		tsval = ntohl(ts[1]);
-+		tsecr = ts[2];
-+	} else if (tcp_hlen != sizeof(*th))
-+		goto sync_device;
-+	
-+
-+	/* At this point we know we have a TCP packet that is likely to be
-+	 * eligible for LRO. Therefore, see now if we have an oustanding
-+	 * session that corresponds to this packet so we could flush it if
-+	 * something still prevents LRO */
-+	lro = mlx4_en_lro_find_session(mdev, ring, iph, th);
-+
-+	/* ensure no bits set besides ack or psh */
-+	if (th->fin || th->syn || th->rst || th->urg || th->ece ||
-+	    th->cwr || !th->ack) {
-+		if (lro) {
-+			/* First flush session to keep packets in-order */
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		}
-+		goto sync_device;
-+	}
-+
-+	/* Get ip length and verify that the frame is big enough */
-+	ip_len = ntohs(iph->tot_len);
-+	if (unlikely(length < ETH_HLEN + ip_len)) {
-+		mlx4_warn(mdev, "Cannot LRO - ip payload exceeds frame!\n");
-+		goto sync_device;
-+	}
-+
-+	/* Get TCP payload length */
-+	tcp_data_len = ip_len - tcp_hlen - sizeof(struct iphdr);
-+	seq = ntohl(th->seq);
-+	if (!tcp_data_len)
-+		goto flush_session;
-+
-+	if (lro) {
-+		/* Check VLAN tag */
-+		if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+			if (cqe->sl_vid != lro->vlan_prio || !lro->has_vlan) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else if (lro->has_vlan) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* Check sequence number */
-+		if (unlikely(seq != lro->next_seq)) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* If the cummulative IP length is over 64K, flush and start
-+		 * a new session */
-+		if (lro->tot_len + tcp_data_len > 0xffff) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto new_session;
-+		}
-+
-+		/* Check timestamps */
-+		if (tcp_hlen != sizeof(*th)) {
-+			if (unlikely(lro->tsval > tsval || !tsecr))
-+				goto sync_device;
-+		}
-+
-+		window = th->window;
-+		ack_seq = th->ack_seq;
-+		if (likely(tcp_data_len)) {
-+			/* Append the data! */
-+			hlen = ETH_HLEN + sizeof(struct iphdr) + tcp_hlen;
-+			if (mlx4_en_lro_append(priv, lro, rx_desc, skb_frags,
-+							ring->page_alloc,
-+							tcp_data_len, hlen)) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else {
-+			/* No data */
-+			dma_sync_single_range_for_device(&mdev->dev->pdev->dev, dma,
-+							 0, MAX_LRO_HEADER,
-+							 DMA_FROM_DEVICE);
-+		}
-+
-+		/* Update session */
-+		lro->psh |= th->psh;
-+		lro->next_seq += tcp_data_len;
-+		lro->data_csum = csum_block_add(lro->data_csum,
-+					mlx4_en_lro_tcp_data_csum(iph, th,
-+								  tcp_data_len),
-+					lro->tot_len);
-+		lro->tot_len += tcp_data_len;
-+		lro->tsval = tsval;
-+		lro->tsecr = tsecr;
-+		lro->ack_seq = ack_seq;
-+		lro->window = window;
-+		if (tcp_data_len > lro->mss)
-+			lro->mss = tcp_data_len;
-+		priv->port_stats.lro_aggregated++;
-+		if (th->psh)
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		return 0;
-+	}
-+
-+new_session:
-+	if (th->psh)
-+		goto sync_device;
-+	lro = mlx4_en_lro_alloc_session(priv, ring);
-+	if (lro) {
-+		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags, ring->page_alloc,
-+							     ETH_HLEN + ip_len);
-+		if (skb) {
-+			int index;
-+
-+			/* Add in the skb */
-+			lro->skb = skb;
-+			lro->skb_last = skb;
-+			skb->protocol = eth_type_trans(skb, priv->dev);
-+			skb->ip_summed = CHECKSUM_UNNECESSARY;
-+
-+			/* Initialize session */
-+			lro->saddr = iph->saddr;
-+			lro->daddr = iph->daddr;
-+			lro->sport_dport = *((u32*) &th->source);
-+
-+			lro->next_seq = seq + tcp_data_len;
-+			lro->tot_len = ip_len;
-+			lro->psh = th->psh;
-+			lro->ack_seq = th->ack_seq;
-+			lro->window = th->window;
-+			lro->mss = tcp_data_len;
-+			lro->data_csum = mlx4_en_lro_tcp_data_csum(iph, th,
-+						tcp_data_len);
-+
-+			/* Handle vlans */
-+			if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+				lro->vlan_prio = cqe->sl_vid;
-+				lro->has_vlan = 1;
-+			} else
-+				lro->has_vlan = 0;
-+
-+			/* Handle timestamps */
-+			if (tcp_hlen != sizeof(*th)) {
-+				lro->tsval = tsval;
-+				lro->tsecr = tsecr;
-+				lro->has_timestamp = 1;
-+			} else {
-+				lro->tsval = (u32) ~0UL;
-+				lro->has_timestamp = 0;
-+			}
-+
-+			/* Activate this session */
-+			lro->expires = jiffies + HZ / 25;
-+			hlist_del(&lro->node);
-+			index = LRO_INDEX(th, mdev->profile.num_lro);
-+
-+			hlist_add_head(&lro->node, &ring->lro_hash[index]);
-+			hlist_add_head(&lro->flush_node, &ring->lro_flush);
-+			priv->port_stats.lro_aggregated++;
-+			return 0;
-+		}
-+	} else {
-+		priv->port_stats.lro_no_desc++;
-+	}
-+
-+flush_session:
-+	if (lro)
-+		mlx4_en_lro_flush_single(priv, ring, lro);
-+sync_device:
-+	dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
-+					 MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+	return -1;
-+}
-+
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_free, node) {
-+		hlist_del(&lro->node);
-+		kfree(lro);
-+	}
-+	kfree(ring->lro_hash);
-+}
-+
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro)
-+{
-+	struct mlx4_en_lro *lro;
-+	int i;
-+
-+	INIT_HLIST_HEAD(&ring->lro_free);
-+	INIT_HLIST_HEAD(&ring->lro_flush);
-+	ring->lro_hash = kmalloc(sizeof(struct hlist_head) * num_lro,
-+				 GFP_KERNEL);
-+	if (!ring->lro_hash)
-+		return -ENOMEM;
-+
-+	for (i = 0; i < num_lro; i++) {
-+		INIT_HLIST_HEAD(&ring->lro_hash[i]);
-+		lro = kzalloc(sizeof(struct mlx4_en_lro), GFP_KERNEL);
-+		if (!lro) {
-+			mlx4_en_lro_destroy(ring);
-+			return -ENOMEM;
-+		}
-+		INIT_HLIST_NODE(&lro->node);
-+		INIT_HLIST_NODE(&lro->flush_node);
-+		hlist_add_head(&lro->node, &ring->lro_free);
-+	}
-+	return 0;
-+}
-+
-+
 diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
-index 6199f4c..92f91d2 100644
+index 2d57ae6..3b394d7 100644
 --- a/drivers/net/mlx4/en_netdev.c
 +++ b/drivers/net/mlx4/en_netdev.c
 @@ -333,7 +333,8 @@ void mlx4_en_netpoll(struct net_device *dev)
@@ -625,10 +57,10 @@
 -		napi_synchronize(&cq->napi);
 +		while (test_bit(__LINK_STATE_RX_SCHED, &cq->poll_dev->state))
 +			msleep(1);
- 		mlx4_en_process_rx_cq(dev, cq, 0);
- 		spin_unlock_irqrestore(&cq->lock, flags);
- 	}
-@@ -757,8 +758,6 @@ void mlx4_en_stop_port(struct net_device *dev)
+ 		if (priv->rx_ring[i].use_frags)
+ 			mlx4_en_process_rx_cq(dev, cq, 0);
+ 		else
+@@ -755,8 +756,6 @@ void mlx4_en_stop_port(struct net_device *dev)
  	/* Free RX Rings */
  	for (i = 0; i < priv->rx_ring_num; i++) {
  		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -637,226 +69,11 @@
  		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
  	}
  }
-diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
-index 6e96d0a..2b0ca84 100644
---- a/drivers/net/mlx4/en_params.c
-+++ b/drivers/net/mlx4/en_params.c
-@@ -142,21 +142,6 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
-  * Ethtool support
-  */
- 
--static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
--{
--	int i;
--
--	priv->port_stats.lro_aggregated = 0;
--	priv->port_stats.lro_flushed = 0;
--	priv->port_stats.lro_no_desc = 0;
--
--	for (i = 0; i < priv->rx_ring_num; i++) {
--		priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
--		priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
--		priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
--	}
--}
--
- static void
- mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
- {
-@@ -266,8 +251,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
- 
- 	spin_lock_bh(&priv->stats_lock);
- 
--	mlx4_en_update_lro_stats(priv);
--
- 	for (i = 0; i < NUM_MAIN_STATS; i++)
- 		data[index++] = ((unsigned long *) &priv->stats)[i];
- 	for (i = 0; i < NUM_PORT_STATS; i++)
-@@ -457,7 +440,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
- 	.get_rx_csum = mlx4_en_get_rx_csum,
- 	.set_rx_csum = mlx4_en_set_rx_csum,
- 	.get_tx_csum = ethtool_op_get_tx_csum,
--	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
-+	.set_tx_csum = ethtool_op_set_tx_csum,
- 	.get_strings = mlx4_en_get_strings,
- 	.get_sset_count = mlx4_en_get_sset_count,
- 	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
 diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
-index 5a95d74..16f07bc 100644
+index 9d144cd..14b5faf 100644
 --- a/drivers/net/mlx4/en_rx.c
 +++ b/drivers/net/mlx4/en_rx.c
-@@ -37,6 +37,7 @@
- #include <linux/if_ether.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/etherdevice.h>
- 
- #include "mlx4_en.h"
- 
-@@ -51,18 +52,6 @@ static void mlx4_en_srq_event(struct mlx
- 	return;
- }
- 
--static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
--				   void **ip_hdr, void **tcpudp_hdr,
--				   u64 *hdr_flags, void *priv)
--{
--	*mac_hdr = page_address(frags->page) + frags->page_offset;
--	*ip_hdr = *mac_hdr + ETH_HLEN;
--	*tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
--	*hdr_flags = LRO_IPV4 | LRO_TCP;
--
--	return 0;
--}
--
- static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
- 			      struct mlx4_en_rx_desc *rx_desc,
- 			      struct skb_frag_struct *skb_frags,
-@@ -372,23 +361,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- 	}
- 	ring->buf = ring->wqres.buf.direct.buf;
- 
--	/* Configure lro mngr */
--	memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
--	ring->lro.dev = priv->dev;
--	ring->lro.features = LRO_F_NAPI;
--	ring->lro.frag_align_pad = NET_IP_ALIGN;
--	ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
--	ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
--	ring->lro.max_desc = mdev->profile.num_lro;
--	ring->lro.max_aggr = MAX_SKB_FRAGS;
--	ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
--				    sizeof(struct net_lro_desc),
--				    GFP_KERNEL);
--	if (!ring->lro.lro_arr) {
--		mlx4_err(mdev, "Failed to allocate lro array\n");
-+	/* Allocate LRO sessions */
-+	if (mlx4_en_lro_init(ring, mdev->profile.num_lro)) {
-+		mlx4_err(mdev, "Failed allocating lro sessions\n");
- 		goto err_map;
- 	}
--	ring->lro.get_frag_header = mlx4_en_get_frag_header;
- 
- 	return 0;
- 
-@@ -501,7 +478,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 
--	kfree(ring->lro.lro_arr);
-+	mlx4_en_lro_destroy(ring);
- 	mlx4_en_unmap_buffer(&ring->wqres.buf);
- 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
- 	vfree(ring->rx_info);
-@@ -522,12 +499,12 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- 
- 
- /* Unmap a completed descriptor and free unused pages */
--static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
--				    struct mlx4_en_rx_desc *rx_desc,
--				    struct skb_frag_struct *skb_frags,
--				    struct skb_frag_struct *skb_frags_rx,
--				    struct mlx4_en_rx_alloc *page_alloc,
--				    int length)
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct mlx4_en_frag_info *frag_info;
-@@ -570,11 +547,11 @@ fail:
- }
- 
- 
--static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
--				      struct mlx4_en_rx_desc *rx_desc,
--				      struct skb_frag_struct *skb_frags,
--				      struct mlx4_en_rx_alloc *page_alloc,
--				      unsigned int length)
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct sk_buff *skb;
-@@ -663,11 +640,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 	struct mlx4_cqe *cqe;
- 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
- 	struct skb_frag_struct *skb_frags;
--	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
- 	struct mlx4_en_rx_desc *rx_desc;
- 	struct sk_buff *skb;
- 	int index;
--	int nr;
- 	unsigned int length;
- 	int polled = 0;
- 	int ip_summed;
-@@ -718,38 +693,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 			if ((cqe->status & MLX4_CQE_STATUS_IPOK) &&
- 			    (cqe->checksum == 0xffff)) {
- 				priv->port_stats.rx_chksum_good++;
--				/* This packet is eligible for LRO if it is:
--				 * - DIX Ethernet (type interpretation)
--				 * - TCP/IP (v4)
--				 * - without IP options
--				 * - not an IP fragment */
--				if (mlx4_en_can_lro(cqe->status) &&
--				    dev->features & NETIF_F_LRO) {
--
--					nr = mlx4_en_complete_rx_desc(
--						priv, rx_desc,
--						skb_frags, lro_frags,
--						ring->page_alloc, length);
--					if (!nr)
--						goto next;
--
--					if (priv->vlgrp && (cqe->vlan_my_qpn &
--						MLX4_CQE_VLAN_PRESENT_MASK)) {
--						lro_vlan_hwaccel_receive_frags(
--						       &ring->lro, lro_frags,
--						       length, length,
--						       priv->vlgrp,
--						       be16_to_cpu(cqe->sl_vid),
--						       NULL, 0);
--					} else
--						lro_receive_frags(&ring->lro,
--								  lro_frags,
--								  length,
--								  length,
--								  NULL, 0);
--
-+				if (mdev->profile.num_lro &&
-+				    !mlx4_en_lro_rx(priv, ring, rx_desc,
-+						    skb_frags, length, cqe))
- 					goto next;
--				}
- 
- 				/* LRO not possible, complete processing here */
- 				ip_summed = CHECKSUM_UNNECESSARY;
-@@ -790,13 +737,15 @@ next:
- 		if (++polled == budget) {
- 			/* We are here because we reached the NAPI budget -
- 			 * flush only pending LRO sessions */
--			lro_flush_all(&ring->lro);
-+			if (mdev->profile.num_lro)
-+				mlx4_en_lro_flush(priv, ring, 0);
- 			goto out;
- 		}
- 	}
- 
- 	/* If CQ is empty flush all LRO sessions unconditionally */
--	lro_flush_all(&ring->lro);
-+	if (mdev->profile.num_lro)
-+		mlx4_en_lro_flush(priv, ring, 1);
- 
- out:
- 	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
-@@ -820,30 +769,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
+@@ -1041,30 +1041,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
  	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
  
  	if (priv->port_up)
@@ -877,8 +94,8 @@
  	int done;
 +	int work = min(*budget, poll_dev->quota);
  
--	done = mlx4_en_process_rx_cq(dev, cq, budget);
-+	done = mlx4_en_process_rx_cq(dev, cq, work);
+-	done = cq->process_cq(dev, cq, budget);
++	done = cq->process_cq(dev, cq, work);
 +	dev->quota -= done;
 +	*budget -= done;
  
@@ -901,76 +118,11 @@
  }
  
  
-diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
-index 2297fec..59f8772 100644
---- a/drivers/net/mlx4/en_tx.c
-+++ b/drivers/net/mlx4/en_tx.c
-@@ -37,6 +37,7 @@
- #include <linux/skbuff.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/tcp.h>
- 
- #include "mlx4_en.h"
- 
 diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
-index 1011803..7c8c489 100644
+index ebaed67..456cd91 100644
 --- a/drivers/net/mlx4/mlx4_en.h
 +++ b/drivers/net/mlx4/mlx4_en.h
-@@ -38,7 +38,7 @@
- #include <linux/list.h>
- #include <linux/mutex.h>
- #include <linux/netdevice.h>
--#include <linux/inet_lro.h>
-+#include <net/checksum.h>
- 
- #include <linux/mlx4/device.h>
- #include <linux/mlx4/qp.h>
-@@ -268,11 +268,42 @@ struct mlx4_en_rx_desc {
- 	struct mlx4_wqe_data_seg data[0];
- };
- 
-+struct mlx4_en_lro {
-+	struct hlist_node node;
-+	struct hlist_node flush_node;
-+
-+	/* Id fields come first: */
-+	u32 saddr;
-+	u32 daddr;
-+	u32 sport_dport;
-+	u32 next_seq;
-+	u16 tot_len;
-+	u8 psh;
-+
-+	u32 tsval;
-+	u32 tsecr;
-+	u32 ack_seq;
-+	u16 window;
-+	__be16 vlan_prio;
-+	u16 has_vlan;
-+	u16 has_timestamp;
-+	u16 mss;
-+	__wsum  data_csum;
-+
-+	unsigned long expires;
-+	struct sk_buff *skb;
-+	struct sk_buff *skb_last;
-+};
-+
-+
- struct mlx4_en_rx_ring {
- 	struct mlx4_srq srq;
- 	struct mlx4_hwq_resources wqres;
- 	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
--	struct net_lro_mgr lro;
-+	struct mlx4_en_lro lro;
-+	struct hlist_head *lro_hash;
-+	struct hlist_head lro_free;
-+	struct hlist_head lro_flush;
- 	u32 size ;	/* number of Rx descs*/
- 	u32 actual_size;
- 	u32 size_mask;
-@@ -311,7 +342,7 @@ struct mlx4_en_cq {
+@@ -328,7 +328,7 @@ struct mlx4_en_cq {
  	int                     ring;
  	spinlock_t              lock;
  	struct net_device      *dev;
@@ -979,43 +131,12 @@
  	/* Per-core Tx cq processing support */
  	struct timer_list timer;
  	int size;
-@@ -541,7 +572,7 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- int mlx4_en_process_rx_cq(struct net_device *dev,
- 			  struct mlx4_en_cq *cq,
- 			  int budget);
+@@ -571,7 +571,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
+ int mlx4_en_process_rx_cq_skb(struct net_device *dev,
+ 			      struct mlx4_en_cq *cq,
+ 			      int budget);
 -int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
 +int mlx4_en_poll_rx_cq(struct net_device *poll_dev, int *budget);
  void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
  			     int is_tx, int rss, int qpn, int cqn, int srqn,
  			     struct mlx4_qp_context *context);
-@@ -552,6 +583,27 @@ void mlx4_en_calc_rx_buf(struct net_device *dev);
- void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
- 				 struct mlx4_en_rss_map *rss_map,
- 				 int num_entries, int num_rings);
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all);
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+		   struct mlx4_en_rx_desc *rx_desc,
-+		   struct skb_frag_struct *skb_frags,
-+		   unsigned int length, struct mlx4_cqe *cqe);
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring);
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro);
-+
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv* priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length);
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv* priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length);
-+
- void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
- int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
- void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
--- 
-1.5.4
-

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/mlx4_en_0110_no_set_flags.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/mlx4_en_0110_no_set_flags.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.22_suse10_3/mlx4_en_0110_no_set_flags.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -7,9 +7,9 @@
 --- ofed_kernel.orig/drivers/net/mlx4/en_params.c
 +++ ofed_kernel/drivers/net/mlx4/en_params.c
 @@ -593,8 +593,6 @@ const struct ethtool_ops mlx4_en_ethtool
- 	.get_pauseparam = mlx4_en_get_pauseparam,
  	.set_pauseparam = mlx4_en_set_pauseparam,
  	.get_ringparam = mlx4_en_get_ringparam,
+ 	.set_ringparam = mlx4_en_set_ringparam,
 -	.get_flags = ethtool_op_get_flags,
 -	.set_flags = ethtool_op_set_flags,
  };

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/cxgb3_0002_undo_250.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/cxgb3_0002_undo_250.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/cxgb3_0002_undo_250.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,8 @@
-commit 204e2f98c2d13f869b8541f3c57c7314f75cab11
-Author: Divy Le Ray <divy at chelsio.com>
-Date:   Tue May 6 19:26:01 2008 -0700
-
-    cxgb3 - fix EEH
-    
-    Reset the chip when the PCI link goes down.
-    Preserve the napi structure when a sge qset's resources are freed.
-    Replay only HW initialization when the chip comes out of reset.
-    
-    Signed-off-by: Divy Le ray <divy at chelsio.com>
-    Signed-off-by: Jeff Garzik <jgarzik at redhat.com>
-
----
- drivers/net/cxgb3/common.h     |    1 -
- drivers/net/cxgb3/cxgb3_main.c |   10 ++++------
- drivers/net/cxgb3/regs.h       |    8 --------
- drivers/net/cxgb3/sge.c        |   31 ++-----------------------------
- drivers/net/cxgb3/t3_hw.c      |   28 ----------------------------
- 5 files changed, 6 insertions(+), 72 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/common.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/common.h
-+++ ofed_kernel/drivers/net/cxgb3/common.h
-@@ -700,7 +700,6 @@ void mac_prep(struct cmac *mac, struct a
+diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
+index d5e9bf7..dc2c79d 100644
+--- a/drivers/net/cxgb3/common.h
++++ b/drivers/net/cxgb3/common.h
+@@ -726,7 +726,6 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
  void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
  int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
  		    int reset);
@@ -31,11 +10,11 @@
  void t3_led_ready(struct adapter *adapter);
  void t3_fatal_err(struct adapter *adapter);
  void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -2449,6 +2449,9 @@ static pci_ers_result_t t3_io_error_dete
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 8de820e..48fbda6 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -2485,6 +2485,9 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
  	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
  		offload_close(&adapter->tdev);
  
@@ -45,7 +24,7 @@
  	adapter->flags &= ~FULL_INIT_DONE;
  
  	pci_disable_device(pdev);
-@@ -2473,12 +2476,8 @@ static pci_ers_result_t t3_io_slot_reset
+@@ -2509,12 +2512,8 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
  		goto err;
  	}
  	pci_set_master(pdev);
@@ -59,7 +38,7 @@
  		goto err;
  
  	return PCI_ERS_RESULT_RECOVERED;
-@@ -2630,7 +2629,6 @@ static int __devinit init_one(struct pci
+@@ -2666,7 +2665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
  	}
  
  	pci_set_master(pdev);
@@ -67,10 +46,10 @@
  
  	mmio_start = pci_resource_start(pdev, 0);
  	mmio_len = pci_resource_len(pdev, 0);
-Index: ofed_kernel/drivers/net/cxgb3/regs.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/regs.h
-+++ ofed_kernel/drivers/net/cxgb3/regs.h
+diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
+index a035d5c..086cd02 100644
+--- a/drivers/net/cxgb3/regs.h
++++ b/drivers/net/cxgb3/regs.h
 @@ -444,14 +444,6 @@
  
  #define A_PCIE_CFG 0x88
@@ -86,11 +65,11 @@
  #define S_PCIE_CLIDECEN    16
  #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
  #define F_PCIE_CLIDECEN    V_PCIE_CLIDECEN(1U)
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index 1b0861d..912f816 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
  }
  
  /**
@@ -124,7 +103,7 @@
   *	free_qset - free the resources of an SGE queue set
   *	@adapter: the adapter owning the queue set
   *	@q: the queue set
-@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter 
+@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -133,20 +112,11 @@
  }
  
  /**
-@@ -1429,7 +1402,7 @@ static void restart_ctrlq(unsigned long 
-  */
- int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
- {
--	int ret;
-+	int ret;
- 	local_bh_disable();
- 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
- 	local_bh_enable();
-Index: ofed_kernel/drivers/net/cxgb3/t3_hw.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/t3_hw.c
-+++ ofed_kernel/drivers/net/cxgb3/t3_hw.c
-@@ -3265,7 +3265,6 @@ static void config_pcie(struct adapter *
+diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
+index bf5c076..533fc74 100644
+--- a/drivers/net/cxgb3/t3_hw.c
++++ b/drivers/net/cxgb3/t3_hw.c
+@@ -3303,7 +3303,6 @@ static void config_pcie(struct adapter *adap)
  
  	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
  	t3_set_reg_field(adap, A_PCIE_CFG, 0,
@@ -154,15 +124,14 @@
  			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
  }
  
-@@ -3657,30 +3656,3 @@ void t3_led_ready(struct adapter *adapte
- 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+@@ -3706,31 +3705,3 @@ void t3_led_ready(struct adapter *adapter)
  			 F_GPIO0_OUT_VAL);
  }
--
+ 
 -int t3_replay_prep_adapter(struct adapter *adapter)
 -{
 -	const struct adapter_info *ai = adapter->params.info;
--	unsigned int i, j = 0;
+-	unsigned int i, j = -1;
 -	int ret;
 -
 -	early_hw_init(adapter, ai);
@@ -171,15 +140,17 @@
 -		return ret;
 -
 -	for_each_port(adapter, i) {
+-		const struct port_type_info *pti;
 -		struct port_info *p = adap2pinfo(adapter, i);
--		while (!adapter->params.vpd.port_type[j])
--			++j;
 -
--		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
--					ai->mdio_ops);
+-		while (!adapter->params.vpd.port_type[++j])
+-			;
 -
+-		pti = &port_types[adapter->params.vpd.port_type[j]];
+-		ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
+-		if (ret)
+-			return ret;
 -		p->phy.ops->power_down(&p->phy, 1);
--		++j;
 -	}
 -
 -return 0;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/cxgb3_0010_napi.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/cxgb3_0010_napi.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/cxgb3_0010_napi.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,21 @@
----
- drivers/net/cxgb3/adapter.h    |   22 +++--
- drivers/net/cxgb3/cxgb3_main.c |   94 ++++++++++++++++------
- drivers/net/cxgb3/sge.c        |  170 ++++++++++++++++++++++++-----------------
- 3 files changed, 185 insertions(+), 101 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/adapter.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/adapter.h
-+++ ofed_kernel/drivers/net/cxgb3/adapter.h
-@@ -47,13 +47,11 @@
- #include <asm/io.h>
+diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
+index 3b33ecb..21dad82 100644
+--- a/drivers/net/cxgb3/adapter.h
++++ b/drivers/net/cxgb3/adapter.h
+@@ -48,12 +48,10 @@
  
  struct vlan_group;
--struct adapter;
+ struct adapter;
 -struct sge_qset;
  
-+struct adapter;
  struct port_info {
  	struct adapter *adapter;
  	struct vlan_group *vlan_grp;
 -	struct sge_qset *qs;
- 	const struct port_type_info *port_type;
  	u8 port_id;
  	u8 rx_csum_offload;
-@@ -184,8 +182,6 @@ enum {				/* per port SGE statistics */
+ 	u8 nqsets;
+@@ -183,8 +181,6 @@ enum {				/* per port SGE statistics */
  #define T3_MAX_LRO_MAX_PKTS 64
  
  struct sge_qset {		/* an SGE queue set */
@@ -32,7 +24,7 @@
  	struct sge_rspq rspq;
  	struct sge_fl fl[SGE_RXQ_PER_SET];
  	struct sge_txq txq[SGE_TXQ_PER_SET];
-@@ -196,7 +192,7 @@ struct sge_qset {		/* an SGE queue set *
+@@ -195,7 +191,7 @@ struct sge_qset {		/* an SGE queue set */
  	int lro_enabled;
  	int lro_frag_len;
  	void *lro_va;
@@ -41,7 +33,7 @@
  	unsigned long txq_stopped;	/* which Tx queues are stopped */
  	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
  	unsigned long port_stats[SGE_PSTAT_MAX];
-@@ -241,6 +237,12 @@ struct adapter {
+@@ -240,6 +236,12 @@ struct adapter {
  	struct delayed_work adap_check_task;
  	struct work_struct ext_intr_handler_task;
  
@@ -54,7 +46,7 @@
  	struct dentry *debugfs_root;
  
  	struct mutex mdio_lock;
-@@ -267,6 +269,12 @@ static inline struct port_info *adap2pin
+@@ -266,6 +268,12 @@ static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
  	return netdev_priv(adap->port[idx]);
  }
  
@@ -67,7 +59,7 @@
  #define OFFLOAD_DEVMAP_BIT 15
  
  #define tdev2adap(d) container_of(d, struct adapter, tdev)
-@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, str
+@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -76,11 +68,11 @@
  int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
  		unsigned char *data);
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -410,17 +410,49 @@ static void setup_rss(struct adapter *ad
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 0f4c694..342d441 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -435,17 +435,49 @@ static void setup_rss(struct adapter *adap)
  		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
  }
  
@@ -98,9 +90,7 @@
 -	int i;
 +	int i, j, dummy_idx = 0;
 +	struct net_device *nd;
- 
--	for (i = 0; i < SGE_QSETS; i++) {
--		struct sge_qset *qs = &adap->sge.qs[i];
++
 +	for_each_port(adap, i) {
 +		struct net_device *dev = adap->port[i];
 +		const struct port_info *pi = netdev_priv(dev);
@@ -112,7 +102,9 @@
 +				nd = alloc_netdev(sizeof(*p), "", ether_setup);
 +				if (!nd)
 +					goto free_all;
-+
+ 
+-	for (i = 0; i < SGE_QSETS; i++) {
+-		struct sge_qset *qs = &adap->sge.qs[i];
 +				p = netdev_priv(nd);
 +				p->adapter = adap;
 +				nd->weight = 64;
@@ -137,7 +129,7 @@
  }
  
  /*
-@@ -431,18 +463,20 @@ static void init_napi(struct adapter *ad
+@@ -456,18 +488,20 @@ static void init_napi(struct adapter *adap)
  static void quiesce_rx(struct adapter *adap)
  {
  	int i;
@@ -168,7 +160,7 @@
  }
  
  /**
-@@ -455,7 +489,7 @@ static void enable_all_napi(struct adapt
+@@ -480,7 +514,7 @@ static void enable_all_napi(struct adapter *adap)
   */
  static int setup_sge_qsets(struct adapter *adap)
  {
@@ -177,7 +169,7 @@
  	unsigned int ntxq = SGE_TXQ_PER_SET;
  
  	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
-@@ -463,14 +497,15 @@ static int setup_sge_qsets(struct adapte
+@@ -488,14 +522,15 @@ static int setup_sge_qsets(struct adapter *adap)
  
  	for_each_port(adap, i) {
  		struct net_device *dev = adap->port[i];
@@ -196,7 +188,7 @@
  			if (err) {
  				t3_free_sge_resources(adap);
  				return err;
-@@ -884,6 +919,10 @@ static int cxgb_up(struct adapter *adap)
+@@ -909,6 +944,10 @@ static int cxgb_up(struct adapter *adap)
  				goto out;
  		}
  
@@ -207,7 +199,7 @@
  		err = t3_init_hw(adap, 0);
  		if (err)
  			goto out;
-@@ -896,7 +935,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -921,7 +960,6 @@ static int cxgb_up(struct adapter *adap)
  			goto out;
  
  		setup_rss(adap);
@@ -215,7 +207,7 @@
  		adap->flags |= FULL_INIT_DONE;
  	}
  
-@@ -924,7 +962,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -949,7 +987,6 @@ static int cxgb_up(struct adapter *adap)
  				      adap->name, adap)))
  		goto irq_err;
  
@@ -223,7 +215,7 @@
  	t3_sge_start(adap);
  	t3_intr_enable(adap);
  
-@@ -1061,10 +1098,8 @@ static int cxgb_open(struct net_device *
+@@ -1086,10 +1123,8 @@ static int cxgb_open(struct net_device *dev)
  	int other_ports = adapter->open_device_map & PORT_MASK;
  	int err;
  
@@ -235,7 +227,7 @@
  
  	set_bit(pi->port_id, &adapter->open_device_map);
  	if (is_offload(adapter) && !ofld_disable) {
-@@ -2700,6 +2735,7 @@ static int __devinit init_one(struct pci
+@@ -2736,6 +2771,7 @@ static int __devinit init_one(struct pci_dev *pdev,
  #ifdef CONFIG_NET_POLL_CONTROLLER
  		netdev->poll_controller = cxgb_netpoll;
  #endif
@@ -243,7 +235,7 @@
  
  		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
  	}
-@@ -2800,6 +2836,12 @@ static void __devexit remove_one(struct 
+@@ -2836,6 +2872,12 @@ static void __devexit remove_one(struct pci_dev *pdev)
  		t3_free_sge_resources(adapter);
  		cxgb_disable_msi(adapter);
  
@@ -256,11 +248,11 @@
  		for_each_port(adapter, i)
  			if (adapter->port[i])
  				free_netdev(adapter->port[i]);
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter 
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index f6bc6fe..3bbf626 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -270,7 +262,7 @@
  	memset(q, 0, sizeof(*q));
  }
  
-@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, str
+@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
  	unsigned int ndesc, pidx, credits, gen, compl;
  	const struct port_info *pi = netdev_priv(dev);
  	struct adapter *adap = pi->adapter;
@@ -279,7 +271,7 @@
  	struct sge_txq *q = &qs->txq[TXQ_ETH];
  
  	/*
-@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long 
+@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long data)
  	struct sk_buff *skb;
  	struct sge_qset *qs = (struct sge_qset *)data;
  	struct sge_txq *q = &qs->txq[TXQ_CTRL];
@@ -295,7 +287,7 @@
  
  		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
  
-@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long 
+@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long data)
  
  	spin_unlock(&q->lock);
  	wmb();
@@ -304,7 +296,7 @@
  		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
  }
  
-@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struc
+@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
  	else {
  		struct sge_qset *qs = rspq_to_qset(q);
  
@@ -314,7 +306,7 @@
  		q->rx_head = skb;
  	}
  	q->rx_tail = skb;
-@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundl
+@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev,
   *	receive handler.  Batches need to be of modest size as we do prefetches
   *	on the packets in each.
   */
@@ -357,7 +349,7 @@
  			prefetch(head->data);
  			skbs[ngathered] = head;
  			head = head->next;
-@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct 
+@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct *napi, int budget)
  		}
  		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
  	}
@@ -370,7 +362,7 @@
  }
  
  /**
-@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const
+@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const struct rsp_desc *r)
  
  /**
   *	napi_rx_handler - the NAPI handler for Rx processing
@@ -449,7 +441,7 @@
  }
  
  /**
-@@ -2448,7 +2462,8 @@ static inline int handle_responses(struc
+@@ -2448,7 +2462,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
  		return 0;
  	}
@@ -459,7 +451,7 @@
  	return 1;
  }
  
-@@ -2459,7 +2474,8 @@ static inline int handle_responses(struc
+@@ -2459,7 +2474,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -469,7 +461,7 @@
  	struct sge_rspq *q = &qs->rspq;
  
  	spin_lock(&q->lock);
-@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, vo
+@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -484,7 +476,7 @@
  		q->unhandled_irqs++;
  	spin_unlock(&q->lock);
  	return IRQ_HANDLED;
-@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, 
+@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, void *cookie)
  	return IRQ_HANDLED;
  }
  
@@ -502,7 +494,7 @@
  		return 1;
  	}
  	return 0;
-@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int 
+@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
  
  	spin_lock(&q->lock);
  
@@ -515,7 +507,7 @@
  	if (!new_packets && t3_slow_intr_handler(adap) == 0)
  		q->unhandled_irqs++;
  
-@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, voi
+@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, void *cookie)
  static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  {
  	u32 map;
@@ -527,7 +519,7 @@
  
  	t3_write_reg(adap, A_PL_CLI, 0);
  	map = t3_read_reg(adap, A_SG_DATA_INTR);
-@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq
+@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  	if (unlikely(map & F_ERRINTR))
  		t3_slow_intr_handler(adap);
  
@@ -550,7 +542,7 @@
  
  	spin_unlock(&q0->lock);
  	return IRQ_HANDLED;
-@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long d
+@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long data)
  {
  	spinlock_t *lock;
  	struct sge_qset *qs = (struct sge_qset *)data;
@@ -560,7 +552,7 @@
  
  	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
  		reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
-@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long d
+@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long data)
  		spin_unlock(&qs->txq[TXQ_OFLD].lock);
  	}
  	lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
@@ -572,7 +564,7 @@
  			u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
  
  			if (qs->fl[0].credits < qs->fl[0].size)
-@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long d
+@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long data)
   */
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
  {
@@ -586,7 +578,7 @@
  }
  
  /**
-@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_
+@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
   */
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -595,7 +587,7 @@
  {
  	int i, avail, ret = -ENOMEM;
  	struct sge_qset *q = &adapter->sge.qs[id];
-@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *ad
+@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  	}
  
  	spin_unlock_irq(&adapter->sge.reg_lock);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,23 +3,32 @@
  drivers/infiniband/hw/ipath/ipath_user_sdma.c |    6 +++---
  2 files changed, 4 insertions(+), 4 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:37:35.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:41:22.000000000 -0800
 @@ -698,7 +698,7 @@ retry:
  
  	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
  			      tx->map_len, DMA_TO_DEVICE);
--	if (dma_mapping_error(&dd->pcidev->dev, addr)) {
-+	if (dma_mapping_error(addr)) {
- 		ret = -EIO;
- 		goto unlock;
- 	}
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+-	if (dma_mapping_error(&dd->pcidev->dev, addr))
++	if (dma_mapping_error(addr))
+ 		goto ioerr;
+ 
+ 	dwoffset = tx->map_len >> 2;
+@@ -739,7 +739,7 @@ retry:
+ 		dw = (len + 3) >> 2;
+ 		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
+ 				      DMA_TO_DEVICE);
+-		if (dma_mapping_error(&dd->pcidev->dev, addr))
++		if (dma_mapping_error(addr))
+ 			goto unmap;
+ 		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
+ 		/* SDmaUseLargeBuf has to be set in every descriptor */
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:18.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:54.000000000 -0800
 @@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(cons
  
  	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/ipoib_to_2.6.23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/ipoib_to_2.6.23.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/ipoib_to_2.6.23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -10,11 +10,11 @@
  drivers/infiniband/ulp/ipoib/ipoib_main.c |   19 ++++------------
  3 files changed, 25 insertions(+), 32 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 19:00:27.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 19:30:31.000000000 +0200
-@@ -279,8 +279,6 @@ struct ipoib_dev_priv {
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-17 16:53:49.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-17 16:56:10.000000000 +0300
+@@ -277,8 +277,6 @@ struct ipoib_dev_priv {
  
  	struct net_device *dev;
  
@@ -23,7 +23,7 @@
  	unsigned long flags;
  
  	struct mutex vlan_mutex;
-@@ -425,7 +423,7 @@ extern struct workqueue_struct *ipoib_wo
+@@ -423,7 +421,7 @@ extern struct workqueue_struct *ipoib_wo
  
  /* functions */
  
@@ -32,10 +32,10 @@
  void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
  void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 19:00:27.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 19:30:31.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-17 16:53:56.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-17 16:57:35.000000000 +0300
 @@ -408,20 +408,19 @@ static int poll_tx(struct ipoib_dev_priv
  	return n == MAX_SEND_CQE;
  }
@@ -109,58 +109,40 @@
  }
  
  static void drain_tx_cq(struct net_device *dev)
-@@ -809,6 +811,7 @@ int ipoib_ib_dev_stop(struct net_device 
+@@ -708,8 +710,7 @@ int ipoib_ib_dev_open(struct net_device 
+ 	priv->poll_timer.function = ipoib_ib_tx_timer_func;
+ 	priv->poll_timer.data = (unsigned long)dev;
+ 
+-	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_enable(&priv->napi);
++	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+ 
+ 	return 0;
+ }
+@@ -828,8 +829,8 @@ int ipoib_ib_dev_stop(struct net_device 
+ 	struct ipoib_tx_buf *tx_req;
  	int i;
  
- 	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+-	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_disable(&priv->napi);
++	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 +	netif_poll_disable(dev);
  
  	ipoib_cm_dev_stop(dev);
  
-@@ -893,6 +896,7 @@ timeout:
- 		msleep(1);
- 	}
+@@ -903,6 +904,7 @@ timeout:
  
+ 	ipoib_ah_dev_cleanup(dev);
+ 
 +	netif_poll_enable(dev);
  	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
  
  	return 0;
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 19:00:28.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 19:31:25.000000000 +0200
-@@ -106,20 +106,16 @@ int ipoib_open(struct net_device *dev)
- 
- 	ipoib_dbg(priv, "bringing up interface\n");
- 
--	if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
--		napi_enable(&priv->napi);
-+	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- 
- 	if (ipoib_pkey_dev_delay_open(dev))
- 		return 0;
- 
--	if (ipoib_ib_dev_open(dev)) {
--		napi_disable(&priv->napi);
-+	if (ipoib_ib_dev_open(dev))
- 		return -EINVAL;
--	}
- 
- 	if (ipoib_ib_dev_up(dev)) {
- 		ipoib_ib_dev_stop(dev, 1);
--		napi_disable(&priv->napi);
- 		return -EINVAL;
- 	}
- 
-@@ -152,7 +148,6 @@ static int ipoib_stop(struct net_device 
- 	ipoib_dbg(priv, "stopping interface\n");
- 
- 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
--	napi_disable(&priv->napi);
- 
- 	netif_stop_queue(dev);
- 
-@@ -842,7 +837,7 @@ static void ipoib_timeout(struct net_dev
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-17 16:53:56.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-17 16:56:10.000000000 +0300
+@@ -825,7 +825,7 @@ static void ipoib_timeout(struct net_dev
  static int ipoib_hard_header(struct sk_buff *skb,
  			     struct net_device *dev,
  			     unsigned short type,
@@ -169,7 +151,7 @@
  {
  	struct ipoib_header *header;
  
-@@ -1008,10 +1003,6 @@ void ipoib_dev_cleanup(struct net_device
+@@ -991,10 +991,6 @@ void ipoib_dev_cleanup(struct net_device
  	priv->tx_ring = NULL;
  }
  
@@ -180,7 +162,7 @@
  static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
  		       void **tcph, u64 *hdr_flags, void *priv)
  {
-@@ -1069,13 +1060,13 @@ static void ipoib_setup(struct net_devic
+@@ -1052,13 +1048,13 @@ static void ipoib_setup(struct net_devic
  	dev->change_mtu		 = ipoib_change_mtu;
  	dev->hard_start_xmit	 = ipoib_start_xmit;
  	dev->tx_timeout		 = ipoib_timeout;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/iw_nes_050_to_2_6_24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/iw_nes_050_to_2_6_24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/iw_nes_050_to_2_6_24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:23:23.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -1118,19 +1118,19 @@ static ssize_t nes_show_wqm_quanta(struc
  static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
  					const char *buf, size_t count)

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/iw_nes_100_to_2_6_23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/iw_nes_100_to_2_6_23.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/iw_nes_100_to_2_6_23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/Kconfig nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig
---- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:38:45.000000000 -0500
 @@ -2,7 +2,6 @@ config INFINIBAND_NES
  	tristate "NetEffect RNIC Driver"
  	depends on PCI && INET && INFINIBAND
@@ -10,8 +10,8 @@
  	  This is a low-level driver for NetEffect RDMA enabled
  	  Network Interface Cards (RNIC).
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -360,10 +360,11 @@ struct ib_qp *nes_get_qp(struct ib_devic
   */
  static void nes_print_macaddr(struct net_device *netdev)
@@ -29,9 +29,9 @@
  
  /**
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_cm.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2008-11-07 15:26:56.000000000 -0600
-@@ -998,7 +998,6 @@ static int nes_addr_resolve_neigh(struct
+--- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1086,7 +1086,6 @@ static int nes_addr_resolve_neigh(struct
  	struct flowi fl;
  	struct neighbour *neigh;
  	int rc = -1;
@@ -39,7 +39,7 @@
  
  	memset(&fl, 0, sizeof fl);
  	fl.nl_u.ip4_u.daddr = htonl(dst_ip);
-@@ -1012,8 +1011,11 @@ static int nes_addr_resolve_neigh(struct
+@@ -1100,8 +1099,11 @@ static int nes_addr_resolve_neigh(struct
  	if (neigh) {
  		if (neigh->nud_state & NUD_VALID) {
  			nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -53,7 +53,7 @@
  			nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
  					     dst_ip, NES_ARP_ADD);
  			rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
-@@ -1042,7 +1044,6 @@ static struct nes_cm_node *make_cm_node(
+@@ -1130,7 +1132,6 @@ static struct nes_cm_node *make_cm_node(
  	int arpindex = 0;
  	struct nes_device *nesdev;
  	struct nes_adapter *nesadapter;
@@ -61,7 +61,7 @@
  
  	/* create an hte and cm_node for this instance */
  	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
-@@ -1111,8 +1112,11 @@ static struct nes_cm_node *make_cm_node(
+@@ -1197,8 +1198,11 @@ static struct nes_cm_node *make_cm_node(
  
  	/* copy the mac addr to node context */
  	memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
@@ -76,8 +76,8 @@
  	add_hte_node(cm_core, cm_node);
  	atomic_inc(&cm_nodes_created);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2008-11-07 15:38:18.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:38:45.000000000 -0500
 @@ -38,14 +38,9 @@
  #include <linux/ip.h>
  #include <linux/tcp.h>
@@ -90,10 +90,10 @@
 -module_param(nes_lro_max_aggr, uint, 0444);
 -MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
 -
- static u32 crit_err_count;
- u32 int_mod_timer_init;
- u32 int_mod_cq_depth_256;
-@@ -1540,25 +1535,6 @@ static void nes_rq_wqes_timeout(unsigned
+ static int wide_ppm_offset;
+ module_param(wide_ppm_offset, int, 0644);
+ MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm");
+@@ -1531,25 +1526,6 @@ static void nes_rq_wqes_timeout(unsigned
  }
  
  
@@ -119,7 +119,7 @@
  /**
   * nes_init_nic_qp
   */
-@@ -1783,14 +1759,6 @@ int nes_init_nic_qp(struct nes_device *n
+@@ -1773,14 +1749,6 @@ int nes_init_nic_qp(struct nes_device *n
  			jumbomode = 1;
  		nes_nic_init_timer_defaults(nesdev, jumbomode);
  	}
@@ -134,7 +134,7 @@
  	return 0;
  }
  
-@@ -2541,7 +2509,7 @@ static void nes_nic_napi_ce_handler(stru
+@@ -2556,7 +2524,7 @@ static void nes_nic_napi_ce_handler(stru
  {
  	struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
  
@@ -143,7 +143,7 @@
  }
  
  
-@@ -2576,13 +2544,10 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2591,13 +2559,10 @@ void nes_nic_ce_handler(struct nes_devic
  	u16 pkt_type;
  	u16 rqes_processed = 0;
  	u8 sq_cqes = 0;
@@ -157,7 +157,7 @@
  	do {
  		if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
  				NES_NIC_CQE_VALID) {
-@@ -2714,17 +2679,9 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2729,17 +2694,9 @@ void nes_nic_ce_handler(struct nes_devic
  							>> 16);
  					nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
  							nesvnic->netdev->name, vlan_tag);
@@ -178,7 +178,7 @@
  
  skip_rx_indicate0:
  				nesvnic->netdev->last_rx = jiffies;
-@@ -2755,8 +2712,6 @@ skip_rx_indicate0:
+@@ -2770,8 +2727,6 @@ skip_rx_indicate0:
  
  	} while (1);
  
@@ -188,18 +188,18 @@
  		barrier();
  		/* restart the queue if it had been stopped */
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.h nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:38:45.000000000 -0500
 @@ -33,8 +33,6 @@
  #ifndef __NES_HW_H
  #define __NES_HW_H
  
 -#include <linux/inet_lro.h>
 -
+ #define NES_PHY_TYPE_CX4       1
  #define NES_PHY_TYPE_1G        2
  #define NES_PHY_TYPE_IRIS      3
- #define NES_PHY_TYPE_ARGUS     4
-@@ -991,8 +989,6 @@ struct nes_hw_tune_timer {
+@@ -1005,8 +1003,6 @@ struct nes_hw_tune_timer {
  #define NES_TIMER_ENABLE_LIMIT      4
  #define NES_MAX_LINK_INTERRUPTS     128
  #define NES_MAX_LINK_CHECK          200
@@ -208,7 +208,7 @@
  
  struct nes_adapter {
  	u64              fw_ver;
-@@ -1168,7 +1164,6 @@ struct nes_vnic {
+@@ -1194,7 +1190,6 @@ struct nes_vnic {
  	u32               msg_enable;
  	/* u32 tx_avail; */
  	__be32            local_ipaddr;
@@ -216,7 +216,7 @@
  	spinlock_t           tx_lock;	/* could use netdev tx lock? */
  	struct timer_list    rq_wqes_timer;
  	u32                  nic_mem_size;
-@@ -1196,9 +1191,6 @@ struct nes_vnic {
+@@ -1222,9 +1217,6 @@ struct nes_vnic {
  	u8  of_device_registered;
  	u8  rdma_enabled;
  	u8  rx_checksum_disabled;
@@ -227,8 +227,8 @@
  
  struct nes_ib_device {
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
 @@ -96,35 +96,38 @@ static int nics_per_function = 1;
  /**
   * nes_netdev_poll
@@ -292,7 +292,7 @@
  	netif_stop_queue(netdev);
  	list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
  		first_nesvnic = container_of(list_pos, struct nes_vnic, list);
-@@ -797,14 +798,16 @@ static int nes_netdev_set_mac_address(st
+@@ -775,14 +776,16 @@ static int nes_netdev_set_mac_address(st
  	int i;
  	u32 macaddr_low;
  	u16 macaddr_high;
@@ -312,7 +312,7 @@
  	macaddr_high  = ((u16)netdev->dev_addr[0]) << 8;
  	macaddr_high += (u16)netdev->dev_addr[1];
  	macaddr_low   = ((u32)netdev->dev_addr[2]) << 24;
-@@ -909,11 +912,11 @@ static void nes_netdev_set_multicast_lis
+@@ -887,11 +890,11 @@ static void nes_netdev_set_multicast_lis
  			if (mc_index >= max_pft_entries_avaiable)
  				break;
  			if (multicast_addr) {
@@ -329,7 +329,7 @@
  				macaddr_high  = ((u16)multicast_addr->dmi_addr[0]) << 8;
  				macaddr_high += (u16)multicast_addr->dmi_addr[1];
  				macaddr_low   = ((u32)multicast_addr->dmi_addr[2]) << 24;
-@@ -1063,9 +1066,6 @@ static const char nes_ethtool_stringset[
+@@ -1040,9 +1043,6 @@ static const char nes_ethtool_stringset[
  	"CQ Depth 32",
  	"CQ Depth 128",
  	"CQ Depth 256",
@@ -339,17 +339,17 @@
  };
  
  #define NES_ETHTOOL_STAT_COUNT  ARRAY_SIZE(nes_ethtool_stringset)
-@@ -1263,9 +1263,6 @@ static void nes_netdev_get_ethtool_stats
- 	target_stat_values[52] = int_mod_cq_depth_32;
- 	target_stat_values[53] = int_mod_cq_depth_128;
- 	target_stat_values[54] = int_mod_cq_depth_256;
--	target_stat_values[55] = nesvnic->lro_mgr.stats.aggregated;
--	target_stat_values[56] = nesvnic->lro_mgr.stats.flushed;
--	target_stat_values[57] = nesvnic->lro_mgr.stats.no_desc;
+@@ -1240,9 +1240,6 @@ static void nes_netdev_get_ethtool_stats
+ 	target_stat_values[++index] = int_mod_cq_depth_32;
+ 	target_stat_values[++index] = int_mod_cq_depth_128;
+ 	target_stat_values[++index] = int_mod_cq_depth_256;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
  
  }
  
-@@ -1542,8 +1539,6 @@ static struct ethtool_ops nes_ethtool_op
+@@ -1525,8 +1522,6 @@ static struct ethtool_ops nes_ethtool_op
  	.set_sg = ethtool_op_set_sg,
  	.get_tso = ethtool_op_get_tso,
  	.set_tso = ethtool_op_set_tso,
@@ -358,7 +358,7 @@
  };
  
  
-@@ -1615,7 +1610,8 @@ struct net_device *nes_netdev_init(struc
+@@ -1598,7 +1593,8 @@ struct net_device *nes_netdev_init(struc
  	netdev->type = ARPHRD_ETHER;
  	netdev->features = NETIF_F_HIGHDMA;
  	netdev->ethtool_ops = &nes_ethtool_ops;
@@ -368,7 +368,7 @@
  	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
  	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
  	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
-@@ -1648,7 +1644,6 @@ struct net_device *nes_netdev_init(struc
+@@ -1630,7 +1626,6 @@ struct net_device *nes_netdev_init(struc
  
  	if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
  		netdev->features |= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/mlx4_en_0099_no_multiqueue.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/mlx4_en_0099_no_multiqueue.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/mlx4_en_0099_no_multiqueue.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,73 @@
+diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
+index bce700a..a9ee0d7 100644
+--- a/drivers/net/mlx4/en_netdev.c
++++ b/drivers/net/mlx4/en_netdev.c
+@@ -963,7 +963,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	int i;
+ 	int err;
+ 
+-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
++	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+ 	if (dev == NULL) {
+ 		mlx4_err(mdev, "Net device allocation failed\n");
+ 		return -ENOMEM;
+@@ -1036,7 +1036,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	dev->open = &mlx4_en_open;
+ 	dev->stop = &mlx4_en_close;
+ 	dev->hard_start_xmit = &mlx4_en_xmit;
+-	dev->select_queue = &mlx4_en_select_queue;
+ 	dev->get_stats = &mlx4_en_get_stats;
+ 	dev->set_multicast_list = &mlx4_en_set_multicast;
+ 	dev->set_mac_address = &mlx4_en_set_mac;
+diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
+index 3d8246f..72e166f 100644
+--- a/drivers/net/mlx4/en_tx.c
++++ b/drivers/net/mlx4/en_tx.c
+@@ -392,7 +392,7 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+ 			 *   transmission on that ring would stop the queue.
+ 			 */
+ 			ring->blocked = 0;
+-			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
++			netif_wake_queue(dev);
+ 			priv->port_stats.wake_queue++;
+ 		}
+ 	}
+@@ -612,7 +612,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
+ 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ }
+ 
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
++static int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	u16 vlan_tag = 0;
+@@ -703,7 +703,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+-	tx_ind = skb->queue_mapping;
++	tx_ind = mlx4_en_select_queue(dev, skb);
+ 	ring = &priv->tx_ring[tx_ind];
+ 	if (priv->vlgrp && vlan_tx_tag_present(skb))
+ 		vlan_tag = vlan_tx_tag_get(skb);
+@@ -713,7 +713,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ 		/* every full Tx ring stops queue.
+ 		 * TODO: implement multi-queue support (per-queue stop) */
+-		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
++		netif_stop_queue(dev);
+ 		ring->blocked = 1;
+ 		priv->port_stats.queue_stopped++;
+ 
+diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
+index adef17c..995e318 100644
+--- a/drivers/net/mlx4/mlx4_en.h
++++ b/drivers/net/mlx4/mlx4_en.h
+@@ -521,7 +521,6 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq);
+ void mlx4_en_poll_tx_cq(unsigned long data);
+ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+ 
+ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+ 			   u32 size, u16 stride);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/mlx4_en_0100_to_2.6.24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/mlx4_en_0100_to_2.6.24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/mlx4_en_0100_to_2.6.24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,33 +1,5 @@
-From 6d0a6cc6981404e95297924c9a8f169b3447a98e Mon Sep 17 00:00:00 2001
-From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
-Date: Thu, 2 Oct 2008 11:26:30 +0300
-Subject: [PATCH] mlx4_en: LRO backport patch
-
-Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
----
- drivers/net/mlx4/Makefile    |    2 +-
- drivers/net/mlx4/en_cq.c     |   23 ++-
- drivers/net/mlx4/en_lro.c    |  529 ++++++++++++++++++++++++++++++++++++++++++
- drivers/net/mlx4/en_netdev.c |    5 +-
- drivers/net/mlx4/en_params.c |   19 +--
- drivers/net/mlx4/en_rx.c     |  121 +++-------
- drivers/net/mlx4/en_tx.c     |    1 +
- drivers/net/mlx4/mlx4_en.h   |   60 +++++-
- 8 files changed, 646 insertions(+), 114 deletions(-)
- create mode 100644 drivers/net/mlx4/en_lro.c
-
-diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
-index acb1b91..e6bfdec 100644
---- a/drivers/net/mlx4/Makefile
-+++ b/drivers/net/mlx4/Makefile
-@@ -5,4 +5,4 @@ mlx4_core-y :=  alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-
- obj-$(CONFIG_MLX4_EN)                  += mlx4_en.o
-
--mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o
-+mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o en_lro.o
 diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
-index cf9c5df..2348bfc 100644
+index a4d6e46..440633a 100644
 --- a/drivers/net/mlx4/en_cq.c
 +++ b/drivers/net/mlx4/en_cq.c
 @@ -34,6 +34,7 @@
@@ -38,7 +10,7 @@
  
  #include "mlx4_en.h"
  
-@@ -100,8 +101,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -106,8 +107,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  		cq->timer.function = mlx4_en_poll_tx_cq;
  		cq->timer.data = (unsigned long) cq;
  	} else {
@@ -58,7 +30,7 @@
  	}
  
  	return 0;
-@@ -123,8 +133,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -129,8 +139,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  
  	if (cq->is_tx)
  		del_timer(&cq->timer);
@@ -74,548 +46,8 @@
  
  	mlx4_cq_free(mdev->dev, &cq->mcq);
  }
-diff --git a/drivers/net/mlx4/en_lro.c b/drivers/net/mlx4/en_lro.c
-new file mode 100644
-index 0000000..9d17376
---- /dev/null
-+++ b/drivers/net/mlx4/en_lro.c
-@@ -0,0 +1,534 @@
-+/*
-+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses.  You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ *     Redistribution and use in source and binary forms, with or
-+ *     without modification, are permitted provided that the following
-+ *     conditions are met:
-+ *
-+ *      - Redistributions of source code must retain the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer.
-+ *
-+ *      - Redistributions in binary form must reproduce the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer in the documentation and/or other materials
-+ *        provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ */
-+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <net/tcp.h>
-+#include <linux/if_vlan.h>
-+#include <linux/delay.h>
-+
-+#include "mlx4_en.h"
-+
-+/* LRO hash function - using sum of source and destination port LSBs is
-+ * good enough */
-+#define LRO_INDEX(th, size) \
-+	((*((u8*) &th->source + 1) + *((u8*) &th->dest + 1)) & (size - 1))
-+
-+/* #define CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+static void mlx4_en_lro_validate(struct mlx4_en_priv* priv, struct mlx4_en_lro *lro)
-+{
-+	int i;
-+	int size, size2;
-+	struct sk_buff *skb = lro->skb;
-+	skb_frag_t *frags;
-+	int len, len2;
-+	int cur_skb = 0;
-+
-+	/* Sum fragment sizes of first skb */
-+	len = skb->len;
-+	size = skb_headlen(skb);
-+	frags = skb_shinfo(skb)->frags;
-+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+		size += frags[i].size;
-+	}
-+
-+	/* Add in fragments of linked skb's */
-+	skb = skb_shinfo(skb)->frag_list;
-+	while (skb) {
-+		cur_skb++;
-+		len2 = skb->len;
-+		if (skb_headlen(skb)) {
-+			mlx4_err(priv->mdev, "Bad LRO format: non-zero headlen "
-+				  "in fraglist (skb:%d)\n", cur_skb);
-+			return;
-+		}
-+
-+		size2 = 0;
-+		frags = skb_shinfo(skb)->frags;
-+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+			size2 += frags[i].size;
-+		}
-+
-+		if (size2 != len2) {
-+			mlx4_err(priv->mdev, "Bad skb size:%d in LRO fraglist. "
-+			          "Expected:%d (skb:%d)\n", size2, len2, cur_skb);
-+			return;
-+		}
-+		size += size2;
-+		skb = skb->next;
-+	}
-+
-+	if (size != len)
-+		mlx4_err(priv->mdev, "Bad LRO size:%d expected:%d\n", size, len);
-+}
-+#endif /* MLX4_EN_DEBUG_LRO */
-+
-+static void mlx4_en_lro_flush_single(struct mlx4_en_priv* priv,
-+		   struct mlx4_en_rx_ring* ring, struct mlx4_en_lro *lro)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct sk_buff *skb = lro->skb;
-+	struct iphdr *iph = (struct iphdr *) skb->data;
-+	struct tcphdr *th = (struct tcphdr *)(iph + 1);
-+	unsigned int headlen = skb_headlen(skb);
-+	__wsum tcp_hdr_csum;
-+	u32 *ts;
-+
-+	/* Update IP length and checksum */
-+	iph->tot_len = htons(lro->tot_len);
-+	iph->check = 0;
-+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-+
-+	/* Update latest TCP ack, window, psh, and timestamp */
-+	th->ack_seq = lro->ack_seq;
-+	th->window = lro->window;
-+	th->psh = !!lro->psh;
-+	if (lro->has_timestamp) {
-+		ts = (u32 *) (th + 1);
-+		ts[1] = htonl(lro->tsval);
-+		ts[2] = lro->tsecr;
-+	}
-+	th->check = 0;
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, 0);
-+	lro->data_csum = csum_add(lro->data_csum, tcp_hdr_csum);
-+	th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
-+				      lro->tot_len - (iph->ihl << 2),
-+				      IPPROTO_TCP, lro->data_csum);
-+
-+	/* Update skb */
-+	skb->len = lro->tot_len;
-+	skb->data_len = lro->tot_len - headlen;
-+	skb->truesize = skb->len + sizeof(struct sk_buff);
-+	skb_shinfo(skb)->gso_size = lro->mss;
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+	mlx4_en_lro_validate(priv, lro);
-+#endif /* CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+	/* Push it up the stack */
-+	if (priv->vlgrp && lro->has_vlan)
-+		vlan_hwaccel_receive_skb(skb, priv->vlgrp,
-+					be16_to_cpu(lro->vlan_prio));
-+	else
-+		netif_receive_skb(skb);
-+	priv->dev->last_rx = jiffies;
-+
-+	/* Increment stats */
-+	priv->port_stats.lro_flushed++;
-+
-+	/* Move session back to the free list */
-+	hlist_del(&lro->node);
-+	hlist_del(&lro->flush_node);
-+	hlist_add_head(&lro->node, &ring->lro_free);
-+}
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_flush, flush_node) {
-+		if (all || time_after(jiffies, lro->expires))
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+	}
-+}
-+
-+static inline int mlx4_en_lro_append(struct mlx4_en_priv *priv,
-+				   struct mlx4_en_lro *lro,
-+				   struct mlx4_en_rx_desc *rx_desc,
-+				   struct skb_frag_struct *skb_frags,
-+				   struct mlx4_en_rx_alloc *page_alloc,
-+				   unsigned int data_len,
-+				   int hlen)
-+{
-+	struct sk_buff *skb = lro->skb_last;
-+	struct skb_shared_info *info;
-+	struct skb_frag_struct *frags_copy;
-+	int nr_frags;
-+
-+	if (skb_shinfo(skb)->nr_frags + priv->num_frags > MAX_SKB_FRAGS)
-+		return -ENOMEM;
-+
-+	info = skb_shinfo(skb);
-+
-+	/* Copy fragments from descriptor ring to skb */
-+	frags_copy = info->frags + info->nr_frags;
-+	nr_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
-+						frags_copy,
-+						page_alloc,
-+						data_len + hlen);
-+	if (!nr_frags) {
-+		mlx4_dbg(DRV, priv, "Failed completing rx desc during LRO append\n");
-+		return -ENOMEM;
-+	}
-+
-+	/* Skip over headers */
-+	frags_copy[0].page_offset += hlen;
-+
-+	if (nr_frags == 1)
-+		frags_copy[0].size = data_len;
-+	else {
-+		/* Adjust size of last fragment to match packet length.
-+		 * Note: if this fragment is also the first one, the
-+		 *       operation is completed in the next line */
-+		frags_copy[nr_frags - 1].size = hlen + data_len -
-+				priv->frag_info[nr_frags - 1].frag_prefix_size;
-+
-+		/* Adjust size of first fragment */
-+		frags_copy[0].size -= hlen;
-+	}
-+
-+	/* Update skb bookkeeping */
-+	skb->len += data_len;
-+	skb->data_len += data_len;
-+	info->nr_frags += nr_frags;
-+	return 0;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_find_session(struct mlx4_en_dev *mdev,
-+						       struct mlx4_en_rx_ring *ring,
-+						       struct iphdr *iph,
-+						       struct tcphdr *th)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node;
-+	int index = LRO_INDEX(th, mdev->profile.num_lro);
-+	struct hlist_head *list = &ring->lro_hash[index];
-+
-+	hlist_for_each_entry(lro, node, list, node) {
-+		if (lro->sport_dport == *((u32*) &th->source) &&
-+		    lro->saddr == iph->saddr &&
-+		    lro->daddr == iph->daddr)
-+			return lro;
-+	}
-+	return NULL;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_alloc_session(struct mlx4_en_priv *priv,
-+							struct mlx4_en_rx_ring *ring)
-+{
-+	return hlist_empty(&ring->lro_free) ? NULL :
-+		hlist_entry(ring->lro_free.first, struct mlx4_en_lro, node);
-+}
-+
-+static __wsum mlx4_en_lro_tcp_data_csum(struct iphdr *iph,
-+					struct tcphdr *th, int len)
-+{
-+	__wsum tcp_csum;
-+	__wsum tcp_hdr_csum;
-+	__wsum tcp_ps_hdr_csum;
-+
-+	tcp_csum = ~csum_unfold(th->check);
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, tcp_csum);
-+
-+	tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-+					     len + (th->doff << 2),
-+					     IPPROTO_TCP, 0);
-+
-+	return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
-+			tcp_ps_hdr_csum);
-+}
-+
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+					  struct mlx4_en_rx_desc *rx_desc,
-+					  struct skb_frag_struct *skb_frags,
-+					  unsigned int length,
-+					  struct mlx4_cqe *cqe)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct mlx4_en_lro *lro;
-+	struct sk_buff *skb;
-+	struct iphdr *iph;
-+	struct tcphdr *th;
-+	dma_addr_t dma;
-+	int tcp_hlen;
-+	int tcp_data_len;
-+	int hlen;
-+	u16 ip_len;
-+	void *va;
-+	u32 *ts;
-+	u32 seq;
-+	u32 tsval = (u32) ~0UL;
-+	u32 tsecr = 0;
-+	u32 ack_seq;
-+	u16 window;
-+
-+	/* This packet is eligible for LRO if it is:
-+	 * - DIX Ethernet (type interpretation)
-+	 * - TCP/IP (v4)
-+	 * - without IP options
-+	 * - not an IP fragment */
-+	if (!mlx4_en_can_lro(cqe->status))
-+			return -1;
-+
-+	/* Get pointer to TCP header. We already know that the packet is DIX Ethernet/IPv4/TCP
-+	 * with no VLAN (HW stripped it) and no IP options */
-+	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
-+	iph = va + ETH_HLEN;
-+	th = (struct tcphdr *)(iph + 1);
-+
-+	/* Synchronize headers for processing */
-+	dma = be64_to_cpu(rx_desc->data[0].addr);
-+#define MAX_LRO_HEADER		(ETH_HLEN + \
-+				 sizeof(*iph) + \
-+				 sizeof(*th) + \
-+				 TCPOLEN_TSTAMP_ALIGNED)
-+	dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
-+				      MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+
-+	/* We only handle aligned timestamp options */
-+	tcp_hlen = (th->doff << 2);
-+	if (tcp_hlen == sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) {
-+		ts = (u32*) (th + 1);
-+		if (unlikely(*ts != htonl((TCPOPT_NOP << 24) |
-+					  (TCPOPT_NOP << 16) |
-+					  (TCPOPT_TIMESTAMP << 8) |
-+					  TCPOLEN_TIMESTAMP)))
-+			goto sync_device;
-+		tsval = ntohl(ts[1]);
-+		tsecr = ts[2];
-+	} else if (tcp_hlen != sizeof(*th))
-+		goto sync_device;
-+	
-+
-+	/* At this point we know we have a TCP packet that is likely to be
-+	 * eligible for LRO. Therefore, see now if we have an outstanding
-+	 * session that corresponds to this packet so we could flush it if
-+	 * something still prevents LRO */
-+	lro = mlx4_en_lro_find_session(mdev, ring, iph, th);
-+
-+	/* ensure no bits set besides ack or psh */
-+	if (th->fin || th->syn || th->rst || th->urg || th->ece ||
-+	    th->cwr || !th->ack) {
-+		if (lro) {
-+			/* First flush session to keep packets in-order */
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		}
-+		goto sync_device;
-+	}
-+
-+	/* Get ip length and verify that the frame is big enough */
-+	ip_len = ntohs(iph->tot_len);
-+	if (unlikely(length < ETH_HLEN + ip_len)) {
-+		mlx4_warn(mdev, "Cannot LRO - ip payload exceeds frame!\n");
-+		goto sync_device;
-+	}
-+
-+	/* Get TCP payload length */
-+	tcp_data_len = ip_len - tcp_hlen - sizeof(struct iphdr);
-+	seq = ntohl(th->seq);
-+	if (!tcp_data_len)
-+		goto flush_session;
-+
-+	if (lro) {
-+		/* Check VLAN tag */
-+		if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+			if (cqe->sl_vid != lro->vlan_prio || !lro->has_vlan) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else if (lro->has_vlan) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* Check sequence number */
-+		if (unlikely(seq != lro->next_seq)) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* If the cumulative IP length is over 64K, flush and start
-+		 * a new session */
-+		if (lro->tot_len + tcp_data_len > 0xffff) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto new_session;
-+		}
-+
-+		/* Check timestamps */
-+		if (tcp_hlen != sizeof(*th)) {
-+			if (unlikely(lro->tsval > tsval || !tsecr))
-+				goto sync_device;
-+		}
-+
-+		window = th->window;
-+		ack_seq = th->ack_seq;
-+		if (likely(tcp_data_len)) {
-+			/* Append the data! */
-+			hlen = ETH_HLEN + sizeof(struct iphdr) + tcp_hlen;
-+			if (mlx4_en_lro_append(priv, lro, rx_desc, skb_frags,
-+							ring->page_alloc,
-+							tcp_data_len, hlen)) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else {
-+			/* No data */
-+			dma_sync_single_range_for_device(&mdev->dev->pdev->dev, dma,
-+							 0, MAX_LRO_HEADER,
-+							 DMA_FROM_DEVICE);
-+		}
-+
-+		/* Update session */
-+		lro->psh |= th->psh;
-+		lro->next_seq += tcp_data_len;
-+		lro->data_csum = csum_block_add(lro->data_csum,
-+					mlx4_en_lro_tcp_data_csum(iph, th,
-+								  tcp_data_len),
-+					lro->tot_len);
-+		lro->tot_len += tcp_data_len;
-+		lro->tsval = tsval;
-+		lro->tsecr = tsecr;
-+		lro->ack_seq = ack_seq;
-+		lro->window = window;
-+		if (tcp_data_len > lro->mss)
-+			lro->mss = tcp_data_len;
-+		priv->port_stats.lro_aggregated++;
-+		if (th->psh)
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		return 0;
-+	}
-+
-+new_session:
-+	if (th->psh)
-+		goto sync_device;
-+	lro = mlx4_en_lro_alloc_session(priv, ring);
-+	if (lro) {
-+		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags, ring->page_alloc,
-+							     ETH_HLEN + ip_len);
-+		if (skb) {
-+			int index;
-+
-+			/* Add in the skb */
-+			lro->skb = skb;
-+			lro->skb_last = skb;
-+			skb->protocol = eth_type_trans(skb, priv->dev);
-+			skb->ip_summed = CHECKSUM_UNNECESSARY;
-+
-+			/* Initialize session */
-+			lro->saddr = iph->saddr;
-+			lro->daddr = iph->daddr;
-+			lro->sport_dport = *((u32*) &th->source);
-+
-+			lro->next_seq = seq + tcp_data_len;
-+			lro->tot_len = ip_len;
-+			lro->psh = th->psh;
-+			lro->ack_seq = th->ack_seq;
-+			lro->window = th->window;
-+			lro->mss = tcp_data_len;
-+			lro->data_csum = mlx4_en_lro_tcp_data_csum(iph, th,
-+						tcp_data_len);
-+
-+			/* Handle vlans */
-+			if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+				lro->vlan_prio = cqe->sl_vid;
-+				lro->has_vlan = 1;
-+			} else
-+				lro->has_vlan = 0;
-+
-+			/* Handle timestamps */
-+			if (tcp_hlen != sizeof(*th)) {
-+				lro->tsval = tsval;
-+				lro->tsecr = tsecr;
-+				lro->has_timestamp = 1;
-+			} else {
-+				lro->tsval = (u32) ~0UL;
-+				lro->has_timestamp = 0;
-+			}
-+
-+			/* Activate this session */
-+			lro->expires = jiffies + HZ / 25;
-+			hlist_del(&lro->node);
-+			index = LRO_INDEX(th, mdev->profile.num_lro);
-+
-+			hlist_add_head(&lro->node, &ring->lro_hash[index]);
-+			hlist_add_head(&lro->flush_node, &ring->lro_flush);
-+			priv->port_stats.lro_aggregated++;
-+			return 0;
-+		}
-+	} else {
-+		priv->port_stats.lro_no_desc++;
-+	}
-+
-+flush_session:
-+	if (lro)
-+		mlx4_en_lro_flush_single(priv, ring, lro);
-+sync_device:
-+	dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
-+					 MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+	return -1;
-+}
-+
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_free, node) {
-+		hlist_del(&lro->node);
-+		kfree(lro);
-+	}
-+	kfree(ring->lro_hash);
-+}
-+
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro)
-+{
-+	struct mlx4_en_lro *lro;
-+	int i;
-+
-+	INIT_HLIST_HEAD(&ring->lro_free);
-+	INIT_HLIST_HEAD(&ring->lro_flush);
-+	ring->lro_hash = kmalloc(sizeof(struct hlist_head) * num_lro,
-+				 GFP_KERNEL);
-+	if (!ring->lro_hash)
-+		return -ENOMEM;
-+
-+	for (i = 0; i < num_lro; i++) {
-+		INIT_HLIST_HEAD(&ring->lro_hash[i]);
-+		lro = kzalloc(sizeof(struct mlx4_en_lro), GFP_KERNEL);
-+		if (!lro) {
-+			mlx4_en_lro_destroy(ring);
-+			return -ENOMEM;
-+		}
-+		INIT_HLIST_NODE(&lro->node);
-+		INIT_HLIST_NODE(&lro->flush_node);
-+		hlist_add_head(&lro->node, &ring->lro_free);
-+	}
-+	return 0;
-+}
-+
-+
 diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
-index 6199f4c..92f91d2 100644
+index 2d57ae6..3b394d7 100644
 --- a/drivers/net/mlx4/en_netdev.c
 +++ b/drivers/net/mlx4/en_netdev.c
 @@ -333,7 +333,8 @@ void mlx4_en_netpoll(struct net_device *dev)
@@ -625,10 +57,10 @@
 -		napi_synchronize(&cq->napi);
 +		while (test_bit(__LINK_STATE_RX_SCHED, &cq->poll_dev->state))
 +			msleep(1);
- 		mlx4_en_process_rx_cq(dev, cq, 0);
- 		spin_unlock_irqrestore(&cq->lock, flags);
- 	}
-@@ -757,8 +758,6 @@ void mlx4_en_stop_port(struct net_device *dev)
+ 		if (priv->rx_ring[i].use_frags)
+ 			mlx4_en_process_rx_cq(dev, cq, 0);
+ 		else
+@@ -755,8 +756,6 @@ void mlx4_en_stop_port(struct net_device *dev)
  	/* Free RX Rings */
  	for (i = 0; i < priv->rx_ring_num; i++) {
  		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -637,226 +69,11 @@
  		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
  	}
  }
-diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
-index 6e96d0a..2b0ca84 100644
---- a/drivers/net/mlx4/en_params.c
-+++ b/drivers/net/mlx4/en_params.c
-@@ -142,21 +142,6 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
-  * Ethtool support
-  */
- 
--static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
--{
--	int i;
--
--	priv->port_stats.lro_aggregated = 0;
--	priv->port_stats.lro_flushed = 0;
--	priv->port_stats.lro_no_desc = 0;
--
--	for (i = 0; i < priv->rx_ring_num; i++) {
--		priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
--		priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
--		priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
--	}
--}
--
- static void
- mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
- {
-@@ -266,8 +251,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
- 
- 	spin_lock_bh(&priv->stats_lock);
- 
--	mlx4_en_update_lro_stats(priv);
--
- 	for (i = 0; i < NUM_MAIN_STATS; i++)
- 		data[index++] = ((unsigned long *) &priv->stats)[i];
- 	for (i = 0; i < NUM_PORT_STATS; i++)
-@@ -457,7 +440,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
- 	.get_rx_csum = mlx4_en_get_rx_csum,
- 	.set_rx_csum = mlx4_en_set_rx_csum,
- 	.get_tx_csum = ethtool_op_get_tx_csum,
--	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
-+	.set_tx_csum = ethtool_op_set_tx_csum,
- 	.get_strings = mlx4_en_get_strings,
- 	.get_sset_count = mlx4_en_get_sset_count,
- 	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
 diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
-index 5a95d74..16f07bc 100644
+index 9d144cd..14b5faf 100644
 --- a/drivers/net/mlx4/en_rx.c
 +++ b/drivers/net/mlx4/en_rx.c
-@@ -37,6 +37,7 @@
- #include <linux/if_ether.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/etherdevice.h>
- 
- #include "mlx4_en.h"
- 
-@@ -51,18 +52,6 @@ static void mlx4_en_srq_event(struct mlx
- 	return;
- }
- 
--static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
--				   void **ip_hdr, void **tcpudp_hdr,
--				   u64 *hdr_flags, void *priv)
--{
--	*mac_hdr = page_address(frags->page) + frags->page_offset;
--	*ip_hdr = *mac_hdr + ETH_HLEN;
--	*tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
--	*hdr_flags = LRO_IPV4 | LRO_TCP;
--
--	return 0;
--}
--
- static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
- 			      struct mlx4_en_rx_desc *rx_desc,
- 			      struct skb_frag_struct *skb_frags,
-@@ -372,23 +361,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- 	}
- 	ring->buf = ring->wqres.buf.direct.buf;
- 
--	/* Configure lro mngr */
--	memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
--	ring->lro.dev = priv->dev;
--	ring->lro.features = LRO_F_NAPI;
--	ring->lro.frag_align_pad = NET_IP_ALIGN;
--	ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
--	ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
--	ring->lro.max_desc = mdev->profile.num_lro;
--	ring->lro.max_aggr = MAX_SKB_FRAGS;
--	ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
--				    sizeof(struct net_lro_desc),
--				    GFP_KERNEL);
--	if (!ring->lro.lro_arr) {
--		mlx4_err(mdev, "Failed to allocate lro array\n");
-+	/* Allocate LRO sessions */
-+	if (mlx4_en_lro_init(ring, mdev->profile.num_lro)) {
-+		mlx4_err(mdev, "Failed allocating lro sessions\n");
- 		goto err_map;
- 	}
--	ring->lro.get_frag_header = mlx4_en_get_frag_header;
- 
- 	return 0;
- 
-@@ -501,7 +478,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 
--	kfree(ring->lro.lro_arr);
-+	mlx4_en_lro_destroy(ring);
- 	mlx4_en_unmap_buffer(&ring->wqres.buf);
- 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
- 	vfree(ring->rx_info);
-@@ -522,12 +499,12 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- 
- 
- /* Unmap a completed descriptor and free unused pages */
--static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
--				    struct mlx4_en_rx_desc *rx_desc,
--				    struct skb_frag_struct *skb_frags,
--				    struct skb_frag_struct *skb_frags_rx,
--				    struct mlx4_en_rx_alloc *page_alloc,
--				    int length)
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct mlx4_en_frag_info *frag_info;
-@@ -570,11 +547,11 @@ fail:
- }
- 
- 
--static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
--				      struct mlx4_en_rx_desc *rx_desc,
--				      struct skb_frag_struct *skb_frags,
--				      struct mlx4_en_rx_alloc *page_alloc,
--				      unsigned int length)
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct sk_buff *skb;
-@@ -663,11 +640,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 	struct mlx4_cqe *cqe;
- 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
- 	struct skb_frag_struct *skb_frags;
--	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
- 	struct mlx4_en_rx_desc *rx_desc;
- 	struct sk_buff *skb;
- 	int index;
--	int nr;
- 	unsigned int length;
- 	int polled = 0;
- 	int ip_summed;
-@@ -718,38 +693,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 			if ((cqe->status & MLX4_CQE_STATUS_IPOK) &&
- 			    (cqe->checksum == 0xffff)) {
- 				priv->port_stats.rx_chksum_good++;
--				/* This packet is eligible for LRO if it is:
--				 * - DIX Ethernet (type interpretation)
--				 * - TCP/IP (v4)
--				 * - without IP options
--				 * - not an IP fragment */
--				if (mlx4_en_can_lro(cqe->status) &&
--				    dev->features & NETIF_F_LRO) {
--
--					nr = mlx4_en_complete_rx_desc(
--						priv, rx_desc,
--						skb_frags, lro_frags,
--						ring->page_alloc, length);
--					if (!nr)
--						goto next;
--
--					if (priv->vlgrp && (cqe->vlan_my_qpn &
--						MLX4_CQE_VLAN_PRESENT_MASK)) {
--						lro_vlan_hwaccel_receive_frags(
--						       &ring->lro, lro_frags,
--						       length, length,
--						       priv->vlgrp,
--						       be16_to_cpu(cqe->sl_vid),
--						       NULL, 0);
--					} else
--						lro_receive_frags(&ring->lro,
--								  lro_frags,
--								  length,
--								  length,
--								  NULL, 0);
--
-+				if (mdev->profile.num_lro &&
-+				    !mlx4_en_lro_rx(priv, ring, rx_desc,
-+						    skb_frags, length, cqe))
- 					goto next;
--				}
- 
- 				/* LRO not possible, complete processing here */
- 				ip_summed = CHECKSUM_UNNECESSARY;
-@@ -790,13 +737,15 @@ next:
- 		if (++polled == budget) {
- 			/* We are here because we reached the NAPI budget -
- 			 * flush only pending LRO sessions */
--			lro_flush_all(&ring->lro);
-+			if (mdev->profile.num_lro)
-+				mlx4_en_lro_flush(priv, ring, 0);
- 			goto out;
- 		}
- 	}
- 
- 	/* If CQ is empty flush all LRO sessions unconditionally */
--	lro_flush_all(&ring->lro);
-+	if (mdev->profile.num_lro)
-+		mlx4_en_lro_flush(priv, ring, 1);
- 
- out:
- 	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
-@@ -820,30 +769,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
+@@ -1041,30 +1041,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
  	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
  
  	if (priv->port_up)
@@ -877,8 +94,8 @@
  	int done;
 +	int work = min(*budget, poll_dev->quota);
  
--	done = mlx4_en_process_rx_cq(dev, cq, budget);
-+	done = mlx4_en_process_rx_cq(dev, cq, work);
+-	done = cq->process_cq(dev, cq, budget);
++	done = cq->process_cq(dev, cq, work);
 +	dev->quota -= done;
 +	*budget -= done;
  
@@ -901,76 +118,11 @@
  }
  
  
-diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
-index 2297fec..59f8772 100644
---- a/drivers/net/mlx4/en_tx.c
-+++ b/drivers/net/mlx4/en_tx.c
-@@ -37,6 +37,7 @@
- #include <linux/skbuff.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/tcp.h>
- 
- #include "mlx4_en.h"
- 
 diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
-index 1011803..7c8c489 100644
+index ebaed67..456cd91 100644
 --- a/drivers/net/mlx4/mlx4_en.h
 +++ b/drivers/net/mlx4/mlx4_en.h
-@@ -38,7 +38,7 @@
- #include <linux/list.h>
- #include <linux/mutex.h>
- #include <linux/netdevice.h>
--#include <linux/inet_lro.h>
-+#include <net/checksum.h>
- 
- #include <linux/mlx4/device.h>
- #include <linux/mlx4/qp.h>
-@@ -268,11 +268,42 @@ struct mlx4_en_rx_desc {
- 	struct mlx4_wqe_data_seg data[0];
- };
- 
-+struct mlx4_en_lro {
-+	struct hlist_node node;
-+	struct hlist_node flush_node;
-+
-+	/* Id fields come first: */
-+	u32 saddr;
-+	u32 daddr;
-+	u32 sport_dport;
-+	u32 next_seq;
-+	u16 tot_len;
-+	u8 psh;
-+
-+	u32 tsval;
-+	u32 tsecr;
-+	u32 ack_seq;
-+	u16 window;
-+	__be16 vlan_prio;
-+	u16 has_vlan;
-+	u16 has_timestamp;
-+	u16 mss;
-+	__wsum  data_csum;
-+
-+	unsigned long expires;
-+	struct sk_buff *skb;
-+	struct sk_buff *skb_last;
-+};
-+
-+
- struct mlx4_en_rx_ring {
- 	struct mlx4_srq srq;
- 	struct mlx4_hwq_resources wqres;
- 	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
--	struct net_lro_mgr lro;
-+	struct mlx4_en_lro lro;
-+	struct hlist_head *lro_hash;
-+	struct hlist_head lro_free;
-+	struct hlist_head lro_flush;
- 	u32 size ;	/* number of Rx descs*/
- 	u32 actual_size;
- 	u32 size_mask;
-@@ -311,7 +342,7 @@ struct mlx4_en_cq {
+@@ -328,7 +328,7 @@ struct mlx4_en_cq {
  	int                     ring;
  	spinlock_t              lock;
  	struct net_device      *dev;
@@ -979,43 +131,12 @@
  	/* Per-core Tx cq processing support */
  	struct timer_list timer;
  	int size;
-@@ -541,7 +572,7 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- int mlx4_en_process_rx_cq(struct net_device *dev,
- 			  struct mlx4_en_cq *cq,
- 			  int budget);
+@@ -571,7 +571,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
+ int mlx4_en_process_rx_cq_skb(struct net_device *dev,
+ 			      struct mlx4_en_cq *cq,
+ 			      int budget);
 -int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
 +int mlx4_en_poll_rx_cq(struct net_device *poll_dev, int *budget);
  void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
  			     int is_tx, int rss, int qpn, int cqn, int srqn,
  			     struct mlx4_qp_context *context);
-@@ -552,6 +583,27 @@ void mlx4_en_calc_rx_buf(struct net_device *dev);
- void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
- 				 struct mlx4_en_rss_map *rss_map,
- 				 int num_entries, int num_rings);
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all);
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+		   struct mlx4_en_rx_desc *rx_desc,
-+		   struct skb_frag_struct *skb_frags,
-+		   unsigned int length, struct mlx4_cqe *cqe);
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring);
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro);
-+
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv* priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length);
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv* priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length);
-+
- void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
- int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
- void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
--- 
-1.5.4
-

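The LRO implementation in the hunks above buckets sessions by the sum of the least significant bytes of the TCP source and destination ports (the LRO_INDEX() macro) and then matches the full saddr/daddr/port tuple on lookup. A minimal standalone sketch of that bucketing idea follows; the struct and function names are invented for illustration (they are not part of the driver), and ports are taken in host byte order here for simplicity.

#include <stdint.h>
#include <stdio.h>

struct lro_key {
	uint32_t saddr;   /* IPv4 source address */
	uint32_t daddr;   /* IPv4 destination address */
	uint16_t sport;   /* TCP source port (host order for this sketch) */
	uint16_t dport;   /* TCP destination port */
};

/* Bucket index: sum of the ports' least significant bytes, masked to a
 * power-of-two table size, mirroring the LRO_INDEX() idea above. */
static unsigned int lro_bucket(const struct lro_key *k, unsigned int table_size)
{
	return ((k->sport & 0xff) + (k->dport & 0xff)) & (table_size - 1);
}

/* A lookup scans only that bucket and compares the full tuple. */
static int lro_key_match(const struct lro_key *a, const struct lro_key *b)
{
	return a->saddr == b->saddr && a->daddr == b->daddr &&
	       a->sport == b->sport && a->dport == b->dport;
}

int main(void)
{
	struct lro_key k = { 0x0a000001, 0x0a000002, 45678, 443 };

	printf("bucket = %u of 32\n", lro_bucket(&k, 32));
	printf("self match = %d\n", lro_key_match(&k, &k));
	return 0;
}
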
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/mlx4_en_0110_no_set_flags.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/mlx4_en_0110_no_set_flags.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.23/mlx4_en_0110_no_set_flags.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -7,9 +7,9 @@
 --- ofed_kernel.orig/drivers/net/mlx4/en_params.c
 +++ ofed_kernel/drivers/net/mlx4/en_params.c
 @@ -593,8 +593,6 @@ const struct ethtool_ops mlx4_en_ethtool
- 	.get_pauseparam = mlx4_en_get_pauseparam,
  	.set_pauseparam = mlx4_en_set_pauseparam,
  	.get_ringparam = mlx4_en_get_ringparam,
+ 	.set_ringparam = mlx4_en_set_ringparam,
 -	.get_flags = ethtool_op_get_flags,
 -	.set_flags = ethtool_op_set_flags,
  };

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.24/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.24/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.24/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,23 +3,32 @@
  drivers/infiniband/hw/ipath/ipath_user_sdma.c |    6 +++---
  2 files changed, 4 insertions(+), 4 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:37:35.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:41:22.000000000 -0800
 @@ -698,7 +698,7 @@ retry:
  
  	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
  			      tx->map_len, DMA_TO_DEVICE);
--	if (dma_mapping_error(&dd->pcidev->dev, addr)) {
-+	if (dma_mapping_error(addr)) {
- 		ret = -EIO;
- 		goto unlock;
- 	}
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+-	if (dma_mapping_error(&dd->pcidev->dev, addr))
++	if (dma_mapping_error(addr))
+ 		goto ioerr;
+ 
+ 	dwoffset = tx->map_len >> 2;
+@@ -739,7 +739,7 @@ retry:
+ 		dw = (len + 3) >> 2;
+ 		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
+ 				      DMA_TO_DEVICE);
+-		if (dma_mapping_error(&dd->pcidev->dev, addr))
++		if (dma_mapping_error(addr))
+ 			goto unmap;
+ 		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
+ 		/* SDmaUseLargeBuf has to be set in every descriptor */
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:18.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:54.000000000 -0800
 @@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(cons
  
  	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,

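The ipath backport above rewrites the dma_mapping_error() call sites because kernels before 2.6.27 take only the DMA address, without the struct device argument. An alternative approach (hypothetical, not what this patch does) would be a version-guarded compat macro so call sites can keep the newer two-argument form; a minimal sketch, assuming it lives in a backport header:

#include <linux/version.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical compat shim: on pre-2.6.27 kernels dma_mapping_error() takes
 * only the dma_addr_t, so drop the device argument there and pass it through
 * unchanged on newer kernels.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
#define compat_dma_mapping_error(dev, addr)	dma_mapping_error(addr)
#else
#define compat_dma_mapping_error(dev, addr)	dma_mapping_error(dev, addr)
#endif
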
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.24/iw_nes_050_to_2_6_24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.24/iw_nes_050_to_2_6_24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.24/iw_nes_050_to_2_6_24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:23:23.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -1118,19 +1118,19 @@ static ssize_t nes_show_wqm_quanta(struc
  static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
  					const char *buf, size_t count)

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.24/mlx4_en_0099_no_multiqueue.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.24/mlx4_en_0099_no_multiqueue.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.24/mlx4_en_0099_no_multiqueue.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,73 @@
+diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
+index bce700a..a9ee0d7 100644
+--- a/drivers/net/mlx4/en_netdev.c
++++ b/drivers/net/mlx4/en_netdev.c
+@@ -963,7 +963,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	int i;
+ 	int err;
+ 
+-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
++	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+ 	if (dev == NULL) {
+ 		mlx4_err(mdev, "Net device allocation failed\n");
+ 		return -ENOMEM;
+@@ -1036,7 +1036,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	dev->open = &mlx4_en_open;
+ 	dev->stop = &mlx4_en_close;
+ 	dev->hard_start_xmit = &mlx4_en_xmit;
+-	dev->select_queue = &mlx4_en_select_queue;
+ 	dev->get_stats = &mlx4_en_get_stats;
+ 	dev->set_multicast_list = &mlx4_en_set_multicast;
+ 	dev->set_mac_address = &mlx4_en_set_mac;
+diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
+index 3d8246f..72e166f 100644
+--- a/drivers/net/mlx4/en_tx.c
++++ b/drivers/net/mlx4/en_tx.c
+@@ -392,7 +392,7 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+ 			 *   transmission on that ring would stop the queue.
+ 			 */
+ 			ring->blocked = 0;
+-			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
++			netif_wake_queue(dev);
+ 			priv->port_stats.wake_queue++;
+ 		}
+ 	}
+@@ -612,7 +612,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
+ 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ }
+ 
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
++static int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	u16 vlan_tag = 0;
+@@ -703,7 +703,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+-	tx_ind = skb->queue_mapping;
++	tx_ind = mlx4_en_select_queue(dev, skb);
+ 	ring = &priv->tx_ring[tx_ind];
+ 	if (priv->vlgrp && vlan_tx_tag_present(skb))
+ 		vlan_tag = vlan_tx_tag_get(skb);
+@@ -713,7 +713,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ 		/* every full Tx ring stops queue.
+ 		 * TODO: implement multi-queue support (per-queue stop) */
+-		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
++		netif_stop_queue(dev);
+ 		ring->blocked = 1;
+ 		priv->port_stats.queue_stopped++;
+ 
+diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
+index adef17c..995e318 100644
+--- a/drivers/net/mlx4/mlx4_en.h
++++ b/drivers/net/mlx4/mlx4_en.h
+@@ -521,7 +521,6 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq);
+ void mlx4_en_poll_tx_cq(unsigned long data);
+ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+ 
+ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+ 			   u32 size, u16 stride);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.25/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.25/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.25/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,23 +3,32 @@
  drivers/infiniband/hw/ipath/ipath_user_sdma.c |    6 +++---
  2 files changed, 4 insertions(+), 4 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:37:35.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:41:22.000000000 -0800
 @@ -698,7 +698,7 @@ retry:
  
  	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
  			      tx->map_len, DMA_TO_DEVICE);
--	if (dma_mapping_error(&dd->pcidev->dev, addr)) {
-+	if (dma_mapping_error(addr)) {
- 		ret = -EIO;
- 		goto unlock;
- 	}
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+-	if (dma_mapping_error(&dd->pcidev->dev, addr))
++	if (dma_mapping_error(addr))
+ 		goto ioerr;
+ 
+ 	dwoffset = tx->map_len >> 2;
+@@ -739,7 +739,7 @@ retry:
+ 		dw = (len + 3) >> 2;
+ 		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
+ 				      DMA_TO_DEVICE);
+-		if (dma_mapping_error(&dd->pcidev->dev, addr))
++		if (dma_mapping_error(addr))
+ 			goto unmap;
+ 		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
+ 		/* SDmaUseLargeBuf has to be set in every descriptor */
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:18.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:54.000000000 -0800
 @@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(cons
  
  	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.25/ipath_0095_pat.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.25/ipath_0095_pat.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.25/ipath_0095_pat.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -448,7 +448,7 @@
 - *   2 is WC via PAT and over-ride chip-set wc errata and PAT checks
 - *   If PAT initialization fails, code reverts back to MTRR
 - */
--unsigned ipath_wc_pat; /* current default (0) is to use MTRR not PAT */
+-unsigned ipath_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
 -module_param_named(wc_pat, ipath_wc_pat, uint, S_IRUGO);
 -MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
 -

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.25/mlx4_en_0099_no_multiqueue.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.25/mlx4_en_0099_no_multiqueue.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.25/mlx4_en_0099_no_multiqueue.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,73 @@
+diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
+index bce700a..a9ee0d7 100644
+--- a/drivers/net/mlx4/en_netdev.c
++++ b/drivers/net/mlx4/en_netdev.c
+@@ -963,7 +963,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	int i;
+ 	int err;
+ 
+-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
++	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+ 	if (dev == NULL) {
+ 		mlx4_err(mdev, "Net device allocation failed\n");
+ 		return -ENOMEM;
+@@ -1036,7 +1036,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	dev->open = &mlx4_en_open;
+ 	dev->stop = &mlx4_en_close;
+ 	dev->hard_start_xmit = &mlx4_en_xmit;
+-	dev->select_queue = &mlx4_en_select_queue;
+ 	dev->get_stats = &mlx4_en_get_stats;
+ 	dev->set_multicast_list = &mlx4_en_set_multicast;
+ 	dev->set_mac_address = &mlx4_en_set_mac;
+diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
+index 3d8246f..72e166f 100644
+--- a/drivers/net/mlx4/en_tx.c
++++ b/drivers/net/mlx4/en_tx.c
+@@ -392,7 +392,7 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+ 			 *   transmission on that ring would stop the queue.
+ 			 */
+ 			ring->blocked = 0;
+-			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
++			netif_wake_queue(dev);
+ 			priv->port_stats.wake_queue++;
+ 		}
+ 	}
+@@ -612,7 +612,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
+ 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ }
+ 
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
++static int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	u16 vlan_tag = 0;
+@@ -703,7 +703,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+-	tx_ind = skb->queue_mapping;
++	tx_ind = mlx4_en_select_queue(dev, skb);
+ 	ring = &priv->tx_ring[tx_ind];
+ 	if (priv->vlgrp && vlan_tx_tag_present(skb))
+ 		vlan_tag = vlan_tx_tag_get(skb);
+@@ -713,7 +713,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ 		/* every full Tx ring stops queue.
+ 		 * TODO: implement multi-queue support (per-queue stop) */
+-		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
++		netif_stop_queue(dev);
+ 		ring->blocked = 1;
+ 		priv->port_stats.queue_stopped++;
+ 
+diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
+index adef17c..995e318 100644
+--- a/drivers/net/mlx4/mlx4_en.h
++++ b/drivers/net/mlx4/mlx4_en.h
+@@ -521,7 +521,6 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq);
+ void mlx4_en_poll_tx_cq(unsigned long data);
+ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+ 
+ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+ 			   u32 size, u16 stride);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.25/rnfs_fs.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.25/rnfs_fs.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.25/rnfs_fs.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,178 @@
+diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
+index ac6170c..0bae18e 100644
+--- a/fs/nfs/Makefile
++++ b/fs/nfs/Makefile
+@@ -6,7 +6,7 @@ obj-$(CONFIG_NFS_FS) += nfs.o
+ 
+ nfs-y 			:= client.o dir.o file.o getroot.o inode.o super.o nfs2xdr.o \
+ 			   direct.o pagelist.o proc.o read.o symlink.o unlink.o \
+-			   write.o namespace.o mount_clnt.o
++			   write.o namespace.o mount_clnt.o backport-namespace.o
+ nfs-$(CONFIG_ROOT_NFS)	+= nfsroot.o
+ nfs-$(CONFIG_NFS_V3)	+= nfs3proc.o nfs3xdr.o
+ nfs-$(CONFIG_NFS_V3_ACL)	+= nfs3acl.o
+diff --git a/fs/nfs/backport-namespace.c b/fs/nfs/backport-namespace.c
+new file mode 100644
+index 0000000..de57f8b
+--- /dev/null
++++ b/fs/nfs/backport-namespace.c
+@@ -0,0 +1 @@
++#include "src/namespace.c"
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 74f92b7..986d990 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1907,7 +1907,7 @@ int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags)
+ 	return nfs_do_access(inode, cred, nfs_open_permission_mask(openflags));
+ }
+ 
+-int nfs_permission(struct inode *inode, int mask)
++int nfs_permission(struct inode *inode, int mask, struct nameidata *nd)
+ {
+ 	struct rpc_cred *cred;
+ 	int res = 0;
+@@ -1917,7 +1917,7 @@ int nfs_permission(struct inode *inode, int mask)
+ 	if ((mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
+ 		goto out;
+ 	/* Is this sys_access() ? */
+-	if (mask & MAY_ACCESS)
++	if (nd != NULL && (nd->flags & LOOKUP_ACCESS))
+ 		goto force_lookup;
+ 
+ 	switch (inode->i_mode & S_IFMT) {
+@@ -1926,7 +1926,8 @@ int nfs_permission(struct inode *inode, int mask)
+ 		case S_IFREG:
+ 			/* NFSv4 has atomic_open... */
+ 			if (nfs_server_capable(inode, NFS_CAP_ATOMIC_OPEN)
+-					&& (mask & MAY_OPEN))
++					&& nd != NULL
++					&& (nd->flags & LOOKUP_OPEN))
+ 				goto out;
+ 			break;
+ 		case S_IFDIR:
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index 7846065..9f1bed9 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -178,8 +178,6 @@ force_reval:
+ 
+ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
+ {
+-	loff_t loff;
+-
+ 	dprintk("NFS: llseek file(%s/%s, %lld, %d)\n",
+ 			filp->f_path.dentry->d_parent->d_name.name,
+ 			filp->f_path.dentry->d_name.name,
+@@ -192,10 +190,7 @@ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
+ 		if (retval < 0)
+ 			return (loff_t)retval;
+ 	}
+-	lock_kernel();	/* BKL needed? */
+-	loff = generic_file_llseek_unlocked(filp, offset, origin);
+-	unlock_kernel();
+-	return loff;
++	return remote_llseek(filp, offset, origin);
+ }
+ 
+ /*
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 52daefa..d8927e0 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -1242,7 +1242,7 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi)
+ #endif
+ }
+ 
+-static void init_once(void *foo)
++static void init_once(struct kmem_cache *cachep, void *foo)
+ {
+ 	struct nfs_inode *nfsi = (struct nfs_inode *) foo;
+ 
+diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
+index 66df08d..2f285ef 100644
+--- a/fs/nfs/namespace.c
++++ b/fs/nfs/namespace.c
+@@ -129,7 +129,7 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
+ 		goto out_err;
+ 
+ 	mntget(mnt);
+-	err = do_add_mount(mnt, &nd->path, nd->path.mnt->mnt_flags|MNT_SHRINKABLE,
++	err = do_add_mount(mnt, nd, nd->path.mnt->mnt_flags|MNT_SHRINKABLE,
+ 			   &nfs_automount_list);
+ 	if (err < 0) {
+ 		mntput(mnt);
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index e9b2017..19d380c 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -201,7 +201,7 @@ static match_table_t nfs_secflavor_tokens = {
+ };
+ 
+ 
+-static void nfs_umount_begin(struct super_block *);
++static void nfs_umount_begin(struct vfsmount *vfsmnt, int flags);
+ static int  nfs_statfs(struct dentry *, struct kstatfs *);
+ static int  nfs_show_options(struct seq_file *, struct vfsmount *);
+ static int  nfs_show_stats(struct seq_file *, struct vfsmount *);
+@@ -649,11 +649,13 @@ static int nfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
+  * Begin unmount by attempting to remove all automounted mountpoints we added
+  * in response to xdev traversals and referrals
+  */
+-static void nfs_umount_begin(struct super_block *sb)
++static void nfs_umount_begin(struct vfsmount *vfsmnt, int flags)
+ {
+-	struct nfs_server *server = NFS_SB(sb);
++	struct nfs_server *server = NFS_SB(vfsmnt->mnt_sb);
+ 	struct rpc_clnt *rpc;
+ 
++	if (!(flags & MNT_FORCE))
++		return;
+ 	/* -EIO all pending I/O */
+ 	rpc = server->client_acl;
+ 	if (!IS_ERR(rpc))
+diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
+index 78a5922..3370498 100644
+--- a/include/linux/nfs_fs.h
++++ b/include/linux/nfs_fs.h
+@@ -331,7 +331,7 @@ extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *);
+ extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr);
+ extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr);
+ extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
+-extern int nfs_permission(struct inode *, int);
++extern int nfs_permission(struct inode *, int, struct nameidata *);
+ extern int nfs_open(struct inode *, struct file *);
+ extern int nfs_release(struct inode *, struct file *);
+ extern int nfs_attribute_timeout(struct inode *inode);
+diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
+index 23a2b8f..5a9b0e7 100644
+--- a/net/sunrpc/rpc_pipe.c
++++ b/net/sunrpc/rpc_pipe.c
+@@ -897,7 +897,7 @@ static struct file_system_type rpc_pipe_fs_type = {
+ };
+ 
+ static void
+-init_once(void *foo)
++init_once(struct kmem_cache * cachep, void *foo)
+ {
+ 	struct rpc_inode *rpci = (struct rpc_inode *) foo;
+ 
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index 5a32cb7..3fba57a 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -310,13 +310,12 @@ svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
+ 	switch (m->mode) {
+ 	case SVC_POOL_PERCPU:
+ 	{
+-		set_cpus_allowed_ptr(task, &cpumask_of_cpu(node));
++		set_cpus_allowed(task, cpumask_of_cpu(node));
+ 		break;
+ 	}
+ 	case SVC_POOL_PERNODE:
+ 	{
+-		node_to_cpumask_ptr(nodecpumask, node);
+-		set_cpus_allowed_ptr(task, nodecpumask);
++		set_cpus_allowed(task, node_to_cpumask(node));
+ 		break;
+ 	}
+ 	}

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.26/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.26/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.26/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,23 +3,32 @@
  drivers/infiniband/hw/ipath/ipath_user_sdma.c |    6 +++---
  2 files changed, 4 insertions(+), 4 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:37:35.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:41:22.000000000 -0800
 @@ -698,7 +698,7 @@ retry:
  
  	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
  			      tx->map_len, DMA_TO_DEVICE);
--	if (dma_mapping_error(&dd->pcidev->dev, addr)) {
-+	if (dma_mapping_error(addr)) {
- 		ret = -EIO;
- 		goto unlock;
- 	}
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+-	if (dma_mapping_error(&dd->pcidev->dev, addr))
++	if (dma_mapping_error(addr))
+ 		goto ioerr;
+ 
+ 	dwoffset = tx->map_len >> 2;
+@@ -739,7 +739,7 @@ retry:
+ 		dw = (len + 3) >> 2;
+ 		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
+ 				      DMA_TO_DEVICE);
+-		if (dma_mapping_error(&dd->pcidev->dev, addr))
++		if (dma_mapping_error(addr))
+ 			goto unmap;
+ 		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
+ 		/* SDmaUseLargeBuf has to be set in every descriptor */
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:18.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:54.000000000 -0800
 @@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(cons
  
  	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.26/mlx4_en_0099_no_multiqueue.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.26/mlx4_en_0099_no_multiqueue.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.26/mlx4_en_0099_no_multiqueue.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,73 @@
+diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
+index bce700a..a9ee0d7 100644
+--- a/drivers/net/mlx4/en_netdev.c
++++ b/drivers/net/mlx4/en_netdev.c
+@@ -963,7 +963,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	int i;
+ 	int err;
+ 
+-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
++	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+ 	if (dev == NULL) {
+ 		mlx4_err(mdev, "Net device allocation failed\n");
+ 		return -ENOMEM;
+@@ -1036,7 +1036,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	dev->open = &mlx4_en_open;
+ 	dev->stop = &mlx4_en_close;
+ 	dev->hard_start_xmit = &mlx4_en_xmit;
+-	dev->select_queue = &mlx4_en_select_queue;
+ 	dev->get_stats = &mlx4_en_get_stats;
+ 	dev->set_multicast_list = &mlx4_en_set_multicast;
+ 	dev->set_mac_address = &mlx4_en_set_mac;
+diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
+index 3d8246f..72e166f 100644
+--- a/drivers/net/mlx4/en_tx.c
++++ b/drivers/net/mlx4/en_tx.c
+@@ -392,7 +392,7 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+ 			 *   transmission on that ring would stop the queue.
+ 			 */
+ 			ring->blocked = 0;
+-			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
++			netif_wake_queue(dev);
+ 			priv->port_stats.wake_queue++;
+ 		}
+ 	}
+@@ -612,7 +612,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
+ 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ }
+ 
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
++static int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	u16 vlan_tag = 0;
+@@ -703,7 +703,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+-	tx_ind = skb->queue_mapping;
++	tx_ind = mlx4_en_select_queue(dev, skb);
+ 	ring = &priv->tx_ring[tx_ind];
+ 	if (priv->vlgrp && vlan_tx_tag_present(skb))
+ 		vlan_tag = vlan_tx_tag_get(skb);
+@@ -713,7 +713,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ 		/* every full Tx ring stops queue.
+ 		 * TODO: implement multi-queue support (per-queue stop) */
+-		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
++		netif_stop_queue(dev);
+ 		ring->blocked = 1;
+ 		priv->port_stats.queue_stopped++;
+ 
+diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
+index adef17c..995e318 100644
+--- a/drivers/net/mlx4/mlx4_en.h
++++ b/drivers/net/mlx4/mlx4_en.h
+@@ -521,7 +521,6 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq);
+ void mlx4_en_poll_tx_cq(unsigned long data);
+ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+ 
+ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+ 			   u32 size, u16 stride);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.27_sles11/to_sles11.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.27_sles11/to_sles11.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.27_sles11/to_sles11.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,493 @@
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 299e075..5349778 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -1476,12 +1476,12 @@ static void iscsi_start_tx(struct iscsi_conn *conn)
+ 		scsi_queue_work(conn->session->host, &conn->xmitwork);
+ }
+ 
+-static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
++static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+ {
+ 	struct iscsi_cls_session *cls_session;
+ 	struct iscsi_session *session;
+ 	struct iscsi_conn *conn;
+-	enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
++	enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
+ 
+ 	cls_session = starget_to_session(scsi_target(scmd->device));
+ 	session = cls_session->dd_data;
+@@ -1494,14 +1494,14 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+ 		 * We are probably in the middle of iscsi recovery so let
+ 		 * that complete and handle the error.
+ 		 */
+-		rc = EH_RESET_TIMER;
++		rc = BLK_EH_RESET_TIMER;
+ 		goto done;
+ 	}
+ 
+ 	conn = session->leadconn;
+ 	if (!conn) {
+ 		/* In the middle of shuting down */
+ 		/* In the middle of shutting down */
++		rc = BLK_EH_RESET_TIMER;
+ 		goto done;
+ 	}
+ 
+@@ -1513,20 +1513,20 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+ 	 */
+ 	if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
+ 			    (conn->ping_timeout * HZ), jiffies))
+-		rc = EH_RESET_TIMER;
++		rc = BLK_EH_RESET_TIMER;
+ 	/*
+ 	 * if we are about to check the transport then give the command
+ 	 * more time
+ 	 */
+ 	if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
+ 			   jiffies))
+-		rc = EH_RESET_TIMER;
++		rc = BLK_EH_RESET_TIMER;
+ 	/* if in the middle of checking the transport then give us more time */
+ 	if (conn->ping_task)
+-		rc = EH_RESET_TIMER;
++		rc = BLK_EH_RESET_TIMER;
+ done:
+ 	spin_unlock(&session->lock);
+-	debug_scsi("return %s\n", rc == EH_RESET_TIMER ? "timer reset" : "nh");
++	debug_scsi("return %s\n", rc == BLK_EH_RESET_TIMER ? "timer reset" : "nh");
+ 	return rc;
+ }
+ 
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index 7846065..30541f0 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -351,7 +351,7 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
+ 		file->f_path.dentry->d_name.name,
+ 		mapping->host->i_ino, len, (long long) pos);
+ 
+-	page = __grab_cache_page(mapping, index);
++	page = grab_cache_page_write_begin(mapping, index, flags);
+ 	if (!page)
+ 		return -ENOMEM;
+ 	*pagep = page;
+diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h
+index 108f47e..2389a2e 100644
+--- a/include/linux/nfsd/nfsd.h
++++ b/include/linux/nfsd/nfsd.h
+@@ -85,7 +85,8 @@ __be32		nfsd_setattr(struct svc_rqst *, struct svc_fh *,
+ #ifdef CONFIG_NFSD_V4
+ __be32          nfsd4_set_nfs4_acl(struct svc_rqst *, struct svc_fh *,
+                     struct nfs4_acl *);
+-int             nfsd4_get_nfs4_acl(struct svc_rqst *, struct dentry *, struct nfs4_acl **);
++int             nfsd4_get_nfs4_acl(struct svc_rqst *, struct dentry *, 
++		struct vfsmount *mnt, struct nfs4_acl **);
+ #endif /* CONFIG_NFSD_V4 */
+ __be32		nfsd_create(struct svc_rqst *, struct svc_fh *,
+ 				char *name, int len, struct iattr *attrs,
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 18060be..715ff2a 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -388,7 +388,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+ 	err = nfserr_notsync;
+ 	if (!check_guard || guardtime == inode->i_ctime.tv_sec) {
+ 		fh_lock(fhp);
+-		host_err = notify_change(dentry, iap);
++		host_err = notify_change(dentry, fhp->fh_export->ex_path.mnt, iap);
+ 		err = nfserrno(host_err);
+ 		fh_unlock(fhp);
+ 	}
+@@ -408,11 +408,12 @@ out_nfserr:
+ #if defined(CONFIG_NFSD_V2_ACL) || \
+     defined(CONFIG_NFSD_V3_ACL) || \
+     defined(CONFIG_NFSD_V4)
+-static ssize_t nfsd_getxattr(struct dentry *dentry, char *key, void **buf)
++static ssize_t nfsd_getxattr(struct dentry *dentry, struct vfsmount *mnt,
++			     char *key, void **buf)
+ {
+ 	ssize_t buflen;
+ 
+-	buflen = vfs_getxattr(dentry, key, NULL, 0);
++	buflen = vfs_getxattr(dentry, mnt, key, NULL, 0, NULL);
+ 	if (buflen <= 0)
+ 		return buflen;
+ 
+@@ -420,13 +421,14 @@ static ssize_t nfsd_getxattr(struct dentry *dentry, char *key, void **buf)
+ 	if (!*buf)
+ 		return -ENOMEM;
+ 
+-	return vfs_getxattr(dentry, key, *buf, buflen);
++	return vfs_getxattr(dentry, mnt, key, *buf, buflen, NULL);
+ }
+ #endif
+ 
+ #if defined(CONFIG_NFSD_V4)
+ static int
+-set_nfsv4_acl_one(struct dentry *dentry, struct posix_acl *pacl, char *key)
++set_nfsv4_acl_one(struct dentry *dentry, struct vfsmount *mnt,
++		  struct posix_acl *pacl, char *key)
+ {
+ 	int len;
+ 	size_t buflen;
+@@ -445,7 +447,7 @@ set_nfsv4_acl_one(struct dentry *dentry, struct posix_acl *pacl, char *key)
+ 		goto out;
+ 	}
+ 
+-	error = vfs_setxattr(dentry, key, buf, len, 0);
++	error = vfs_setxattr(dentry, mnt, key, buf, len, 0, NULL);
+ out:
+ 	kfree(buf);
+ 	return error;
+@@ -458,6 +460,7 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 	__be32 error;
+ 	int host_error;
+ 	struct dentry *dentry;
++	struct vfsmount *mnt;
+ 	struct inode *inode;
+ 	struct posix_acl *pacl = NULL, *dpacl = NULL;
+ 	unsigned int flags = 0;
+@@ -468,6 +471,7 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 		return error;
+ 
+ 	dentry = fhp->fh_dentry;
++	mnt = fhp->fh_export->ex_path.mnt;
+ 	inode = dentry->d_inode;
+ 	if (S_ISDIR(inode->i_mode))
+ 		flags = NFS4_ACL_DIR;
+@@ -478,12 +482,14 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 	} else if (host_error < 0)
+ 		goto out_nfserr;
+ 
+-	host_error = set_nfsv4_acl_one(dentry, pacl, POSIX_ACL_XATTR_ACCESS);
++	host_error = set_nfsv4_acl_one(dentry, mnt, pacl,
++				       POSIX_ACL_XATTR_ACCESS);
+ 	if (host_error < 0)
+ 		goto out_release;
+ 
+ 	if (S_ISDIR(inode->i_mode))
+-		host_error = set_nfsv4_acl_one(dentry, dpacl, POSIX_ACL_XATTR_DEFAULT);
++		host_error = set_nfsv4_acl_one(dentry, mnt, dpacl,
++					       POSIX_ACL_XATTR_DEFAULT);
+ 
+ out_release:
+ 	posix_acl_release(pacl);
+@@ -496,13 +502,13 @@ out_nfserr:
+ }
+ 
+ static struct posix_acl *
+-_get_posix_acl(struct dentry *dentry, char *key)
++_get_posix_acl(struct dentry *dentry, struct vfsmount *mnt, char *key)
+ {
+ 	void *buf = NULL;
+ 	struct posix_acl *pacl = NULL;
+ 	int buflen;
+ 
+-	buflen = nfsd_getxattr(dentry, key, &buf);
++	buflen = nfsd_getxattr(dentry, mnt, key, &buf);
+ 	if (!buflen)
+ 		buflen = -ENODATA;
+ 	if (buflen <= 0)
+@@ -514,14 +520,15 @@ _get_posix_acl(struct dentry *dentry, char *key)
+ }
+ 
+ int
+-nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry, struct nfs4_acl **acl)
++nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry,
++		   struct vfsmount *mnt, struct nfs4_acl **acl)
+ {
+ 	struct inode *inode = dentry->d_inode;
+ 	int error = 0;
+ 	struct posix_acl *pacl = NULL, *dpacl = NULL;
+ 	unsigned int flags = 0;
+ 
+-	pacl = _get_posix_acl(dentry, POSIX_ACL_XATTR_ACCESS);
++	pacl = _get_posix_acl(dentry, mnt, POSIX_ACL_XATTR_ACCESS);
+ 	if (IS_ERR(pacl) && PTR_ERR(pacl) == -ENODATA)
+ 		pacl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
+ 	if (IS_ERR(pacl)) {
+@@ -531,7 +538,7 @@ nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry, struct nfs4_ac
+ 	}
+ 
+ 	if (S_ISDIR(inode->i_mode)) {
+-		dpacl = _get_posix_acl(dentry, POSIX_ACL_XATTR_DEFAULT);
++		dpacl = _get_posix_acl(dentry, mnt, POSIX_ACL_XATTR_DEFAULT);
+ 		if (IS_ERR(dpacl) && PTR_ERR(dpacl) == -ENODATA)
+ 			dpacl = NULL;
+ 		else if (IS_ERR(dpacl)) {
+@@ -944,13 +951,13 @@ out:
+ 	return err;
+ }
+ 
+-static void kill_suid(struct dentry *dentry)
++static void kill_suid(struct dentry *dentry, struct vfsmount *mnt)
+ {
+ 	struct iattr	ia;
+ 	ia.ia_valid = ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
+ 
+ 	mutex_lock(&dentry->d_inode->i_mutex);
+-	notify_change(dentry, &ia);
++	notify_change(dentry, mnt, &ia);
+ 	mutex_unlock(&dentry->d_inode->i_mutex);
+ }
+ 
+@@ -1009,7 +1016,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+ 
+ 	/* clear setuid/setgid flag after write */
+ 	if (host_err >= 0 && (inode->i_mode & (S_ISUID | S_ISGID)))
+-		kill_suid(dentry);
++		kill_suid(dentry, exp->ex_path.mnt);
+ 
+ 	if (host_err >= 0 && stable) {
+ 		static ino_t	last_ino;
+@@ -1187,6 +1194,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 		int type, dev_t rdev, struct svc_fh *resfhp)
+ {
+ 	struct dentry	*dentry, *dchild = NULL;
++	struct svc_export *exp;
+ 	struct inode	*dirp;
+ 	__be32		err;
+ 	__be32		err2;
+@@ -1204,6 +1212,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 		goto out;
+ 
+ 	dentry = fhp->fh_dentry;
++	exp = fhp->fh_export;
+ 	dirp = dentry->d_inode;
+ 
+ 	err = nfserr_notdir;
+@@ -1220,7 +1229,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 		host_err = PTR_ERR(dchild);
+ 		if (IS_ERR(dchild))
+ 			goto out_nfserr;
+-		err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
++		err = fh_compose(resfhp, exp, dchild, fhp);
+ 		if (err)
+ 			goto out;
+ 	} else {
+@@ -1270,13 +1279,14 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 		host_err = vfs_create(dirp, dchild, iap->ia_mode, NULL);
+ 		break;
+ 	case S_IFDIR:
+-		host_err = vfs_mkdir(dirp, dchild, iap->ia_mode);
++		host_err = vfs_mkdir(dirp, dchild, exp->ex_path.mnt, iap->ia_mode);
+ 		break;
+ 	case S_IFCHR:
+ 	case S_IFBLK:
+ 	case S_IFIFO:
+ 	case S_IFSOCK:
+-		host_err = vfs_mknod(dirp, dchild, iap->ia_mode, rdev);
++		host_err = vfs_mknod(dirp, dchild, exp->ex_path.mnt,
++				     iap->ia_mode, rdev);
+ 		break;
+ 	}
+ 	if (host_err < 0) {
+@@ -1284,7 +1294,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 		goto out_nfserr;
+ 	}
+ 
+-	if (EX_ISSYNC(fhp->fh_export)) {
++	if (EX_ISSYNC(exp)) {
+ 		err = nfserrno(nfsd_sync_dir(dentry));
+ 		write_inode_now(dchild->d_inode, 1);
+ 	}
+@@ -1514,6 +1524,7 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 				struct iattr *iap)
+ {
+ 	struct dentry	*dentry, *dnew;
++	struct svc_export *exp;
+ 	__be32		err, cerr;
+ 	int		host_err;
+ 
+@@ -1538,6 +1549,7 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 	if (host_err)
+ 		goto out_nfserr;
+ 
++	exp = fhp->fh_export;
+ 	if (unlikely(path[plen] != 0)) {
+ 		char *path_alloced = kmalloc(plen+1, GFP_KERNEL);
+ 		if (path_alloced == NULL)
+@@ -1545,14 +1557,16 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 		else {
+ 			strncpy(path_alloced, path, plen);
+ 			path_alloced[plen] = 0;
+-			host_err = vfs_symlink(dentry->d_inode, dnew, path_alloced);
++			host_err = vfs_symlink(dentry->d_inode, dnew,
++					       exp->ex_path.mnt, path_alloced);
+ 			kfree(path_alloced);
+ 		}
+ 	} else
+-		host_err = vfs_symlink(dentry->d_inode, dnew, path);
++		host_err = vfs_symlink(dentry->d_inode, dnew, exp->ex_path.mnt,
++				       path);
+ 
+ 	if (!host_err) {
+-		if (EX_ISSYNC(fhp->fh_export))
++		if (EX_ISSYNC(exp))
+ 			host_err = nfsd_sync_dir(dentry);
+ 	}
+ 	err = nfserrno(host_err);
+@@ -1560,7 +1574,7 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 
+ 	mnt_drop_write(fhp->fh_export->ex_path.mnt);
+ 
+-	cerr = fh_compose(resfhp, fhp->fh_export, dnew, fhp);
++	cerr = fh_compose(resfhp, exp, dnew, fhp);
+ 	dput(dnew);
+ 	if (err==0) err = cerr;
+ out:
+@@ -1615,7 +1629,8 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
+ 		err = nfserrno(host_err);
+ 		goto out_dput;
+ 	}
+-	host_err = vfs_link(dold, dirp, dnew);
++	host_err = vfs_link(dold, tfhp->fh_export->ex_path.mnt, dirp,
++			    dnew, ffhp->fh_export->ex_path.mnt);
+ 	if (!host_err) {
+ 		if (EX_ISSYNC(ffhp->fh_export)) {
+ 			err = nfserrno(nfsd_sync_dir(ddir));
+@@ -1716,7 +1731,8 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
+ 	if (host_err)
+ 		goto out_dput_new;
+ 
+-	host_err = vfs_rename(fdir, odentry, tdir, ndentry);
++	host_err = vfs_rename(fdir, odentry, ffhp->fh_export->ex_path.mnt,
++			      tdir, ndentry, tfhp->fh_export->ex_path.mnt);
+ 	if (!host_err && EX_ISSYNC(tfhp->fh_export)) {
+ 		host_err = nfsd_sync_dir(tdentry);
+ 		if (!host_err)
+@@ -1754,6 +1770,7 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+ 				char *fname, int flen)
+ {
+ 	struct dentry	*dentry, *rdentry;
++	struct svc_export *exp;
+ 	struct inode	*dirp;
+ 	__be32		err;
+ 	int		host_err;
+@@ -1768,6 +1785,7 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+ 	fh_lock_nested(fhp, I_MUTEX_PARENT);
+ 	dentry = fhp->fh_dentry;
+ 	dirp = dentry->d_inode;
++	exp = fhp->fh_export;
+ 
+ 	rdentry = lookup_one_len(fname, dentry, flen);
+ 	host_err = PTR_ERR(rdentry);
+@@ -1789,21 +1807,21 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+ 
+ 	if (type != S_IFDIR) { /* It's UNLINK */
+ #ifdef MSNFS
+-		if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
++		if ((exp->ex_flags & NFSEXP_MSNFS) &&
+ 			(atomic_read(&rdentry->d_count) > 1)) {
+ 			host_err = -EPERM;
+ 		} else
+ #endif
+-		host_err = vfs_unlink(dirp, rdentry);
++		host_err = vfs_unlink(dirp, rdentry, exp->ex_path.mnt);
+ 	} else { /* It's RMDIR */
+-		host_err = vfs_rmdir(dirp, rdentry);
++		host_err = vfs_rmdir(dirp, rdentry, exp->ex_path.mnt);
+ 	}
+ 
+ 	dput(rdentry);
+ 
+ 	if (host_err)
+ 		goto out_drop;
+-	if (EX_ISSYNC(fhp->fh_export))
++	if (EX_ISSYNC(exp))
+ 		host_err = nfsd_sync_dir(dentry);
+ 
+ out_drop:
+@@ -2036,7 +2054,8 @@ nfsd_get_posix_acl(struct svc_fh *fhp, int type)
+ 		return ERR_PTR(-EOPNOTSUPP);
+ 	}
+ 
+-	size = nfsd_getxattr(fhp->fh_dentry, name, &value);
++	size = nfsd_getxattr(fhp->fh_dentry, fhp->fh_export->ex_path.mnt, name,
++			     &value);
+ 	if (size < 0)
+ 		return ERR_PTR(size);
+ 
+@@ -2048,6 +2067,7 @@ nfsd_get_posix_acl(struct svc_fh *fhp, int type)
+ int
+ nfsd_set_posix_acl(struct svc_fh *fhp, int type, struct posix_acl *acl)
+ {
++	struct vfsmount *mnt;
+ 	struct inode *inode = fhp->fh_dentry->d_inode;
+ 	char *name;
+ 	void *value = NULL;
+@@ -2080,21 +2100,24 @@ nfsd_set_posix_acl(struct svc_fh *fhp, int type, struct posix_acl *acl)
+ 	} else
+ 		size = 0;
+ 
+-	error = mnt_want_write(fhp->fh_export->ex_path.mnt);
++	mnt = fhp->fh_export->ex_path.mnt;
++	error = mnt_want_write(mnt);
+ 	if (error)
+ 		goto getout;
+ 	if (size)
+-		error = vfs_setxattr(fhp->fh_dentry, name, value, size, 0);
++		error = vfs_setxattr(fhp->fh_dentry, mnt, name, value, size, 0,
++				     NULL);
+ 	else {
+ 		if (!S_ISDIR(inode->i_mode) && type == ACL_TYPE_DEFAULT)
+ 			error = 0;
+ 		else {
+-			error = vfs_removexattr(fhp->fh_dentry, name);
++			error = vfs_removexattr(fhp->fh_dentry, mnt, name,
++						NULL);
+ 			if (error == -ENODATA)
+ 				error = 0;
+ 		}
+ 	}
+-	mnt_drop_write(fhp->fh_export->ex_path.mnt);
++	mnt_drop_write(mnt);
+ 
+ getout:
+ 	kfree(value);
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 14ba4d9..4fc3121 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -1446,7 +1446,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
+ 	}
+ 	if (bmval0 & (FATTR4_WORD0_ACL | FATTR4_WORD0_ACLSUPPORT
+ 			| FATTR4_WORD0_SUPPORTED_ATTRS)) {
+-		err = nfsd4_get_nfs4_acl(rqstp, dentry, &acl);
++		err = nfsd4_get_nfs4_acl(rqstp, dentry, exp->ex_path.mnt, &acl);
+ 		aclsupport = (err == 0);
+ 		if (bmval0 & FATTR4_WORD0_ACL) {
+ 			if (err == -EOPNOTSUPP)
+diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
+index 145b3c8..2ca394f 100644
+--- a/fs/nfsd/nfs4recover.c
++++ b/fs/nfsd/nfs4recover.c
+@@ -158,7 +158,8 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
+ 	status = mnt_want_write(rec_dir.path.mnt);
+ 	if (status)
+ 		goto out_put;
+-	status = vfs_mkdir(rec_dir.path.dentry->d_inode, dentry, S_IRWXU);
++	status = vfs_mkdir(rec_dir.path.dentry->d_inode, dentry,
++			   rec_dir.path.mnt, S_IRWXU);
+ 	mnt_drop_write(rec_dir.path.mnt);
+ out_put:
+ 	dput(dentry);
+@@ -263,7 +264,7 @@ nfsd4_remove_clid_file(struct dentry *dir, struct dentry *dentry)
+ 		return -EINVAL;
+ 	}
+ 	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
+-	status = vfs_unlink(dir->d_inode, dentry);
++	status = vfs_unlink(dir->d_inode, dentry, rec_dir.path.mnt);
+ 	mutex_unlock(&dir->d_inode->i_mutex);
+ 	return status;
+ }
+@@ -278,7 +279,7 @@ nfsd4_clear_clid_dir(struct dentry *dir, struct dentry *dentry)
+ 	 * a kernel from the future.... */
+ 	nfsd4_list_rec_dir(dentry, nfsd4_remove_clid_file);
+ 	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
+-	status = vfs_rmdir(dir->d_inode, dentry);
++	status = vfs_rmdir(dir->d_inode, dentry, rec_dir.path.mnt);
+ 	mutex_unlock(&dir->d_inode->i_mutex);
+ 	return status;
+ }

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/core_2_z010_sysfs_to_2.6.18.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/core_2_z010_sysfs_to_2.6.18.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/core_2_z010_sysfs_to_2.6.18.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,32 @@
+--- a/drivers/infiniband/core/device.c	2009-02-24 14:00:50.387541000 +0200
++++ b/drivers/infiniband/core/device.c	2009-02-24 14:00:54.891362000 +0200
+@@ -199,7 +199,7 @@ void ib_dealloc_device(struct ib_device 
+ 
+ 	BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);
+ 
+-	kobject_put(&device->dev.kobj);
++	ib_device_unregister_sysfs(device);
+ }
+ EXPORT_SYMBOL(ib_dealloc_device);
+ 
+@@ -356,8 +356,6 @@ void ib_unregister_device(struct ib_devi
+ 
+ 	mutex_unlock(&device_mutex);
+ 
+-	ib_device_unregister_sysfs(device);
+-
+ 	spin_lock_irqsave(&device->client_data_lock, flags);
+ 	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ 		kfree(context);
+--- a/drivers/infiniband/core/sysfs.c	2009-02-24 13:58:42.509754000 +0200
++++ b/drivers/infiniband/core/sysfs.c	2009-02-24 14:00:54.894368000 +0200
+@@ -863,9 +863,6 @@ void ib_device_unregister_sysfs(struct i
+ 	struct kobject *p, *t;
+ 	struct ib_port *port;
+ 
+-	/* Hold kobject until ib_dealloc_device() */
+-	kobject_get(&device->dev.kobj);
+-
+ 	list_for_each_entry_safe(p, t, &device->port_list, entry) {
+ 		list_del(&p->entry);
+ 		port = container_of(p, struct ib_port, kobj);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/core_z0010_sysfs_race.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/core_z0010_sysfs_race.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/core_z0010_sysfs_race.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,496 @@
+ib_core: avoid race condition between sysfs access and low-level module unload. (V2)
+
+In newer kernels, although a low-level module will not be unloaded (code)
+while its sysfs interface is being accessed, it is possible for the module to
+free all its resources (data) during such access.  This almost always causes
+a kernel Oops.
+
+To avoid this, we protect the device reg_state with a mutex, and perform
+all sysfs operations (show, store) atomically within this mutex.
+
+V2: fix thinko bug in sysfs_state_show changes (found by Ralph Campbell).
+
+Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
+Signed-off-by: Ralph Campbell <ralph.campbell at qlogic.com>
+
+Index: ofed_kernel/drivers/infiniband/core/device.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/device.c	2009-02-22 15:36:34.531430000 +0200
++++ ofed_kernel/drivers/infiniband/core/device.c	2009-02-22 15:38:28.650322000 +0200
+@@ -178,9 +178,14 @@ static int end_port(struct ib_device *de
+  */
+ struct ib_device *ib_alloc_device(size_t size)
+ {
++	struct ib_device *ibdev;
++
+ 	BUG_ON(size < sizeof (struct ib_device));
+ 
+-	return kzalloc(size, GFP_KERNEL);
++	ibdev = kzalloc(size, GFP_KERNEL);
++	if (ibdev)
++		mutex_init(&ibdev->sysfs_mutex);
++	return ibdev;
+ }
+ EXPORT_SYMBOL(ib_alloc_device);
+ 
+@@ -313,9 +318,10 @@ int ib_register_device(struct ib_device 
+ 		goto out;
+ 	}
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	list_add_tail(&device->core_list, &device_list);
+-
+ 	device->reg_state = IB_DEV_REGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ 
+ 	{
+ 		struct ib_client *client;
+@@ -361,7 +367,9 @@ void ib_unregister_device(struct ib_devi
+ 		kfree(context);
+ 	spin_unlock_irqrestore(&device->client_data_lock, flags);
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	device->reg_state = IB_DEV_UNREGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ }
+ EXPORT_SYMBOL(ib_unregister_device);
+ 
+Index: ofed_kernel/drivers/infiniband/core/sysfs.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/sysfs.c	2009-02-22 15:36:35.914354000 +0200
++++ ofed_kernel/drivers/infiniband/core/sysfs.c	2009-02-22 16:12:37.485629000 +0200
+@@ -94,7 +94,7 @@ static ssize_t state_show(struct ib_port
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+ 	static const char *state_name[] = {
+ 		[IB_PORT_NOP]		= "NOP",
+@@ -105,26 +105,33 @@ static ssize_t state_show(struct ib_port
+ 		[IB_PORT_ACTIVE_DEFER]	= "ACTIVE_DEFER"
+ 	};
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%d: %s\n", attr.state,
+-		       attr.state >= 0 && attr.state < ARRAY_SIZE(state_name) ?
+-		       state_name[attr.state] : "UNKNOWN");
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d: %s\n", attr.state,
++				      attr.state >= 0 &&
++				      attr.state < ARRAY_SIZE(state_name) ?
++				      state_name[attr.state] : "UNKNOWN");
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "0x%x\n", attr.lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_mask_count_show(struct ib_port *p,
+@@ -132,52 +139,64 @@ static ssize_t lid_mask_count_show(struc
+ 				   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.lmc);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.lmc);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%x\n", attr.sm_lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.sm_lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused,
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.sm_sl);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.sm_sl);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused,
+ 			     char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
+@@ -186,24 +205,33 @@ static ssize_t rate_show(struct ib_port 
+ 	struct ib_port_attr attr;
+ 	char *speed = "";
+ 	int rate;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	switch (attr.active_speed) {
+-	case 2: speed = " DDR"; break;
+-	case 4: speed = " QDR"; break;
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.active_speed) {
++			case 2: speed = " DDR"; break;
++			case 4: speed = " QDR"; break;
++			}
++
++			rate = 25 * ib_width_enum_to_int(attr.active_width) *
++				attr.active_speed;
++			if (rate < 0) {
++				ret = -EINVAL;
++				goto out;
++			}
++
++			ret = sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
++				      rate / 10, rate % 10 ? ".5" : "",
++				      ib_width_enum_to_int(attr.active_width),
++				      speed);
++		}
+ 	}
+-
+-	rate = 25 * ib_width_enum_to_int(attr.active_width) * attr.active_speed;
+-	if (rate < 0)
+-		return -EINVAL;
+-
+-	return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
+-		       rate / 10, rate % 10 ? ".5" : "",
+-		       ib_width_enum_to_int(attr.active_width), speed);
++out:
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
+@@ -211,22 +239,42 @@ static ssize_t phys_state_show(struct ib
+ {
+ 	struct ib_port_attr attr;
+ 
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	switch (attr.phys_state) {
+-	case 1:  return sprintf(buf, "1: Sleep\n");
+-	case 2:  return sprintf(buf, "2: Polling\n");
+-	case 3:  return sprintf(buf, "3: Disabled\n");
+-	case 4:  return sprintf(buf, "4: PortConfigurationTraining\n");
+-	case 5:  return sprintf(buf, "5: LinkUp\n");
+-	case 6:  return sprintf(buf, "6: LinkErrorRecovery\n");
+-	case 7:  return sprintf(buf, "7: Phy Test\n");
+-	default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.phys_state) {
++			case 1:
++				ret = sprintf(buf, "1: Sleep\n");
++				break;
++			case 2:
++				ret = sprintf(buf, "2: Polling\n");
++				break;
++			case 3:
++				ret = sprintf(buf, "3: Disabled\n");
++				break;
++			case 4:
++				ret = sprintf(buf, "4: PortConfigurationTraining\n");
++				break;
++			case 5:
++				ret = sprintf(buf, "5: LinkUp\n");
++				break;
++			case 6:
++				ret = sprintf(buf, "6: LinkErrorRecovery\n");
++				break;
++			case 7:
++				ret = sprintf(buf, "7: Phy Test\n");
++				break;
++			default:
++				ret = sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++				break;
++			}
++		}
+ 	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static PORT_ATTR_RO(state);
+@@ -256,21 +304,24 @@ static ssize_t show_port_gid(struct ib_p
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	union ib_gid gid;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) gid.raw)[0]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[1]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[2]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[3]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[4]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[5]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[6]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[7]));
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) gid.raw)[0]),
++				      be16_to_cpu(((__be16 *) gid.raw)[1]),
++				      be16_to_cpu(((__be16 *) gid.raw)[2]),
++				      be16_to_cpu(((__be16 *) gid.raw)[3]),
++				      be16_to_cpu(((__be16 *) gid.raw)[4]),
++				      be16_to_cpu(((__be16 *) gid.raw)[5]),
++				      be16_to_cpu(((__be16 *) gid.raw)[6]),
++				      be16_to_cpu(((__be16 *) gid.raw)[7]));
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
+@@ -279,13 +330,16 @@ static ssize_t show_port_pkey(struct ib_
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	u16 pkey;
+-	ssize_t ret;
+-
+-	ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%04x\n", pkey);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
++		if (!ret)
++			ret = sprintf(buf, "0x%04x\n", pkey);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ #define PORT_PMA_ATTR(_name, _counter, _width, _offset)			\
+@@ -308,6 +346,12 @@ static ssize_t show_pma_counter(struct i
+ 	if (!p->ibdev->process_mad)
+ 		return sprintf(buf, "N/A (no PMA)\n");
+ 
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (!ibdev_is_alive(p->ibdev)) {
++		ret = -ENODEV;
++		goto out;
++	}
++
+ 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
+ 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ 	if (!in_mad || !out_mad) {
+@@ -354,7 +414,7 @@ static ssize_t show_pma_counter(struct i
+ out:
+ 	kfree(in_mad);
+ 	kfree(out_mad);
+-
++	mutex_unlock(&p->ibdev->sysfs_mutex);
+ 	return ret;
+ }
+ 
+@@ -594,20 +654,20 @@ static ssize_t show_sys_image_guid(struc
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_attr attr;
+-	ssize_t ret;
+-
+-	if (!ibdev_is_alive(dev))
+-		return -ENODEV;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_device(dev, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_query_device(dev, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_node_guid(struct device *device,
+@@ -639,17 +699,20 @@ static ssize_t set_node_desc(struct devi
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_modify desc = {};
+-	int ret;
++	int ret = -ENODEV;
+ 
+ 	if (!dev->modify_device)
+ 		return -EIO;
+ 
+ 	memcpy(desc.node_desc, buf, min_t(int, count, 64));
+-	ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
+-	if (ret)
+-		return ret;
+-
+-	return count;
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
++		if (!ret)
++			ret = count;
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
+@@ -677,14 +740,18 @@ static ssize_t show_protocol_stat(const 
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	union rdma_protocol_stats stats;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = dev->get_protocol_stats(dev, &stats);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%llu\n",
+-		       (unsigned long long) ((u64 *) &stats)[offset]);
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = dev->get_protocol_stats(dev, &stats);
++		if (!ret)
++			ret = sprintf(buf, "%llu\n",
++				      (unsigned long long)
++				      ((u64 *) &stats)[offset]);
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ /* generate a read-only iwarp statistics attribute */
+Index: ofed_kernel/include/rdma/ib_verbs.h
+===================================================================
+--- ofed_kernel.orig/include/rdma/ib_verbs.h	2009-02-22 15:36:40.252210000 +0200
++++ ofed_kernel/include/rdma/ib_verbs.h	2009-02-22 15:38:28.678322000 +0200
+@@ -1205,6 +1205,7 @@ struct ib_device {
+ 		IB_DEV_REGISTERED,
+ 		IB_DEV_UNREGISTERED
+ 	}                            reg_state;
++	struct mutex		     sysfs_mutex;
+ 
+ 	u64			     uverbs_cmd_mask;
+ 	int			     uverbs_abi_ver;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/cxgb3_0002_undo_250.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/cxgb3_0002_undo_250.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/cxgb3_0002_undo_250.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,8 @@
-commit 204e2f98c2d13f869b8541f3c57c7314f75cab11
-Author: Divy Le Ray <divy at chelsio.com>
-Date:   Tue May 6 19:26:01 2008 -0700
-
-    cxgb3 - fix EEH
-    
-    Reset the chip when the PCI link goes down.
-    Preserve the napi structure when a sge qset's resources are freed.
-    Replay only HW initialization when the chip comes out of reset.
-    
-    Signed-off-by: Divy Le ray <divy at chelsio.com>
-    Signed-off-by: Jeff Garzik <jgarzik at redhat.com>
-
----
- drivers/net/cxgb3/common.h     |    1 -
- drivers/net/cxgb3/cxgb3_main.c |   10 ++++------
- drivers/net/cxgb3/regs.h       |    8 --------
- drivers/net/cxgb3/sge.c        |   31 ++-----------------------------
- drivers/net/cxgb3/t3_hw.c      |   28 ----------------------------
- 5 files changed, 6 insertions(+), 72 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/common.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/common.h
-+++ ofed_kernel/drivers/net/cxgb3/common.h
-@@ -700,7 +700,6 @@ void mac_prep(struct cmac *mac, struct a
+diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
+index d5e9bf7..dc2c79d 100644
+--- a/drivers/net/cxgb3/common.h
++++ b/drivers/net/cxgb3/common.h
+@@ -726,7 +726,6 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
  void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
  int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
  		    int reset);
@@ -31,11 +10,11 @@
  void t3_led_ready(struct adapter *adapter);
  void t3_fatal_err(struct adapter *adapter);
  void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -2449,6 +2449,9 @@ static pci_ers_result_t t3_io_error_dete
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 8de820e..48fbda6 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -2485,6 +2485,9 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
  	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
  		offload_close(&adapter->tdev);
  
@@ -45,7 +24,7 @@
  	adapter->flags &= ~FULL_INIT_DONE;
  
  	pci_disable_device(pdev);
-@@ -2473,12 +2476,8 @@ static pci_ers_result_t t3_io_slot_reset
+@@ -2509,12 +2512,8 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
  		goto err;
  	}
  	pci_set_master(pdev);
@@ -59,7 +38,7 @@
  		goto err;
  
  	return PCI_ERS_RESULT_RECOVERED;
-@@ -2630,7 +2629,6 @@ static int __devinit init_one(struct pci
+@@ -2666,7 +2665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
  	}
  
  	pci_set_master(pdev);
@@ -67,10 +46,10 @@
  
  	mmio_start = pci_resource_start(pdev, 0);
  	mmio_len = pci_resource_len(pdev, 0);
-Index: ofed_kernel/drivers/net/cxgb3/regs.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/regs.h
-+++ ofed_kernel/drivers/net/cxgb3/regs.h
+diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
+index a035d5c..086cd02 100644
+--- a/drivers/net/cxgb3/regs.h
++++ b/drivers/net/cxgb3/regs.h
 @@ -444,14 +444,6 @@
  
  #define A_PCIE_CFG 0x88
@@ -86,11 +65,11 @@
  #define S_PCIE_CLIDECEN    16
  #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
  #define F_PCIE_CLIDECEN    V_PCIE_CLIDECEN(1U)
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index 1b0861d..912f816 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
  }
  
  /**
@@ -124,7 +103,7 @@
   *	free_qset - free the resources of an SGE queue set
   *	@adapter: the adapter owning the queue set
   *	@q: the queue set
-@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter 
+@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -133,20 +112,11 @@
  }
  
  /**
-@@ -1429,7 +1402,7 @@ static void restart_ctrlq(unsigned long 
-  */
- int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
- {
--	int ret;
-+	int ret;
- 	local_bh_disable();
- 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
- 	local_bh_enable();
-Index: ofed_kernel/drivers/net/cxgb3/t3_hw.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/t3_hw.c
-+++ ofed_kernel/drivers/net/cxgb3/t3_hw.c
-@@ -3265,7 +3265,6 @@ static void config_pcie(struct adapter *
+diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
+index bf5c076..533fc74 100644
+--- a/drivers/net/cxgb3/t3_hw.c
++++ b/drivers/net/cxgb3/t3_hw.c
+@@ -3303,7 +3303,6 @@ static void config_pcie(struct adapter *adap)
  
  	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
  	t3_set_reg_field(adap, A_PCIE_CFG, 0,
@@ -154,15 +124,14 @@
  			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
  }
  
-@@ -3657,30 +3656,3 @@ void t3_led_ready(struct adapter *adapte
- 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+@@ -3706,31 +3705,3 @@ void t3_led_ready(struct adapter *adapter)
  			 F_GPIO0_OUT_VAL);
  }
--
+ 
 -int t3_replay_prep_adapter(struct adapter *adapter)
 -{
 -	const struct adapter_info *ai = adapter->params.info;
--	unsigned int i, j = 0;
+-	unsigned int i, j = -1;
 -	int ret;
 -
 -	early_hw_init(adapter, ai);
@@ -171,15 +140,17 @@
 -		return ret;
 -
 -	for_each_port(adapter, i) {
+-		const struct port_type_info *pti;
 -		struct port_info *p = adap2pinfo(adapter, i);
--		while (!adapter->params.vpd.port_type[j])
--			++j;
 -
--		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
--					ai->mdio_ops);
+-		while (!adapter->params.vpd.port_type[++j])
+-			;
 -
+-		pti = &port_types[adapter->params.vpd.port_type[j]];
+-		ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
+-		if (ret)
+-			return ret;
 -		p->phy.ops->power_down(&p->phy, 1);
--		++j;
 -	}
 -
 -return 0;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/cxgb3_0010_napi.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/cxgb3_0010_napi.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/cxgb3_0010_napi.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,21 @@
----
- drivers/net/cxgb3/adapter.h    |   22 +++--
- drivers/net/cxgb3/cxgb3_main.c |   94 ++++++++++++++++------
- drivers/net/cxgb3/sge.c        |  170 ++++++++++++++++++++++++-----------------
- 3 files changed, 185 insertions(+), 101 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/adapter.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/adapter.h
-+++ ofed_kernel/drivers/net/cxgb3/adapter.h
-@@ -47,13 +47,11 @@
- #include <asm/io.h>
+diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
+index 3b33ecb..21dad82 100644
+--- a/drivers/net/cxgb3/adapter.h
++++ b/drivers/net/cxgb3/adapter.h
+@@ -48,12 +48,10 @@
  
  struct vlan_group;
--struct adapter;
+ struct adapter;
 -struct sge_qset;
  
-+struct adapter;
  struct port_info {
  	struct adapter *adapter;
  	struct vlan_group *vlan_grp;
 -	struct sge_qset *qs;
- 	const struct port_type_info *port_type;
  	u8 port_id;
  	u8 rx_csum_offload;
-@@ -184,8 +182,6 @@ enum {				/* per port SGE statistics */
+ 	u8 nqsets;
+@@ -183,8 +181,6 @@ enum {				/* per port SGE statistics */
  #define T3_MAX_LRO_MAX_PKTS 64
  
  struct sge_qset {		/* an SGE queue set */
@@ -32,7 +24,7 @@
  	struct sge_rspq rspq;
  	struct sge_fl fl[SGE_RXQ_PER_SET];
  	struct sge_txq txq[SGE_TXQ_PER_SET];
-@@ -196,7 +192,7 @@ struct sge_qset {		/* an SGE queue set *
+@@ -195,7 +191,7 @@ struct sge_qset {		/* an SGE queue set */
  	int lro_enabled;
  	int lro_frag_len;
  	void *lro_va;
@@ -41,7 +33,7 @@
  	unsigned long txq_stopped;	/* which Tx queues are stopped */
  	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
  	unsigned long port_stats[SGE_PSTAT_MAX];
-@@ -241,6 +237,12 @@ struct adapter {
+@@ -240,6 +236,12 @@ struct adapter {
  	struct delayed_work adap_check_task;
  	struct work_struct ext_intr_handler_task;
  
@@ -54,7 +46,7 @@
  	struct dentry *debugfs_root;
  
  	struct mutex mdio_lock;
-@@ -267,6 +269,12 @@ static inline struct port_info *adap2pin
+@@ -266,6 +268,12 @@ static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
  	return netdev_priv(adap->port[idx]);
  }
  
@@ -67,7 +59,7 @@
  #define OFFLOAD_DEVMAP_BIT 15
  
  #define tdev2adap(d) container_of(d, struct adapter, tdev)
-@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, str
+@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -76,11 +68,11 @@
  int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
  		unsigned char *data);
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -410,17 +410,49 @@ static void setup_rss(struct adapter *ad
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 0f4c694..342d441 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -435,17 +435,49 @@ static void setup_rss(struct adapter *adap)
  		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
  }
  
@@ -98,9 +90,7 @@
 -	int i;
 +	int i, j, dummy_idx = 0;
 +	struct net_device *nd;
- 
--	for (i = 0; i < SGE_QSETS; i++) {
--		struct sge_qset *qs = &adap->sge.qs[i];
++
 +	for_each_port(adap, i) {
 +		struct net_device *dev = adap->port[i];
 +		const struct port_info *pi = netdev_priv(dev);
@@ -112,7 +102,9 @@
 +				nd = alloc_netdev(sizeof(*p), "", ether_setup);
 +				if (!nd)
 +					goto free_all;
-+
+ 
+-	for (i = 0; i < SGE_QSETS; i++) {
+-		struct sge_qset *qs = &adap->sge.qs[i];
 +				p = netdev_priv(nd);
 +				p->adapter = adap;
 +				nd->weight = 64;
@@ -137,7 +129,7 @@
  }
  
  /*
-@@ -431,18 +463,20 @@ static void init_napi(struct adapter *ad
+@@ -456,18 +488,20 @@ static void init_napi(struct adapter *adap)
  static void quiesce_rx(struct adapter *adap)
  {
  	int i;
@@ -168,7 +160,7 @@
  }
  
  /**
-@@ -455,7 +489,7 @@ static void enable_all_napi(struct adapt
+@@ -480,7 +514,7 @@ static void enable_all_napi(struct adapter *adap)
   */
  static int setup_sge_qsets(struct adapter *adap)
  {
@@ -177,7 +169,7 @@
  	unsigned int ntxq = SGE_TXQ_PER_SET;
  
  	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
-@@ -463,14 +497,15 @@ static int setup_sge_qsets(struct adapte
+@@ -488,14 +522,15 @@ static int setup_sge_qsets(struct adapter *adap)
  
  	for_each_port(adap, i) {
  		struct net_device *dev = adap->port[i];
@@ -196,7 +188,7 @@
  			if (err) {
  				t3_free_sge_resources(adap);
  				return err;
-@@ -884,6 +919,10 @@ static int cxgb_up(struct adapter *adap)
+@@ -909,6 +944,10 @@ static int cxgb_up(struct adapter *adap)
  				goto out;
  		}
  
@@ -207,7 +199,7 @@
  		err = t3_init_hw(adap, 0);
  		if (err)
  			goto out;
-@@ -896,7 +935,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -921,7 +960,6 @@ static int cxgb_up(struct adapter *adap)
  			goto out;
  
  		setup_rss(adap);
@@ -215,7 +207,7 @@
  		adap->flags |= FULL_INIT_DONE;
  	}
  
-@@ -924,7 +962,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -949,7 +987,6 @@ static int cxgb_up(struct adapter *adap)
  				      adap->name, adap)))
  		goto irq_err;
  
@@ -223,7 +215,7 @@
  	t3_sge_start(adap);
  	t3_intr_enable(adap);
  
-@@ -1061,10 +1098,8 @@ static int cxgb_open(struct net_device *
+@@ -1086,10 +1123,8 @@ static int cxgb_open(struct net_device *dev)
  	int other_ports = adapter->open_device_map & PORT_MASK;
  	int err;
  
@@ -235,7 +227,7 @@
  
  	set_bit(pi->port_id, &adapter->open_device_map);
  	if (is_offload(adapter) && !ofld_disable) {
-@@ -2700,6 +2735,7 @@ static int __devinit init_one(struct pci
+@@ -2736,6 +2771,7 @@ static int __devinit init_one(struct pci_dev *pdev,
  #ifdef CONFIG_NET_POLL_CONTROLLER
  		netdev->poll_controller = cxgb_netpoll;
  #endif
@@ -243,7 +235,7 @@
  
  		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
  	}
-@@ -2800,6 +2836,12 @@ static void __devexit remove_one(struct 
+@@ -2836,6 +2872,12 @@ static void __devexit remove_one(struct pci_dev *pdev)
  		t3_free_sge_resources(adapter);
  		cxgb_disable_msi(adapter);
  
@@ -256,11 +248,11 @@
  		for_each_port(adapter, i)
  			if (adapter->port[i])
  				free_netdev(adapter->port[i]);
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter 
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index f6bc6fe..3bbf626 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -270,7 +262,7 @@
  	memset(q, 0, sizeof(*q));
  }
  
-@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, str
+@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
  	unsigned int ndesc, pidx, credits, gen, compl;
  	const struct port_info *pi = netdev_priv(dev);
  	struct adapter *adap = pi->adapter;
@@ -279,7 +271,7 @@
  	struct sge_txq *q = &qs->txq[TXQ_ETH];
  
  	/*
-@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long 
+@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long data)
  	struct sk_buff *skb;
  	struct sge_qset *qs = (struct sge_qset *)data;
  	struct sge_txq *q = &qs->txq[TXQ_CTRL];
@@ -295,7 +287,7 @@
  
  		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
  
-@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long 
+@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long data)
  
  	spin_unlock(&q->lock);
  	wmb();
@@ -304,7 +296,7 @@
  		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
  }
  
-@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struc
+@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
  	else {
  		struct sge_qset *qs = rspq_to_qset(q);
  
@@ -314,7 +306,7 @@
  		q->rx_head = skb;
  	}
  	q->rx_tail = skb;
-@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundl
+@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev,
   *	receive handler.  Batches need to be of modest size as we do prefetches
   *	on the packets in each.
   */
@@ -357,7 +349,7 @@
  			prefetch(head->data);
  			skbs[ngathered] = head;
  			head = head->next;
-@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct 
+@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct *napi, int budget)
  		}
  		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
  	}
@@ -370,7 +362,7 @@
  }
  
  /**
-@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const
+@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const struct rsp_desc *r)
  
  /**
   *	napi_rx_handler - the NAPI handler for Rx processing
@@ -449,7 +441,7 @@
  }
  
  /**
-@@ -2448,7 +2462,8 @@ static inline int handle_responses(struc
+@@ -2448,7 +2462,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
  		return 0;
  	}
@@ -459,7 +451,7 @@
  	return 1;
  }
  
-@@ -2459,7 +2474,8 @@ static inline int handle_responses(struc
+@@ -2459,7 +2474,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -469,7 +461,7 @@
  	struct sge_rspq *q = &qs->rspq;
  
  	spin_lock(&q->lock);
-@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, vo
+@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -484,7 +476,7 @@
  		q->unhandled_irqs++;
  	spin_unlock(&q->lock);
  	return IRQ_HANDLED;
-@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, 
+@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, void *cookie)
  	return IRQ_HANDLED;
  }
  
@@ -502,7 +494,7 @@
  		return 1;
  	}
  	return 0;
-@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int 
+@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
  
  	spin_lock(&q->lock);
  
@@ -515,7 +507,7 @@
  	if (!new_packets && t3_slow_intr_handler(adap) == 0)
  		q->unhandled_irqs++;
  
-@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, voi
+@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, void *cookie)
  static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  {
  	u32 map;
@@ -527,7 +519,7 @@
  
  	t3_write_reg(adap, A_PL_CLI, 0);
  	map = t3_read_reg(adap, A_SG_DATA_INTR);
-@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq
+@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  	if (unlikely(map & F_ERRINTR))
  		t3_slow_intr_handler(adap);
  
@@ -550,7 +542,7 @@
  
  	spin_unlock(&q0->lock);
  	return IRQ_HANDLED;
-@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long d
+@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long data)
  {
  	spinlock_t *lock;
  	struct sge_qset *qs = (struct sge_qset *)data;
@@ -560,7 +552,7 @@
  
  	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
  		reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
-@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long d
+@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long data)
  		spin_unlock(&qs->txq[TXQ_OFLD].lock);
  	}
  	lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
@@ -572,7 +564,7 @@
  			u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
  
  			if (qs->fl[0].credits < qs->fl[0].size)
-@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long d
+@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long data)
   */
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
  {
@@ -586,7 +578,7 @@
  }
  
  /**
-@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_
+@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
   */
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -595,7 +587,7 @@
  {
  	int i, avail, ret = -ENOMEM;
  	struct sge_qset *q = &adapter->sge.qs[id];
-@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *ad
+@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  	}
  
  	spin_unlock_irq(&adapter->sge.reg_lock);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,23 +3,32 @@
  drivers/infiniband/hw/ipath/ipath_user_sdma.c |    6 +++---
  2 files changed, 4 insertions(+), 4 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:37:35.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:41:22.000000000 -0800
 @@ -698,7 +698,7 @@ retry:
  
  	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
  			      tx->map_len, DMA_TO_DEVICE);
--	if (dma_mapping_error(&dd->pcidev->dev, addr)) {
-+	if (dma_mapping_error(addr)) {
- 		ret = -EIO;
- 		goto unlock;
- 	}
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+-	if (dma_mapping_error(&dd->pcidev->dev, addr))
++	if (dma_mapping_error(addr))
+ 		goto ioerr;
+ 
+ 	dwoffset = tx->map_len >> 2;
+@@ -739,7 +739,7 @@ retry:
+ 		dw = (len + 3) >> 2;
+ 		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
+ 				      DMA_TO_DEVICE);
+-		if (dma_mapping_error(&dd->pcidev->dev, addr))
++		if (dma_mapping_error(addr))
+ 			goto unmap;
+ 		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
+ 		/* SDmaUseLargeBuf has to be set in every descriptor */
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:18.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:54.000000000 -0800
 @@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(cons
  
  	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/ipath_0100_iowrite32_copy.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/ipath_0100_iowrite32_copy.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/ipath_0100_iowrite32_copy.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,88 +1,16 @@
-BACKPORT - backport iowrite32_copy to 2.6.15 and earlier
+BACKPORT - backport iowrite32_copy awareness to 2.6.15 and earlier
 
+Signed-off-by: John Gregor <john.gregor at qlogic.com>
 ---
- drivers/infiniband/hw/ipath/Makefile                |    1 
- drivers/infiniband/hw/ipath/iowrite32_copy_x86_64.S |   57 ++++++++++++++++++++
- drivers/infiniband/hw/ipath/ipath_backport.h        |   48 ++++++++++++++++
- drivers/infiniband/hw/ipath/ipath_kernel.h          |    1 
- drivers/infiniband/hw/ipath/ipath_verbs.h           |    2 
- 5 files changed, 109 insertions(+)
+ drivers/infiniband/hw/ipath/ipath_backport.h |   48 +++++++++++++++++++++++++++
+ drivers/infiniband/hw/ipath/ipath_kernel.h   |    1 
+ drivers/infiniband/hw/ipath/ipath_verbs.h    |    2 +
+ 3 files changed, 51 insertions(+)
 
-Index: ofed_kernel/drivers/infiniband/hw/ipath/Makefile
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_backport.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/Makefile
-+++ ofed_kernel/drivers/infiniband/hw/ipath/Makefile
-@@ -39,3 +39,4 @@ ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba6
- 
- ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
- ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o
-+ib_ipath-$(CONFIG_X86_64) += iowrite32_copy_x86_64.o
-Index: ofed_kernel/drivers/infiniband/hw/ipath/iowrite32_copy_x86_64.S
-===================================================================
 --- /dev/null
-+++ ofed_kernel/drivers/infiniband/hw/ipath/iowrite32_copy_x86_64.S
-@@ -0,0 +1,57 @@
-+/*
-+ * Copyright (c) 2003, 2004, 2005. PathScale, Inc. All rights reserved.
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses.  You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ *     Redistribution and use in source and binary forms, with or
-+ *     without modification, are permitted provided that the following
-+ *     conditions are met:
-+ *
-+ *      - Redistributions of source code must retain the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer.
-+ *
-+ *      - Redistributions in binary form must reproduce the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer in the documentation and/or other materials
-+ *        provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ */
-+
-+/**
-+ * __iowrite32_copy - copy a memory block using dword multiple writes
-+ *
-+ * This is primarily for writing to the InfiniPath PIO buffers, which
-+ * only support dword multiple writes, and thus can not use memcpy().
-+ * For this reason, we use nothing smaller than dword writes.
-+ * It is also used as a fast copy routine in some places that have been
-+ * measured to win over memcpy, and the performance delta matters.
-+ *
-+ * Count is number of dwords; might not be a qword multiple.
-+ */
-+
-+ 	.globl __iowrite32_copy
-+	.p2align 4
-+/* rdi	destination, rsi source, rdx count */
-+__iowrite32_copy:
-+	movl %edx,%ecx
-+	shrl $1,%ecx
-+	andl $1,%edx
-+	rep
-+	movsq
-+	movl %edx,%ecx
-+	rep
-+	movsd
-+	ret
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_backport.h
-===================================================================
---- /dev/null
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_backport.h
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_backport.h
 @@ -0,0 +1,48 @@
 +#ifndef _IPATH_BACKPORT_H
 +#define _IPATH_BACKPORT_H
@@ -132,10 +60,10 @@
 +void __iowrite32_copy(void __iomem * dst, const void *src, size_t count);
 +
 +#endif				/* _IPATH_BACKPORT_H */
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_kernel.h
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_kernel.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_kernel.h
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_kernel.h
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_kernel.h
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_kernel.h
 @@ -47,6 +47,7 @@
  #include <asm/io.h>
  #include <rdma/ib_verbs.h>
@@ -144,10 +72,10 @@
  #include "ipath_common.h"
  #include "ipath_debug.h"
  #include "ipath_registers.h"
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_verbs.h
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_verbs.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_verbs.h
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_verbs.h
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_verbs.h
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_verbs.h
 @@ -42,6 +42,8 @@
  #include <rdma/ib_pack.h>
  #include <rdma/ib_user_verbs.h>
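
The hunks above slim this backport down from a full implementation to "awareness": the x86_64 assembly version of __iowrite32_copy and its Makefile hook are dropped, and the refreshed ipath_backport.h keeps only the declaration for pre-2.6.16 kernels. For readers who have not met the helper, here is a minimal, purely illustrative C sketch of the contract described in the removed comment block; the name iowrite32_copy_fallback and the loop body are not taken from this commit.

#include <stddef.h>
#include <stdint.h>

/* Illustrative fallback only, not from this commit.  The removed comment
 * explains the constraint: the InfiniPath PIO buffers accept only 32-bit
 * stores, so memcpy() (which may issue byte or 64-bit accesses) cannot be
 * used.  'count' is a number of dwords, as in the removed assembly. */
void iowrite32_copy_fallback(volatile uint32_t *dst,
                             const uint32_t *src, size_t count)
{
        while (count--)
                *dst++ = *src++;        /* one 32-bit store per dword */
}

The in-kernel helper takes a void __iomem * destination and goes through the MMIO accessors; the loop above only illustrates the dword-at-a-time behaviour the driver relies on.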

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/ipoib_0100_to_2.6.21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/ipoib_0100_to_2.6.21.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/ipoib_0100_to_2.6.21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -12,11 +12,11 @@
  drivers/infiniband/ulp/ipoib/ipoib_multicast.c |   10 ++--
  5 files changed, 61 insertions(+), 64 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:56:08.000000000 +0200
-@@ -279,8 +279,6 @@ struct ipoib_dev_priv {
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:46:20.000000000 +0300
+@@ -277,8 +277,6 @@ struct ipoib_dev_priv {
  
  	struct net_device *dev;
  
@@ -25,7 +25,7 @@
  	unsigned long flags;
  
  	struct mutex vlan_mutex;
-@@ -338,6 +336,8 @@ struct ipoib_dev_priv {
+@@ -336,6 +334,8 @@ struct ipoib_dev_priv {
  
  	struct ib_event_handler event_handler;
  
@@ -34,7 +34,7 @@
  	struct net_device *parent;
  	struct list_head child_intfs;
  	struct list_head list;
-@@ -425,7 +425,7 @@ extern struct workqueue_struct *ipoib_wo
+@@ -423,7 +423,7 @@ extern struct workqueue_struct *ipoib_wo
  
  /* functions */
  
@@ -43,10 +43,10 @@
  void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
  void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:46:20.000000000 +0300
 @@ -593,7 +593,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		ipoib_dbg(priv, "cm recv error "
  			   "(status=%d, wrid=%d vend_err %x)\n",
@@ -56,7 +56,7 @@
  		if (has_srq)
  			goto repost;
  		else {
-@@ -658,7 +658,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
+@@ -646,7 +646,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		 * this packet and reuse the old buffer.
  		 */
  		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
@@ -65,7 +65,7 @@
  		goto repost;
  	}
  
-@@ -676,8 +676,8 @@ copied:
+@@ -664,8 +664,8 @@ copied:
  	skb_pull(skb, IPOIB_ENCAP_LEN);
  
  	dev->last_rx = jiffies;
@@ -76,7 +76,7 @@
  
  	skb->dev = dev;
  	/* XXX get correct PACKET_ type here */
-@@ -726,8 +726,8 @@ void ipoib_cm_send(struct net_device *de
+@@ -714,8 +714,8 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(skb->len > tx->mtu)) {
  		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
  			   skb->len, tx->mtu);
@@ -87,7 +87,7 @@
  		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
  		return;
  	}
-@@ -746,7 +746,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -734,7 +734,7 @@ void ipoib_cm_send(struct net_device *de
  	tx_req->skb = skb;
  	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
  	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
@@ -96,7 +96,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -756,7 +756,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -744,7 +744,7 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
  			       addr, skb->len))) {
  		ipoib_warn(priv, "post_send failed\n");
@@ -105,7 +105,7 @@
  		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
  		dev_kfree_skb_any(skb);
  	} else {
-@@ -793,8 +793,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
+@@ -781,8 +781,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
  	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
  
  	/* FIXME: is this right? Shouldn't we only increment on success? */
@@ -116,10 +116,10 @@
  
  	dev_kfree_skb_any(tx_req->skb);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:43:16.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:49:04.000000000 +0300
 @@ -261,7 +261,7 @@ static void ipoib_ib_handle_rx_wc(struct
  	 * this packet and reuse the old buffer.
  	 */
@@ -263,60 +263,41 @@
  		--priv->tx_outstanding;
  		ipoib_dma_unmap_tx(priv->ca, tx_req);
  		dev_kfree_skb_any(skb);
-@@ -809,6 +812,7 @@ int ipoib_ib_dev_stop(struct net_device 
+@@ -708,8 +711,7 @@ int ipoib_ib_dev_open(struct net_device 
+ 	priv->poll_timer.function = ipoib_ib_tx_timer_func;
+ 	priv->poll_timer.data = (unsigned long)dev;
+ 
+-	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_enable(&priv->napi);
++	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+ 
+ 	return 0;
+ }
+@@ -828,8 +830,8 @@ int ipoib_ib_dev_stop(struct net_device 
+ 	struct ipoib_tx_buf *tx_req;
  	int i;
  
- 	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+-	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_disable(&priv->napi);
++	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 +	netif_poll_disable(dev);
  
  	ipoib_cm_dev_stop(dev);
  
-@@ -893,6 +897,7 @@ timeout:
- 		msleep(1);
- 	}
+@@ -903,6 +905,7 @@ timeout:
  
+ 	ipoib_ah_dev_cleanup(dev);
+ 
 +	netif_poll_enable(dev);
  	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
  
  	return 0;
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:55:35.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:58:18.000000000 +0200
-@@ -106,20 +106,16 @@ int ipoib_open(struct net_device *dev)
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:44:02.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:46:20.000000000 +0300
+@@ -585,7 +585,7 @@ static void neigh_add_path(struct sk_buf
  
- 	ipoib_dbg(priv, "bringing up interface\n");
- 
--	if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
--		napi_enable(&priv->napi);
-+	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- 
- 	if (ipoib_pkey_dev_delay_open(dev))
- 		return 0;
- 
--	if (ipoib_ib_dev_open(dev)) {
--		napi_disable(&priv->napi);
--		return -EINVAL;
--	}
-+	if (ipoib_ib_dev_open(dev))
-+  		return -EINVAL;
- 
- 	if (ipoib_ib_dev_up(dev)) {
- 		ipoib_ib_dev_stop(dev, 1);
--		napi_disable(&priv->napi);
- 		return -EINVAL;
- 	}
- 
-@@ -152,7 +148,6 @@ static int ipoib_stop(struct net_device 
- 	ipoib_dbg(priv, "stopping interface\n");
- 
- 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
--	napi_disable(&priv->napi);
- 
- 	netif_stop_queue(dev);
- 
-@@ -604,7 +599,7 @@ static void neigh_add_path(struct sk_buf
- 
  	neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
  	if (!neigh) {
 -		++dev->stats.tx_dropped;
@@ -324,7 +305,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -667,7 +662,7 @@ err_list:
+@@ -646,7 +646,7 @@ err_list:
  err_path:
  	ipoib_neigh_free(dev, neigh);
  err_drop:
@@ -333,7 +314,7 @@
  	dev_kfree_skb_any(skb);
  
  	spin_unlock_irqrestore(&priv->lock, flags);
-@@ -714,7 +709,7 @@ static void unicast_arp_send(struct sk_b
+@@ -697,7 +697,7 @@ static void unicast_arp_send(struct sk_b
  			} else
  				__path_add(dev, path);
  		} else {
@@ -342,7 +323,7 @@
  			dev_kfree_skb_any(skb);
  		}
  
-@@ -733,7 +728,7 @@ static void unicast_arp_send(struct sk_b
+@@ -716,7 +716,7 @@ static void unicast_arp_send(struct sk_b
  		skb_push(skb, sizeof *phdr);
  		__skb_queue_tail(&path->queue, skb);
  	} else {
@@ -351,7 +332,7 @@
  		dev_kfree_skb_any(skb);
  	}
  
-@@ -790,7 +785,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -773,7 +773,7 @@ static int ipoib_start_xmit(struct sk_bu
  			__skb_queue_tail(&neigh->queue, skb);
  			spin_unlock_irqrestore(&priv->lock, flags);
  		} else {
@@ -360,7 +341,7 @@
  			dev_kfree_skb_any(skb);
  		}
  	} else {
-@@ -816,7 +811,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -799,7 +799,7 @@ static int ipoib_start_xmit(struct sk_bu
  					   IPOIB_QPN(phdr->hwaddr),
  					   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
  				dev_kfree_skb_any(skb);
@@ -369,7 +350,7 @@
  				return NETDEV_TX_OK;
  			}
  
-@@ -842,7 +837,7 @@ static void ipoib_timeout(struct net_dev
+@@ -825,7 +825,7 @@ static void ipoib_timeout(struct net_dev
  static int ipoib_hard_header(struct sk_buff *skb,
  			     struct net_device *dev,
  			     unsigned short type,
@@ -378,7 +359,7 @@
  {
  	struct ipoib_header *header;
  
-@@ -931,9 +926,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
+@@ -914,9 +914,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
  void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
  {
  	struct sk_buff *skb;
@@ -390,7 +371,7 @@
  		dev_kfree_skb_any(skb);
  	}
  	if (ipoib_cm_get(neigh))
-@@ -1008,10 +1004,6 @@ void ipoib_dev_cleanup(struct net_device
+@@ -991,10 +992,6 @@ void ipoib_dev_cleanup(struct net_device
  	priv->tx_ring = NULL;
  }
  
@@ -401,7 +382,7 @@
  static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
  		       void **tcph, u64 *hdr_flags, void *priv)
  {
-@@ -1069,13 +1061,13 @@ static void ipoib_setup(struct net_devic
+@@ -1052,13 +1049,13 @@ static void ipoib_setup(struct net_devic
  	dev->change_mtu		 = ipoib_change_mtu;
  	dev->hard_start_xmit	 = ipoib_start_xmit;
  	dev->tx_timeout		 = ipoib_timeout;
@@ -418,10 +399,10 @@
  
  	dev->watchdog_timeo	 = HZ;
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:46:20.000000000 +0300
 @@ -100,7 +100,7 @@ static void ipoib_mcast_free(struct ipoi
  	}
  
@@ -448,7 +429,7 @@
  			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
  		}
  		netif_tx_unlock_bh(dev);
-@@ -667,7 +668,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -677,7 +678,7 @@ void ipoib_mcast_send(struct net_device 
  	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)		||
  	    !priv->broadcast					||
  	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
@@ -457,7 +438,7 @@
  		dev_kfree_skb_any(skb);
  		goto unlock;
  	}
-@@ -682,7 +683,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -692,7 +693,7 @@ void ipoib_mcast_send(struct net_device 
  		if (!mcast) {
  			ipoib_warn(priv, "unable to allocate memory for "
  				   "multicast structure\n");
@@ -466,7 +447,7 @@
  			dev_kfree_skb_any(skb);
  			goto out;
  		}
-@@ -697,7 +698,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -707,7 +708,7 @@ void ipoib_mcast_send(struct net_device 
  		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
  			skb_queue_tail(&mcast->pkt_queue, skb);
  		else {

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,26 @@
+---
+ drivers/infiniband/ulp/ipoib/ipoib_fs.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+===================================================================
+--- ofa_kernel-1.4.orig/drivers/infiniband/ulp/ipoib/ipoib_fs.c
++++ ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+@@ -134,7 +134,7 @@ static int ipoib_mcg_open(struct inode *
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_mcg_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_mcg_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -240,7 +240,7 @@ static int ipoib_path_open(struct inode 
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_path_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_path_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/ipoib_x_001_2_6_9_disable_coal.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/ipoib_x_001_2_6_9_disable_coal.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/ipoib_x_001_2_6_9_disable_coal.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,33 @@
+For RHEL4, need to disable coalescing when LRO is enabled,
+or get IPoIB performance degradation (TCP stack issue).
+
+Fixes Bugzilla 1494.
+
+Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
+
+Index: ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+===================================================================
+--- ofa_kernel-1.4.orig/drivers/infiniband/ulp/ipoib/ipoib_verbs.c	2009-02-23 11:25:20.000000000 +0200
++++ ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_verbs.c	2009-02-23 11:34:39.000000000 +0200
+@@ -185,10 +185,17 @@
+ 
+ 	coal = kzalloc(sizeof *coal, GFP_KERNEL);
+ 	if (coal) {
+-		coal->rx_coalesce_usecs = 10;
+-		coal->tx_coalesce_usecs = 10;
+-		coal->rx_max_coalesced_frames = 16;
+-		coal->tx_max_coalesced_frames = 16;
++		if (dev->features & NETIF_F_LRO) {
++			coal->rx_coalesce_usecs = 0;
++			coal->tx_coalesce_usecs = 0;
++			coal->rx_max_coalesced_frames = 0;
++			coal->tx_max_coalesced_frames = 0;
++		} else {
++			coal->rx_coalesce_usecs = 10;
++			coal->tx_coalesce_usecs = 10;
++			coal->rx_max_coalesced_frames = 16;
++			coal->tx_max_coalesced_frames = 16;
++		}
+ 		dev->ethtool_ops->set_coalesce(dev, coal);
+ 		kfree(coal);
+ 	}
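
The new patch is short, but the reasoning in its header is worth restating: on RHEL4 the default interrupt-coalescing values (10 us, 16 frames) degrade IPoIB throughput when LRO is enabled, so the patch zeroes them in that case and keeps the old defaults otherwise. A compact, purely illustrative restatement of that decision follows; the struct and function names are invented for the example and are not part of the commit.

#include <stdbool.h>

/* Illustration only, not from this commit.  Mirrors the branch added to
 * ipoib_verbs.c above: with LRO active, coalescing is switched off
 * entirely; otherwise the usual 10 us / 16 frame defaults are kept. */
struct coal_choice {
        unsigned int usecs;        /* rx/tx coalescing time in usecs */
        unsigned int max_frames;   /* rx/tx max coalesced frames     */
};

struct coal_choice pick_ipoib_coalescing(bool lro_enabled)
{
        struct coal_choice c = { 10, 16 };

        if (lro_enabled) {
                c.usecs = 0;
                c.max_frames = 0;
        }
        return c;
}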

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/iw_nes_050_to_2_6_24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/iw_nes_050_to_2_6_24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/iw_nes_050_to_2_6_24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:23:23.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -1118,19 +1118,19 @@ static ssize_t nes_show_wqm_quanta(struc
  static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
  					const char *buf, size_t count)

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/iw_nes_100_to_2_6_23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/iw_nes_100_to_2_6_23.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/iw_nes_100_to_2_6_23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/Kconfig nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig
---- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:38:45.000000000 -0500
 @@ -2,7 +2,6 @@ config INFINIBAND_NES
  	tristate "NetEffect RNIC Driver"
  	depends on PCI && INET && INFINIBAND
@@ -10,8 +10,8 @@
  	  This is a low-level driver for NetEffect RDMA enabled
  	  Network Interface Cards (RNIC).
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -360,10 +360,11 @@ struct ib_qp *nes_get_qp(struct ib_devic
   */
  static void nes_print_macaddr(struct net_device *netdev)
@@ -29,9 +29,9 @@
  
  /**
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_cm.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2008-11-07 15:26:56.000000000 -0600
-@@ -998,7 +998,6 @@ static int nes_addr_resolve_neigh(struct
+--- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1086,7 +1086,6 @@ static int nes_addr_resolve_neigh(struct
  	struct flowi fl;
  	struct neighbour *neigh;
  	int rc = -1;
@@ -39,7 +39,7 @@
  
  	memset(&fl, 0, sizeof fl);
  	fl.nl_u.ip4_u.daddr = htonl(dst_ip);
-@@ -1012,8 +1011,11 @@ static int nes_addr_resolve_neigh(struct
+@@ -1100,8 +1099,11 @@ static int nes_addr_resolve_neigh(struct
  	if (neigh) {
  		if (neigh->nud_state & NUD_VALID) {
  			nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -53,7 +53,7 @@
  			nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
  					     dst_ip, NES_ARP_ADD);
  			rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
-@@ -1042,7 +1044,6 @@ static struct nes_cm_node *make_cm_node(
+@@ -1130,7 +1132,6 @@ static struct nes_cm_node *make_cm_node(
  	int arpindex = 0;
  	struct nes_device *nesdev;
  	struct nes_adapter *nesadapter;
@@ -61,7 +61,7 @@
  
  	/* create an hte and cm_node for this instance */
  	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
-@@ -1111,8 +1112,11 @@ static struct nes_cm_node *make_cm_node(
+@@ -1197,8 +1198,11 @@ static struct nes_cm_node *make_cm_node(
  
  	/* copy the mac addr to node context */
  	memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
@@ -76,8 +76,8 @@
  	add_hte_node(cm_core, cm_node);
  	atomic_inc(&cm_nodes_created);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2008-11-07 15:38:18.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:38:45.000000000 -0500
 @@ -38,14 +38,9 @@
  #include <linux/ip.h>
  #include <linux/tcp.h>
@@ -90,10 +90,10 @@
 -module_param(nes_lro_max_aggr, uint, 0444);
 -MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
 -
- static u32 crit_err_count;
- u32 int_mod_timer_init;
- u32 int_mod_cq_depth_256;
-@@ -1540,25 +1535,6 @@ static void nes_rq_wqes_timeout(unsigned
+ static int wide_ppm_offset;
+ module_param(wide_ppm_offset, int, 0644);
+ MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm");
+@@ -1531,25 +1526,6 @@ static void nes_rq_wqes_timeout(unsigned
  }
  
  
@@ -119,7 +119,7 @@
  /**
   * nes_init_nic_qp
   */
-@@ -1783,14 +1759,6 @@ int nes_init_nic_qp(struct nes_device *n
+@@ -1773,14 +1749,6 @@ int nes_init_nic_qp(struct nes_device *n
  			jumbomode = 1;
  		nes_nic_init_timer_defaults(nesdev, jumbomode);
  	}
@@ -134,7 +134,7 @@
  	return 0;
  }
  
-@@ -2541,7 +2509,7 @@ static void nes_nic_napi_ce_handler(stru
+@@ -2556,7 +2524,7 @@ static void nes_nic_napi_ce_handler(stru
  {
  	struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
  
@@ -143,7 +143,7 @@
  }
  
  
-@@ -2576,13 +2544,10 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2591,13 +2559,10 @@ void nes_nic_ce_handler(struct nes_devic
  	u16 pkt_type;
  	u16 rqes_processed = 0;
  	u8 sq_cqes = 0;
@@ -157,7 +157,7 @@
  	do {
  		if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
  				NES_NIC_CQE_VALID) {
-@@ -2714,17 +2679,9 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2729,17 +2694,9 @@ void nes_nic_ce_handler(struct nes_devic
  							>> 16);
  					nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
  							nesvnic->netdev->name, vlan_tag);
@@ -178,7 +178,7 @@
  
  skip_rx_indicate0:
  				nesvnic->netdev->last_rx = jiffies;
-@@ -2755,8 +2712,6 @@ skip_rx_indicate0:
+@@ -2770,8 +2727,6 @@ skip_rx_indicate0:
  
  	} while (1);
  
@@ -188,18 +188,18 @@
  		barrier();
  		/* restart the queue if it had been stopped */
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.h nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:38:45.000000000 -0500
 @@ -33,8 +33,6 @@
  #ifndef __NES_HW_H
  #define __NES_HW_H
  
 -#include <linux/inet_lro.h>
 -
+ #define NES_PHY_TYPE_CX4       1
  #define NES_PHY_TYPE_1G        2
  #define NES_PHY_TYPE_IRIS      3
- #define NES_PHY_TYPE_ARGUS     4
-@@ -991,8 +989,6 @@ struct nes_hw_tune_timer {
+@@ -1005,8 +1003,6 @@ struct nes_hw_tune_timer {
  #define NES_TIMER_ENABLE_LIMIT      4
  #define NES_MAX_LINK_INTERRUPTS     128
  #define NES_MAX_LINK_CHECK          200
@@ -208,7 +208,7 @@
  
  struct nes_adapter {
  	u64              fw_ver;
-@@ -1168,7 +1164,6 @@ struct nes_vnic {
+@@ -1194,7 +1190,6 @@ struct nes_vnic {
  	u32               msg_enable;
  	/* u32 tx_avail; */
  	__be32            local_ipaddr;
@@ -216,7 +216,7 @@
  	spinlock_t           tx_lock;	/* could use netdev tx lock? */
  	struct timer_list    rq_wqes_timer;
  	u32                  nic_mem_size;
-@@ -1196,9 +1191,6 @@ struct nes_vnic {
+@@ -1222,9 +1217,6 @@ struct nes_vnic {
  	u8  of_device_registered;
  	u8  rdma_enabled;
  	u8  rx_checksum_disabled;
@@ -227,8 +227,8 @@
  
  struct nes_ib_device {
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
 @@ -96,35 +96,38 @@ static int nics_per_function = 1;
  /**
   * nes_netdev_poll
@@ -292,7 +292,7 @@
  	netif_stop_queue(netdev);
  	list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
  		first_nesvnic = container_of(list_pos, struct nes_vnic, list);
-@@ -797,14 +798,16 @@ static int nes_netdev_set_mac_address(st
+@@ -775,14 +776,16 @@ static int nes_netdev_set_mac_address(st
  	int i;
  	u32 macaddr_low;
  	u16 macaddr_high;
@@ -312,7 +312,7 @@
  	macaddr_high  = ((u16)netdev->dev_addr[0]) << 8;
  	macaddr_high += (u16)netdev->dev_addr[1];
  	macaddr_low   = ((u32)netdev->dev_addr[2]) << 24;
-@@ -909,11 +912,11 @@ static void nes_netdev_set_multicast_lis
+@@ -887,11 +890,11 @@ static void nes_netdev_set_multicast_lis
  			if (mc_index >= max_pft_entries_avaiable)
  				break;
  			if (multicast_addr) {
@@ -329,7 +329,7 @@
  				macaddr_high  = ((u16)multicast_addr->dmi_addr[0]) << 8;
  				macaddr_high += (u16)multicast_addr->dmi_addr[1];
  				macaddr_low   = ((u32)multicast_addr->dmi_addr[2]) << 24;
-@@ -1063,9 +1066,6 @@ static const char nes_ethtool_stringset[
+@@ -1040,9 +1043,6 @@ static const char nes_ethtool_stringset[
  	"CQ Depth 32",
  	"CQ Depth 128",
  	"CQ Depth 256",
@@ -339,17 +339,17 @@
  };
  
  #define NES_ETHTOOL_STAT_COUNT  ARRAY_SIZE(nes_ethtool_stringset)
-@@ -1263,9 +1263,6 @@ static void nes_netdev_get_ethtool_stats
- 	target_stat_values[52] = int_mod_cq_depth_32;
- 	target_stat_values[53] = int_mod_cq_depth_128;
- 	target_stat_values[54] = int_mod_cq_depth_256;
--	target_stat_values[55] = nesvnic->lro_mgr.stats.aggregated;
--	target_stat_values[56] = nesvnic->lro_mgr.stats.flushed;
--	target_stat_values[57] = nesvnic->lro_mgr.stats.no_desc;
+@@ -1240,9 +1240,6 @@ static void nes_netdev_get_ethtool_stats
+ 	target_stat_values[++index] = int_mod_cq_depth_32;
+ 	target_stat_values[++index] = int_mod_cq_depth_128;
+ 	target_stat_values[++index] = int_mod_cq_depth_256;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
  
  }
  
-@@ -1542,8 +1539,6 @@ static struct ethtool_ops nes_ethtool_op
+@@ -1525,8 +1522,6 @@ static struct ethtool_ops nes_ethtool_op
  	.set_sg = ethtool_op_set_sg,
  	.get_tso = ethtool_op_get_tso,
  	.set_tso = ethtool_op_set_tso,
@@ -358,7 +358,7 @@
  };
  
  
-@@ -1615,7 +1610,8 @@ struct net_device *nes_netdev_init(struc
+@@ -1598,7 +1593,8 @@ struct net_device *nes_netdev_init(struc
  	netdev->type = ARPHRD_ETHER;
  	netdev->features = NETIF_F_HIGHDMA;
  	netdev->ethtool_ops = &nes_ethtool_ops;
@@ -368,7 +368,7 @@
  	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
  	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
  	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
-@@ -1648,7 +1644,6 @@ struct net_device *nes_netdev_init(struc
+@@ -1630,7 +1626,6 @@ struct net_device *nes_netdev_init(struc
  
  	if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
  		netdev->features |= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/iw_nes_200_to_2_6_22.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/iw_nes_200_to_2_6_22.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/iw_nes_200_to_2_6_22.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:52:27.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -593,16 +593,22 @@ static int __devinit nes_probe(struct pc
  						nesdev->nesadapter->port_count;
  	}
@@ -35,9 +35,9 @@
  
  	tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.h nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h
---- linux-2.6/drivers/infiniband/hw/nes/nes.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2008-11-07 15:52:27.000000000 -0600
-@@ -276,14 +276,7 @@ static inline __le32 get_crc_value(struc
+--- linux-2.6/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:38:45.000000000 -0500
+@@ -274,14 +274,7 @@ static inline __le32 get_crc_value(struc
  	u32 crc_value;
  	crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad));
  

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/iw_nes_210_to_2_6_21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/iw_nes_210_to_2_6_21.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/iw_nes_210_to_2_6_21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,23 @@
+diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1549,6 +1549,11 @@ static void nes_netdev_vlan_rx_register(
+ 	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ }
+ 
++static void nes_netdev_vlan_rx_kill_vid(struct net_device *netdev, 
++					unsigned short vid)
++{
++}
++
+ 
+ /**
+  * nes_netdev_init - initialize network device
+@@ -1598,6 +1603,7 @@ struct net_device *nes_netdev_init(struc
+ 	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
+ 	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ 	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
++	netdev->vlan_rx_kill_vid = nes_netdev_vlan_rx_kill_vid;
+ 
+ 	/* Fill in the port structure */
+ 	nesvnic->netdev = netdev;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/iw_nes_300_to_2_6_13.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/iw_nes_300_to_2_6_13.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/iw_nes_300_to_2_6_13.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,7 +1,7 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_13_patch/drivers/infiniband/hw/nes/nes_nic.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_13_patch/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:53:09.000000000 -0600
-@@ -1640,7 +1640,6 @@ struct net_device *nes_netdev_init(struc
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_13_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1628,7 +1628,6 @@ struct net_device *nes_netdev_init(struc
  	netdev->dev_addr[3] = (u8)(u64temp>>16);
  	netdev->dev_addr[4] = (u8)(u64temp>>8);
  	netdev->dev_addr[5] = (u8)u64temp;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/iw_nes_400_to_2_6_9.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/iw_nes_400_to_2_6_9.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/iw_nes_400_to_2_6_9.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,7 +1,7 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_verbs.c nes.2_6_9_patch/drivers/infiniband/hw/nes/nes_verbs.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_verbs.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_9_patch/drivers/infiniband/hw/nes/nes_verbs.c	2008-11-07 15:53:36.000000000 -0600
-@@ -1120,6 +1120,8 @@ static int nes_setup_mmap_qp(struct nes_
+--- linux-2.6/drivers/infiniband/hw/nes/nes_verbs.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_9_patch/drivers/infiniband/hw/nes/nes_verbs.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1017,6 +1017,8 @@ static int nes_setup_mmap_qp(struct nes_
  {
  	void *mem;
  	struct nes_device *nesdev = nesvnic->nesdev;
@@ -10,7 +10,7 @@
  
  	nesqp->qp_mem_size = (sizeof(struct nes_hw_qp_wqe) * sq_size) +
  			(sizeof(struct nes_hw_qp_wqe) * rq_size) +
-@@ -1137,6 +1139,17 @@ static int nes_setup_mmap_qp(struct nes_
+@@ -1034,6 +1036,17 @@ static int nes_setup_mmap_qp(struct nes_
  			"host descriptor rings located @ %p (pa = 0x%08lX.) size = %u.\n",
  			mem, (unsigned long)nesqp->hwqp.sq_pbase, nesqp->qp_mem_size);
  
@@ -28,7 +28,7 @@
  	memset(mem, 0, nesqp->qp_mem_size);
  
  	nesqp->hwqp.sq_vbase = mem;
-@@ -1509,6 +1522,8 @@ static int nes_destroy_qp(struct ib_qp *
+@@ -1409,6 +1422,8 @@ static int nes_destroy_qp(struct ib_qp *
  	/* struct nes_vnic *nesvnic = to_nesvnic(ibqp->device); */
  	struct nes_ucontext *nes_ucontext;
  	struct ib_qp_attr attr;
@@ -37,7 +37,7 @@
  	struct iw_cm_id *cm_id;
  	struct iw_cm_event cm_event;
  	int ret;
-@@ -1552,6 +1567,17 @@ static int nes_destroy_qp(struct ib_qp *
+@@ -1452,6 +1467,17 @@ static int nes_destroy_qp(struct ib_qp *
  			if (nes_ucontext->first_free_wq > nesqp->mmap_sq_db_index) {
  				nes_ucontext->first_free_wq = nesqp->mmap_sq_db_index;
  			}

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/mlx4_0060_sysfs.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/mlx4_0060_sysfs.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/mlx4_0060_sysfs.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,18 +1,18 @@
-From 70e57d4e8e8df5b452a13f9b6a3c07f2df09e8a4 Mon Sep 17 00:00:00 2001
+From 20f52545712c4d0b91fb96df72ea5b1818685bc5 Mon Sep 17 00:00:00 2001
 From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
-Date: Thu, 4 Dec 2008 13:40:39 +0200
+Date: Thu, 22 Jan 2009 09:41:18 +0200
 Subject: [PATCH] mlx4: Sysfs backport for RHAS4
 
 Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
 ---
- drivers/net/mlx4/main.c |   90 ++++++++++++++++++++++++++++++++--------------
- 1 files changed, 62 insertions(+), 28 deletions(-)
+ drivers/net/mlx4/main.c |   68 +++++++++++++++++++++++++++++++++++-----------
+ 1 files changed, 51 insertions(+), 17 deletions(-)
 
 diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
-index 7df678d..1c01c38 100644
+index f87ebbc..fce2589 100644
 --- a/drivers/net/mlx4/main.c
 +++ b/drivers/net/mlx4/main.c
-@@ -399,18 +399,13 @@ out:
+@@ -397,18 +397,13 @@ out:
  	return err;
  }
  
@@ -34,7 +34,7 @@
  		sprintf(buf, "auto (%s)\n", type);
  	else
  		sprintf(buf, "%s\n", type);
-@@ -418,14 +413,33 @@ static ssize_t show_port_type(struct device *dev,
+@@ -416,14 +411,33 @@ static ssize_t show_port_type(struct device *dev,
  	return strlen(buf);
  }
  
@@ -42,11 +42,7 @@
 -			     struct device_attribute *attr,
 -			     const char *buf, size_t count)
 +static ssize_t show_port_type1(struct device *dev, char *buf)
- {
--	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
--						   port_attr);
--	struct mlx4_dev *mdev = info->dev;
--	struct mlx4_priv *priv = mlx4_priv(mdev);
++{
 +	struct pci_dev *pdev = to_pci_dev(dev);
 +	struct mlx4_dev *mdev = pci_get_drvdata(pdev);
 +
@@ -67,63 +63,18 @@
 +		return -ENODEV;
 +}
 +
-+static ssize_t store_port(struct mlx4_dev *dev, int port,
++static ssize_t store_port(struct mlx4_dev *mdev, int port,
 +			  const char *buf, size_t count)
-+{
-+	struct mlx4_priv *priv = mlx4_priv(dev);
+ {
+-	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
+-						   port_attr);
+-	struct mlx4_dev *mdev = info->dev;
+ 	struct mlx4_priv *priv = mlx4_priv(mdev);
 +	struct mlx4_port_info *info = &priv->port[port];
  	enum mlx4_port_type types[MLX4_MAX_PORTS];
- 	enum mlx4_port_type tmp_type;
+ 	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
  	int i;
-@@ -438,50 +452,70 @@ static ssize_t set_port_type(struct device *dev,
- 	else if (!strcmp(buf, "auto\n"))
- 		info->tmp_type = MLX4_PORT_TYPE_AUTO;
- 	else {
--		mlx4_err(mdev, "%s is not supported port type\n", buf);
-+		mlx4_err(dev, "%s is not supported port type\n", buf);
- 		return -EINVAL;
- 	}
- 
- 	mutex_lock(&priv->port_mutex);
--	mdev->caps.possible_type[info->port] = info->tmp_type;
-+	dev->caps.possible_type[info->port] = info->tmp_type; 
- 	if (info->tmp_type == MLX4_PORT_TYPE_AUTO) {
--		err = mlx4_SENSE_PORT(mdev, info->port, &tmp_type);
-+		err = mlx4_SENSE_PORT(dev, info->port, &tmp_type);
- 		if (!err && (tmp_type == MLX4_PORT_TYPE_ETH ||
- 			     tmp_type == MLX4_PORT_TYPE_IB)) {
- 			info->tmp_type = tmp_type;
- 		}
- 	}
- 
--	for (i = 0; i < mdev->caps.num_ports; i++)
-+	for (i = 0; i < dev->caps.num_ports; i++)
- 		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
--					mdev->caps.possible_type[i+1];
-+					dev->caps.possible_type[i+1];
- 
- 	if (priv->trig) {
--		if (++priv->changed_ports < mdev->caps.num_ports)
-+		if (++priv->changed_ports < dev->caps.num_ports)
- 			goto out;
- 		else
- 			priv->trig = priv->changed_ports = 0;
- 	}
--	err = mlx4_check_port_params(mdev, types);
-+	err = mlx4_check_port_params(dev, types);
- 	if (err)
- 		goto out;
- 
--	for (i = 0; i < mdev->caps.num_ports; i++) {
-+	for (i = 0; i < dev->caps.num_ports; i++) {
- 		priv->port[i + 1].tmp_type = 0;
- 	}
- 
--	err = mlx4_change_port_types(mdev, types);
-+	err = mlx4_change_port_types(dev, types);
- 
- out:
- 	mutex_unlock(&priv->port_mutex);
+@@ -482,6 +496,24 @@ out:
  	return err ? err : count;
  }
  
@@ -145,9 +96,11 @@
 +	return store_port(mdev, 2, buf, count);
 +}
 +
-+
- static ssize_t trigger_port(struct device *dev,
--			    struct device_attribute *attr,
+ /*
+  * This function is invoked if user wants to modify all port types
+  * at once. We will wait for all the ports to be assigned new values,
+@@ -491,7 +523,9 @@ static ssize_t trigger_port(struct device *dev,
+ 			    struct device_attribute *attr,
  			    const char *buf, size_t count)
  {
 -	struct mlx4_priv *priv = container_of(attr, struct mlx4_priv, trigger_attr);
@@ -157,7 +110,7 @@
  	if (!priv)
  		return -ENODEV;
  
-@@ -1121,8 +1155,8 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
+@@ -1131,8 +1165,8 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
  
  	sprintf(info->dev_name, "mlx4_port%d", port);
  	memcpy(&info->port_attr.attr, &attr, sizeof(attr));
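
The refreshed hunks above still make the same transformation as before: the upstream show/store callbacks receive a struct device_attribute pointer and recover their mlx4_port_info with container_of(), while the RHAS4-era callbacks are only passed the device and a buffer, so the backport has to go through pci_get_drvdata(to_pci_dev(dev)) instead. For readers unfamiliar with the idiom being given up, here is a self-contained, purely illustrative sketch of what container_of() does; the structure names are invented for the example.

#include <stddef.h>
#include <stdio.h>

/* Illustration only, not from this commit.  Given a pointer to a member
 * embedded in a larger structure, recover the enclosing structure by
 * subtracting the member's offset.  This is what the upstream attribute
 * callbacks do and what the RHAS4 backport can no longer rely on. */
#define container_of_example(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct example_attr { const char *name; };

struct example_port {
        int port;
        struct example_attr attr;   /* embedded member */
};

int main(void)
{
        struct example_port info = { .port = 1, .attr = { "port_type" } };
        struct example_attr *ap = &info.attr;
        struct example_port *pp =
                container_of_example(ap, struct example_port, attr);

        printf("recovered port %d via the embedded member\n", pp->port);
        return 0;
}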

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/mlx4_en_0099_no_multiqueue.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/mlx4_en_0099_no_multiqueue.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/mlx4_en_0099_no_multiqueue.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,73 @@
+diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
+index bce700a..a9ee0d7 100644
+--- a/drivers/net/mlx4/en_netdev.c
++++ b/drivers/net/mlx4/en_netdev.c
+@@ -963,7 +963,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	int i;
+ 	int err;
+ 
+-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
++	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+ 	if (dev == NULL) {
+ 		mlx4_err(mdev, "Net device allocation failed\n");
+ 		return -ENOMEM;
+@@ -1036,7 +1036,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	dev->open = &mlx4_en_open;
+ 	dev->stop = &mlx4_en_close;
+ 	dev->hard_start_xmit = &mlx4_en_xmit;
+-	dev->select_queue = &mlx4_en_select_queue;
+ 	dev->get_stats = &mlx4_en_get_stats;
+ 	dev->set_multicast_list = &mlx4_en_set_multicast;
+ 	dev->set_mac_address = &mlx4_en_set_mac;
+diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
+index 3d8246f..72e166f 100644
+--- a/drivers/net/mlx4/en_tx.c
++++ b/drivers/net/mlx4/en_tx.c
+@@ -392,7 +392,7 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+ 			 *   transmission on that ring would stop the queue.
+ 			 */
+ 			ring->blocked = 0;
+-			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
++			netif_wake_queue(dev);
+ 			priv->port_stats.wake_queue++;
+ 		}
+ 	}
+@@ -612,7 +612,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
+ 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ }
+ 
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
++static int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	u16 vlan_tag = 0;
+@@ -703,7 +703,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+-	tx_ind = skb->queue_mapping;
++	tx_ind = mlx4_en_select_queue(dev, skb);
+ 	ring = &priv->tx_ring[tx_ind];
+ 	if (priv->vlgrp && vlan_tx_tag_present(skb))
+ 		vlan_tag = vlan_tx_tag_get(skb);
+@@ -713,7 +713,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ 		/* every full Tx ring stops queue.
+ 		 * TODO: implement multi-queue support (per-queue stop) */
+-		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
++		netif_stop_queue(dev);
+ 		ring->blocked = 1;
+ 		priv->port_stats.queue_stopped++;
+ 
+diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
+index adef17c..995e318 100644
+--- a/drivers/net/mlx4/mlx4_en.h
++++ b/drivers/net/mlx4/mlx4_en.h
+@@ -521,7 +521,6 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq);
+ void mlx4_en_poll_tx_cq(unsigned long data);
+ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+ 
+ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+ 			   u32 size, u16 stride);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/mlx4_en_0100_to_2.6.24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/mlx4_en_0100_to_2.6.24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/mlx4_en_0100_to_2.6.24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,33 +1,5 @@
-From 6d0a6cc6981404e95297924c9a8f169b3447a98e Mon Sep 17 00:00:00 2001
-From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
-Date: Thu, 2 Oct 2008 11:26:30 +0300
-Subject: [PATCH] mlx4_en: LRO backport patch
-
-Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
----
- drivers/net/mlx4/Makefile    |    2 +-
- drivers/net/mlx4/en_cq.c     |   23 ++-
- drivers/net/mlx4/en_lro.c    |  529 ++++++++++++++++++++++++++++++++++++++++++
- drivers/net/mlx4/en_netdev.c |    5 +-
- drivers/net/mlx4/en_params.c |   19 +--
- drivers/net/mlx4/en_rx.c     |  121 +++-------
- drivers/net/mlx4/en_tx.c     |    1 +
- drivers/net/mlx4/mlx4_en.h   |   60 +++++-
- 8 files changed, 646 insertions(+), 114 deletions(-)
- create mode 100644 drivers/net/mlx4/en_lro.c
-
-diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
-index acb1b91..e6bfdec 100644
---- a/drivers/net/mlx4/Makefile
-+++ b/drivers/net/mlx4/Makefile
-@@ -5,4 +5,4 @@ mlx4_core-y :=  alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-
- obj-$(CONFIG_MLX4_EN)                  += mlx4_en.o
-
--mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o
-+mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o en_lro.o
 diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
-index cf9c5df..2348bfc 100644
+index a4d6e46..440633a 100644
 --- a/drivers/net/mlx4/en_cq.c
 +++ b/drivers/net/mlx4/en_cq.c
 @@ -34,6 +34,7 @@
@@ -38,7 +10,7 @@
  
  #include "mlx4_en.h"
  
-@@ -100,8 +101,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -106,8 +107,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  		cq->timer.function = mlx4_en_poll_tx_cq;
  		cq->timer.data = (unsigned long) cq;
  	} else {
@@ -58,7 +30,7 @@
  	}
  
  	return 0;
-@@ -123,8 +133,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -129,8 +139,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  
  	if (cq->is_tx)
  		del_timer(&cq->timer);
@@ -74,548 +46,8 @@
  
  	mlx4_cq_free(mdev->dev, &cq->mcq);
  }
-diff --git a/drivers/net/mlx4/en_lro.c b/drivers/net/mlx4/en_lro.c
-new file mode 100644
-index 0000000..9d17376
---- /dev/null
-+++ b/drivers/net/mlx4/en_lro.c
-@@ -0,0 +1,534 @@
-+/*
-+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses.  You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ *     Redistribution and use in source and binary forms, with or
-+ *     without modification, are permitted provided that the following
-+ *     conditions are met:
-+ *
-+ *      - Redistributions of source code must retain the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer.
-+ *
-+ *      - Redistributions in binary form must reproduce the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer in the documentation and/or other materials
-+ *        provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ */
-+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <net/tcp.h>
-+#include <linux/if_vlan.h>
-+#include <linux/delay.h>
-+
-+#include "mlx4_en.h"
-+
-+/* LRO hash function - using sum of source and destination port LSBs is
-+ * good enough */
-+#define LRO_INDEX(th, size) \
-+	((*((u8*) &th->source + 1) + *((u8*) &th->dest + 1)) & (size - 1))
-+
-+/* #define CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+static void mlx4_en_lro_validate(struct mlx4_en_priv* priv, struct mlx4_en_lro *lro)
-+{
-+	int i;
-+	int size, size2;
-+	struct sk_buff *skb = lro->skb;
-+	skb_frag_t *frags;
-+	int len, len2;
-+	int cur_skb = 0;
-+
-+	/* Sum fragment sizes of first skb */
-+	len = skb->len;
-+	size = skb_headlen(skb);
-+	frags = skb_shinfo(skb)->frags;
-+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+		size += frags[i].size;
-+	}
-+
-+	/* Add in fragments of linked skb's */
-+	skb = skb_shinfo(skb)->frag_list;
-+	while (skb) {
-+		cur_skb++;
-+		len2 = skb->len;
-+		if (skb_headlen(skb)) {
-+			mlx4_err(priv->mdev, "Bad LRO format: non-zero headlen "
-+				  "in fraglist (skb:%d)\n", cur_skb);
-+			return;
-+		}
-+
-+		size2 = 0;
-+		frags = skb_shinfo(skb)->frags;
-+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+			size2 += frags[i].size;
-+		}
-+
-+		if (size2 != len2) {
-+			mlx4_err(priv->mdev, "Bad skb size:%d in LRO fraglist. "
-+			          "Expected:%d (skb:%d)\n", size2, len2, cur_skb);
-+			return;
-+		}
-+		size += size2;
-+		skb = skb->next;
-+	}
-+
-+	if (size != len)
-+		mlx4_err(priv->mdev, "Bad LRO size:%d expected:%d\n", size, len);
-+}
-+#endif /* MLX4_EN_DEBUG_LRO */
-+
-+static void mlx4_en_lro_flush_single(struct mlx4_en_priv* priv,
-+		   struct mlx4_en_rx_ring* ring, struct mlx4_en_lro *lro)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct sk_buff *skb = lro->skb;
-+	struct iphdr *iph = (struct iphdr *) skb->data;
-+	struct tcphdr *th = (struct tcphdr *)(iph + 1);
-+	unsigned int headlen = skb_headlen(skb);
-+	__wsum tcp_hdr_csum;
-+	u32 *ts;
-+
-+	/* Update IP length and checksum */
-+	iph->tot_len = htons(lro->tot_len);
-+	iph->check = 0;
-+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-+
-+	/* Update latest TCP ack, window, psh, and timestamp */
-+	th->ack_seq = lro->ack_seq;
-+	th->window = lro->window;
-+	th->psh = !!lro->psh;
-+	if (lro->has_timestamp) {
-+		ts = (u32 *) (th + 1);
-+		ts[1] = htonl(lro->tsval);
-+		ts[2] = lro->tsecr;
-+	}
-+	th->check = 0;
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, 0);
-+	lro->data_csum = csum_add(lro->data_csum, tcp_hdr_csum);
-+	th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
-+				      lro->tot_len - (iph->ihl << 2),
-+				      IPPROTO_TCP, lro->data_csum);
-+
-+	/* Update skb */
-+	skb->len = lro->tot_len;
-+	skb->data_len = lro->tot_len - headlen;
-+	skb->truesize = skb->len + sizeof(struct sk_buff);
-+	skb_shinfo(skb)->gso_size = lro->mss;
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+	mlx4_en_lro_validate(priv, lro);
-+#endif /* CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+	/* Push it up the stack */
-+	if (priv->vlgrp && lro->has_vlan)
-+		vlan_hwaccel_receive_skb(skb, priv->vlgrp,
-+					be16_to_cpu(lro->vlan_prio));
-+	else
-+		netif_receive_skb(skb);
-+	priv->dev->last_rx = jiffies;
-+
-+	/* Increment stats */
-+	priv->port_stats.lro_flushed++;
-+
-+	/* Move session back to the free list */
-+	hlist_del(&lro->node);
-+	hlist_del(&lro->flush_node);
-+	hlist_add_head(&lro->node, &ring->lro_free);
-+}
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_flush, flush_node) {
-+		if (all || time_after(jiffies, lro->expires))
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+	}
-+}
-+
-+static inline int mlx4_en_lro_append(struct mlx4_en_priv *priv,
-+				   struct mlx4_en_lro *lro,
-+				   struct mlx4_en_rx_desc *rx_desc,
-+				   struct skb_frag_struct *skb_frags,
-+				   struct mlx4_en_rx_alloc *page_alloc,
-+				   unsigned int data_len,
-+				   int hlen)
-+{
-+	struct sk_buff *skb = lro->skb_last;
-+	struct skb_shared_info *info;
-+	struct skb_frag_struct *frags_copy;
-+	int nr_frags;
-+
-+	if (skb_shinfo(skb)->nr_frags + priv->num_frags > MAX_SKB_FRAGS)
-+		return -ENOMEM;
-+
-+	info = skb_shinfo(skb);
-+
-+	/* Copy fragments from descriptor ring to skb */
-+	frags_copy = info->frags + info->nr_frags;
-+	nr_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
-+						frags_copy,
-+						page_alloc,
-+						data_len + hlen);
-+	if (!nr_frags) {
-+		mlx4_dbg(DRV, priv, "Failed completing rx desc during LRO append\n");
-+		return -ENOMEM;
-+	}
-+
-+	/* Skip over headers */
-+	frags_copy[0].page_offset += hlen;
-+
-+	if (nr_frags == 1)
-+		frags_copy[0].size = data_len;
-+	else {
-+		/* Adjust size of last fragment to match packet length.
-+		 * Note: if this fragment is also the first one, the
-+		 *       operation is completed in the next line */
-+		frags_copy[nr_frags - 1].size = hlen + data_len -
-+				priv->frag_info[nr_frags - 1].frag_prefix_size;
-+
-+		/* Adjust size of first fragment */
-+		frags_copy[0].size -= hlen;
-+	}
-+
-+	/* Update skb bookkeeping */
-+	skb->len += data_len;
-+	skb->data_len += data_len;
-+	info->nr_frags += nr_frags;
-+	return 0;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_find_session(struct mlx4_en_dev *mdev,
-+						       struct mlx4_en_rx_ring *ring,
-+						       struct iphdr *iph,
-+						       struct tcphdr *th)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node;
-+	int index = LRO_INDEX(th, mdev->profile.num_lro);
-+	struct hlist_head *list = &ring->lro_hash[index];
-+
-+	hlist_for_each_entry(lro, node, list, node) {
-+		if (lro->sport_dport == *((u32*) &th->source) &&
-+		    lro->saddr == iph->saddr &&
-+		    lro->daddr == iph->daddr)
-+			return lro;
-+	}
-+	return NULL;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_alloc_session(struct mlx4_en_priv *priv,
-+							struct mlx4_en_rx_ring *ring)
-+{
-+	return hlist_empty(&ring->lro_free) ? NULL :
-+		hlist_entry(ring->lro_free.first, struct mlx4_en_lro, node);
-+}
-+
-+static __wsum mlx4_en_lro_tcp_data_csum(struct iphdr *iph,
-+					struct tcphdr *th, int len)
-+{
-+	__wsum tcp_csum;
-+	__wsum tcp_hdr_csum;
-+	__wsum tcp_ps_hdr_csum;
-+
-+	tcp_csum = ~csum_unfold(th->check);
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, tcp_csum);
-+
-+	tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-+					     len + (th->doff << 2),
-+					     IPPROTO_TCP, 0);
-+
-+	return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
-+			tcp_ps_hdr_csum);
-+}
-+
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+					  struct mlx4_en_rx_desc *rx_desc,
-+					  struct skb_frag_struct *skb_frags,
-+					  unsigned int length,
-+					  struct mlx4_cqe *cqe)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct mlx4_en_lro *lro;
-+	struct sk_buff *skb;
-+	struct iphdr *iph;
-+	struct tcphdr *th;
-+	dma_addr_t dma;
-+	int tcp_hlen;
-+	int tcp_data_len;
-+	int hlen;
-+	u16 ip_len;
-+	void *va;
-+	u32 *ts;
-+	u32 seq;
-+	u32 tsval = (u32) ~0UL;
-+	u32 tsecr = 0;
-+	u32 ack_seq;
-+	u16 window;
-+
-+	/* This packet is eligible for LRO if it is:
-+	 * - DIX Ethernet (type interpretation)
-+	 * - TCP/IP (v4)
-+	 * - without IP options
-+	 * - not an IP fragment */
-+	if (!mlx4_en_can_lro(cqe->status))
-+			return -1;
-+
-+	/* Get pointer to TCP header. We already know that the packet is DIX Ethernet/IPv4/TCP
-+	 * with no VLAN (HW stripped it) and no IP options */
-+	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
-+	iph = va + ETH_HLEN;
-+	th = (struct tcphdr *)(iph + 1);
-+
-+	/* Synchronize headers for processing */
-+	dma = be64_to_cpu(rx_desc->data[0].addr);
-+#define MAX_LRO_HEADER		(ETH_HLEN + \
-+				 sizeof(*iph) + \
-+				 sizeof(*th) + \
-+				 TCPOLEN_TSTAMP_ALIGNED)
-+	dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
-+				      MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+
-+	/* We only handle aligned timestamp options */
-+	tcp_hlen = (th->doff << 2);
-+	if (tcp_hlen == sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) {
-+		ts = (u32*) (th + 1);
-+		if (unlikely(*ts != htonl((TCPOPT_NOP << 24) |
-+					  (TCPOPT_NOP << 16) |
-+					  (TCPOPT_TIMESTAMP << 8) |
-+					  TCPOLEN_TIMESTAMP)))
-+			goto sync_device;
-+		tsval = ntohl(ts[1]);
-+		tsecr = ts[2];
-+	} else if (tcp_hlen != sizeof(*th))
-+		goto sync_device;
-+	
-+
-+	/* At this point we know we have a TCP packet that is likely to be
-+	 * eligible for LRO. Therefore, see now if we have an outstanding
-+	 * session that corresponds to this packet so we could flush it if
-+	 * something still prevents LRO */
-+	lro = mlx4_en_lro_find_session(mdev, ring, iph, th);
-+
-+	/* ensure no bits set besides ack or psh */
-+	if (th->fin || th->syn || th->rst || th->urg || th->ece ||
-+	    th->cwr || !th->ack) {
-+		if (lro) {
-+			/* First flush session to keep packets in-order */
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		}
-+		goto sync_device;
-+	}
-+
-+	/* Get ip length and verify that the frame is big enough */
-+	ip_len = ntohs(iph->tot_len);
-+	if (unlikely(length < ETH_HLEN + ip_len)) {
-+		mlx4_warn(mdev, "Cannot LRO - ip payload exceeds frame!\n");
-+		goto sync_device;
-+	}
-+
-+	/* Get TCP payload length */
-+	tcp_data_len = ip_len - tcp_hlen - sizeof(struct iphdr);
-+	seq = ntohl(th->seq);
-+	if (!tcp_data_len)
-+		goto flush_session;
-+
-+	if (lro) {
-+		/* Check VLAN tag */
-+		if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+			if (cqe->sl_vid != lro->vlan_prio || !lro->has_vlan) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else if (lro->has_vlan) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* Check sequence number */
-+		if (unlikely(seq != lro->next_seq)) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* If the cumulative IP length is over 64K, flush and start
-+		 * a new session */
-+		if (lro->tot_len + tcp_data_len > 0xffff) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto new_session;
-+		}
-+
-+		/* Check timestamps */
-+		if (tcp_hlen != sizeof(*th)) {
-+			if (unlikely(lro->tsval > tsval || !tsecr))
-+				goto sync_device;
-+		}
-+
-+		window = th->window;
-+		ack_seq = th->ack_seq;
-+		if (likely(tcp_data_len)) {
-+			/* Append the data! */
-+			hlen = ETH_HLEN + sizeof(struct iphdr) + tcp_hlen;
-+			if (mlx4_en_lro_append(priv, lro, rx_desc, skb_frags,
-+							ring->page_alloc,
-+							tcp_data_len, hlen)) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else {
-+			/* No data */
-+			dma_sync_single_range_for_device(&mdev->dev->pdev->dev, dma,
-+							 0, MAX_LRO_HEADER,
-+							 DMA_FROM_DEVICE);
-+		}
-+
-+		/* Update session */
-+		lro->psh |= th->psh;
-+		lro->next_seq += tcp_data_len;
-+		lro->data_csum = csum_block_add(lro->data_csum,
-+					mlx4_en_lro_tcp_data_csum(iph, th,
-+								  tcp_data_len),
-+					lro->tot_len);
-+		lro->tot_len += tcp_data_len;
-+		lro->tsval = tsval;
-+		lro->tsecr = tsecr;
-+		lro->ack_seq = ack_seq;
-+		lro->window = window;
-+		if (tcp_data_len > lro->mss)
-+			lro->mss = tcp_data_len;
-+		priv->port_stats.lro_aggregated++;
-+		if (th->psh)
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		return 0;
-+	}
-+
-+new_session:
-+	if (th->psh)
-+		goto sync_device;
-+	lro = mlx4_en_lro_alloc_session(priv, ring);
-+	if (lro) {
-+		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags, ring->page_alloc,
-+							     ETH_HLEN + ip_len);
-+		if (skb) {
-+			int index;
-+
-+			/* Add in the skb */
-+			lro->skb = skb;
-+			lro->skb_last = skb;
-+			skb->protocol = eth_type_trans(skb, priv->dev);
-+			skb->ip_summed = CHECKSUM_UNNECESSARY;
-+
-+			/* Initialize session */
-+			lro->saddr = iph->saddr;
-+			lro->daddr = iph->daddr;
-+			lro->sport_dport = *((u32*) &th->source);
-+
-+			lro->next_seq = seq + tcp_data_len;
-+			lro->tot_len = ip_len;
-+			lro->psh = th->psh;
-+			lro->ack_seq = th->ack_seq;
-+			lro->window = th->window;
-+			lro->mss = tcp_data_len;
-+			lro->data_csum = mlx4_en_lro_tcp_data_csum(iph, th,
-+						tcp_data_len);
-+
-+			/* Handle vlans */
-+			if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+				lro->vlan_prio = cqe->sl_vid;
-+				lro->has_vlan = 1;
-+			} else
-+				lro->has_vlan = 0;
-+
-+			/* Handle timestamps */
-+			if (tcp_hlen != sizeof(*th)) {
-+				lro->tsval = tsval;
-+				lro->tsecr = tsecr;
-+				lro->has_timestamp = 1;
-+			} else {
-+				lro->tsval = (u32) ~0UL;
-+				lro->has_timestamp = 0;
-+			}
-+
-+			/* Activate this session */
-+			lro->expires = jiffies + HZ / 25;
-+			hlist_del(&lro->node);
-+			index = LRO_INDEX(th, mdev->profile.num_lro);
-+
-+			hlist_add_head(&lro->node, &ring->lro_hash[index]);
-+			hlist_add_head(&lro->flush_node, &ring->lro_flush);
-+			priv->port_stats.lro_aggregated++;
-+			return 0;
-+		}
-+	} else {
-+		priv->port_stats.lro_no_desc++;
-+	}
-+
-+flush_session:
-+	if (lro)
-+		mlx4_en_lro_flush_single(priv, ring, lro);
-+sync_device:
-+	dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
-+					 MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+	return -1;
-+}
-+
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_free, node) {
-+		hlist_del(&lro->node);
-+		kfree(lro);
-+	}
-+	kfree(ring->lro_hash);
-+}
-+
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro)
-+{
-+	struct mlx4_en_lro *lro;
-+	int i;
-+
-+	INIT_HLIST_HEAD(&ring->lro_free);
-+	INIT_HLIST_HEAD(&ring->lro_flush);
-+	ring->lro_hash = kmalloc(sizeof(struct hlist_head) * num_lro,
-+				 GFP_KERNEL);
-+	if (!ring->lro_hash)
-+		return -ENOMEM;
-+
-+	for (i = 0; i < num_lro; i++) {
-+		INIT_HLIST_HEAD(&ring->lro_hash[i]);
-+		lro = kzalloc(sizeof(struct mlx4_en_lro), GFP_KERNEL);
-+		if (!lro) {
-+			mlx4_en_lro_destroy(ring);
-+			return -ENOMEM;
-+		}
-+		INIT_HLIST_NODE(&lro->node);
-+		INIT_HLIST_NODE(&lro->flush_node);
-+		hlist_add_head(&lro->node, &ring->lro_free);
-+	}
-+	return 0;
-+}
-+
-+
 diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
-index 6199f4c..92f91d2 100644
+index 2d57ae6..3b394d7 100644
 --- a/drivers/net/mlx4/en_netdev.c
 +++ b/drivers/net/mlx4/en_netdev.c
 @@ -333,7 +333,8 @@ void mlx4_en_netpoll(struct net_device *dev)
@@ -625,10 +57,10 @@
 -		napi_synchronize(&cq->napi);
 +		while (test_bit(__LINK_STATE_RX_SCHED, &cq->poll_dev->state))
 +			msleep(1);
- 		mlx4_en_process_rx_cq(dev, cq, 0);
- 		spin_unlock_irqrestore(&cq->lock, flags);
- 	}
-@@ -757,8 +758,6 @@ void mlx4_en_stop_port(struct net_device *dev)
+ 		if (priv->rx_ring[i].use_frags)
+ 			mlx4_en_process_rx_cq(dev, cq, 0);
+ 		else
+@@ -755,8 +756,6 @@ void mlx4_en_stop_port(struct net_device *dev)
  	/* Free RX Rings */
  	for (i = 0; i < priv->rx_ring_num; i++) {
  		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -637,226 +69,11 @@
  		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
  	}
  }
-diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
-index 6e96d0a..2b0ca84 100644
---- a/drivers/net/mlx4/en_params.c
-+++ b/drivers/net/mlx4/en_params.c
-@@ -142,21 +142,6 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
-  * Ethtool support
-  */
- 
--static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
--{
--	int i;
--
--	priv->port_stats.lro_aggregated = 0;
--	priv->port_stats.lro_flushed = 0;
--	priv->port_stats.lro_no_desc = 0;
--
--	for (i = 0; i < priv->rx_ring_num; i++) {
--		priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
--		priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
--		priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
--	}
--}
--
- static void
- mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
- {
-@@ -266,8 +251,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
- 
- 	spin_lock_bh(&priv->stats_lock);
- 
--	mlx4_en_update_lro_stats(priv);
--
- 	for (i = 0; i < NUM_MAIN_STATS; i++)
- 		data[index++] = ((unsigned long *) &priv->stats)[i];
- 	for (i = 0; i < NUM_PORT_STATS; i++)
-@@ -457,7 +440,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
- 	.get_rx_csum = mlx4_en_get_rx_csum,
- 	.set_rx_csum = mlx4_en_set_rx_csum,
- 	.get_tx_csum = ethtool_op_get_tx_csum,
--	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
-+	.set_tx_csum = ethtool_op_set_tx_csum,
- 	.get_strings = mlx4_en_get_strings,
- 	.get_sset_count = mlx4_en_get_sset_count,
- 	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
 diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
-index 5a95d74..16f07bc 100644
+index 9d144cd..14b5faf 100644
 --- a/drivers/net/mlx4/en_rx.c
 +++ b/drivers/net/mlx4/en_rx.c
-@@ -37,6 +37,7 @@
- #include <linux/if_ether.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/etherdevice.h>
- 
- #include "mlx4_en.h"
- 
-@@ -51,18 +52,6 @@ static void mlx4_en_srq_event(struct mlx
- 	return;
- }
- 
--static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
--				   void **ip_hdr, void **tcpudp_hdr,
--				   u64 *hdr_flags, void *priv)
--{
--	*mac_hdr = page_address(frags->page) + frags->page_offset;
--	*ip_hdr = *mac_hdr + ETH_HLEN;
--	*tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
--	*hdr_flags = LRO_IPV4 | LRO_TCP;
--
--	return 0;
--}
--
- static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
- 			      struct mlx4_en_rx_desc *rx_desc,
- 			      struct skb_frag_struct *skb_frags,
-@@ -372,23 +361,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- 	}
- 	ring->buf = ring->wqres.buf.direct.buf;
- 
--	/* Configure lro mngr */
--	memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
--	ring->lro.dev = priv->dev;
--	ring->lro.features = LRO_F_NAPI;
--	ring->lro.frag_align_pad = NET_IP_ALIGN;
--	ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
--	ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
--	ring->lro.max_desc = mdev->profile.num_lro;
--	ring->lro.max_aggr = MAX_SKB_FRAGS;
--	ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
--				    sizeof(struct net_lro_desc),
--				    GFP_KERNEL);
--	if (!ring->lro.lro_arr) {
--		mlx4_err(mdev, "Failed to allocate lro array\n");
-+	/* Allocate LRO sessions */
-+	if (mlx4_en_lro_init(ring, mdev->profile.num_lro)) {
-+		mlx4_err(mdev, "Failed allocating lro sessions\n");
- 		goto err_map;
- 	}
--	ring->lro.get_frag_header = mlx4_en_get_frag_header;
- 
- 	return 0;
- 
-@@ -501,7 +478,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 
--	kfree(ring->lro.lro_arr);
-+	mlx4_en_lro_destroy(ring);
- 	mlx4_en_unmap_buffer(&ring->wqres.buf);
- 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
- 	vfree(ring->rx_info);
-@@ -522,12 +499,12 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- 
- 
- /* Unmap a completed descriptor and free unused pages */
--static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
--				    struct mlx4_en_rx_desc *rx_desc,
--				    struct skb_frag_struct *skb_frags,
--				    struct skb_frag_struct *skb_frags_rx,
--				    struct mlx4_en_rx_alloc *page_alloc,
--				    int length)
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct mlx4_en_frag_info *frag_info;
-@@ -570,11 +547,11 @@ fail:
- }
- 
- 
--static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
--				      struct mlx4_en_rx_desc *rx_desc,
--				      struct skb_frag_struct *skb_frags,
--				      struct mlx4_en_rx_alloc *page_alloc,
--				      unsigned int length)
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct sk_buff *skb;
-@@ -663,11 +640,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 	struct mlx4_cqe *cqe;
- 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
- 	struct skb_frag_struct *skb_frags;
--	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
- 	struct mlx4_en_rx_desc *rx_desc;
- 	struct sk_buff *skb;
- 	int index;
--	int nr;
- 	unsigned int length;
- 	int polled = 0;
- 	int ip_summed;
-@@ -718,38 +693,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 			if ((cqe->status & MLX4_CQE_STATUS_IPOK) &&
- 			    (cqe->checksum == 0xffff)) {
- 				priv->port_stats.rx_chksum_good++;
--				/* This packet is eligible for LRO if it is:
--				 * - DIX Ethernet (type interpretation)
--				 * - TCP/IP (v4)
--				 * - without IP options
--				 * - not an IP fragment */
--				if (mlx4_en_can_lro(cqe->status) &&
--				    dev->features & NETIF_F_LRO) {
--
--					nr = mlx4_en_complete_rx_desc(
--						priv, rx_desc,
--						skb_frags, lro_frags,
--						ring->page_alloc, length);
--					if (!nr)
--						goto next;
--
--					if (priv->vlgrp && (cqe->vlan_my_qpn &
--						MLX4_CQE_VLAN_PRESENT_MASK)) {
--						lro_vlan_hwaccel_receive_frags(
--						       &ring->lro, lro_frags,
--						       length, length,
--						       priv->vlgrp,
--						       be16_to_cpu(cqe->sl_vid),
--						       NULL, 0);
--					} else
--						lro_receive_frags(&ring->lro,
--								  lro_frags,
--								  length,
--								  length,
--								  NULL, 0);
--
-+				if (mdev->profile.num_lro &&
-+				    !mlx4_en_lro_rx(priv, ring, rx_desc,
-+						    skb_frags, length, cqe))
- 					goto next;
--				}
- 
- 				/* LRO not possible, complete processing here */
- 				ip_summed = CHECKSUM_UNNECESSARY;
-@@ -790,13 +737,15 @@ next:
- 		if (++polled == budget) {
- 			/* We are here because we reached the NAPI budget -
- 			 * flush only pending LRO sessions */
--			lro_flush_all(&ring->lro);
-+			if (mdev->profile.num_lro)
-+				mlx4_en_lro_flush(priv, ring, 0);
- 			goto out;
- 		}
- 	}
- 
- 	/* If CQ is empty flush all LRO sessions unconditionally */
--	lro_flush_all(&ring->lro);
-+	if (mdev->profile.num_lro)
-+		mlx4_en_lro_flush(priv, ring, 1);
- 
- out:
- 	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
-@@ -820,30 +769,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
+@@ -1041,30 +1041,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
  	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
  
  	if (priv->port_up)
@@ -877,8 +94,8 @@
  	int done;
 +	int work = min(*budget, poll_dev->quota);
  
--	done = mlx4_en_process_rx_cq(dev, cq, budget);
-+	done = mlx4_en_process_rx_cq(dev, cq, work);
+-	done = cq->process_cq(dev, cq, budget);
++	done = cq->process_cq(dev, cq, work);
 +	dev->quota -= done;
 +	*budget -= done;
  
@@ -901,76 +118,11 @@
  }
  
  
-diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
-index 2297fec..59f8772 100644
---- a/drivers/net/mlx4/en_tx.c
-+++ b/drivers/net/mlx4/en_tx.c
-@@ -37,6 +37,7 @@
- #include <linux/skbuff.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/tcp.h>
- 
- #include "mlx4_en.h"
- 
 diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
-index 1011803..7c8c489 100644
+index ebaed67..456cd91 100644
 --- a/drivers/net/mlx4/mlx4_en.h
 +++ b/drivers/net/mlx4/mlx4_en.h
-@@ -38,7 +38,7 @@
- #include <linux/list.h>
- #include <linux/mutex.h>
- #include <linux/netdevice.h>
--#include <linux/inet_lro.h>
-+#include <net/checksum.h>
- 
- #include <linux/mlx4/device.h>
- #include <linux/mlx4/qp.h>
-@@ -268,11 +268,42 @@ struct mlx4_en_rx_desc {
- 	struct mlx4_wqe_data_seg data[0];
- };
- 
-+struct mlx4_en_lro {
-+	struct hlist_node node;
-+	struct hlist_node flush_node;
-+
-+	/* Id fields come first: */
-+	u32 saddr;
-+	u32 daddr;
-+	u32 sport_dport;
-+	u32 next_seq;
-+	u16 tot_len;
-+	u8 psh;
-+
-+	u32 tsval;
-+	u32 tsecr;
-+	u32 ack_seq;
-+	u16 window;
-+	__be16 vlan_prio;
-+	u16 has_vlan;
-+	u16 has_timestamp;
-+	u16 mss;
-+	__wsum  data_csum;
-+
-+	unsigned long expires;
-+	struct sk_buff *skb;
-+	struct sk_buff *skb_last;
-+};
-+
-+
- struct mlx4_en_rx_ring {
- 	struct mlx4_srq srq;
- 	struct mlx4_hwq_resources wqres;
- 	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
--	struct net_lro_mgr lro;
-+	struct mlx4_en_lro lro;
-+	struct hlist_head *lro_hash;
-+	struct hlist_head lro_free;
-+	struct hlist_head lro_flush;
- 	u32 size ;	/* number of Rx descs*/
- 	u32 actual_size;
- 	u32 size_mask;
-@@ -311,7 +342,7 @@ struct mlx4_en_cq {
+@@ -328,7 +328,7 @@ struct mlx4_en_cq {
  	int                     ring;
  	spinlock_t              lock;
  	struct net_device      *dev;
@@ -979,43 +131,12 @@
  	/* Per-core Tx cq processing support */
  	struct timer_list timer;
  	int size;
-@@ -541,7 +572,7 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- int mlx4_en_process_rx_cq(struct net_device *dev,
- 			  struct mlx4_en_cq *cq,
- 			  int budget);
+@@ -571,7 +571,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
+ int mlx4_en_process_rx_cq_skb(struct net_device *dev,
+ 			      struct mlx4_en_cq *cq,
+ 			      int budget);
 -int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
 +int mlx4_en_poll_rx_cq(struct net_device *poll_dev, int *budget);
  void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
  			     int is_tx, int rss, int qpn, int cqn, int srqn,
  			     struct mlx4_qp_context *context);
-@@ -552,6 +583,27 @@ void mlx4_en_calc_rx_buf(struct net_device *dev);
- void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
- 				 struct mlx4_en_rss_map *rss_map,
- 				 int num_entries, int num_rings);
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all);
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+		   struct mlx4_en_rx_desc *rx_desc,
-+		   struct skb_frag_struct *skb_frags,
-+		   unsigned int length, struct mlx4_cqe *cqe);
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring);
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro);
-+
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv* priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length);
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv* priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length);
-+
- void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
- int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
- void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
--- 
-1.5.4
-

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/mlx4_en_0110_no_set_flags.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/mlx4_en_0110_no_set_flags.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/mlx4_en_0110_no_set_flags.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -7,9 +7,9 @@
 --- ofed_kernel.orig/drivers/net/mlx4/en_params.c
 +++ ofed_kernel/drivers/net/mlx4/en_params.c
 @@ -593,8 +593,6 @@ const struct ethtool_ops mlx4_en_ethtool
- 	.get_pauseparam = mlx4_en_get_pauseparam,
  	.set_pauseparam = mlx4_en_set_pauseparam,
  	.get_ringparam = mlx4_en_get_ringparam,
+ 	.set_ringparam = mlx4_en_set_ringparam,
 -	.get_flags = ethtool_op_get_flags,
 -	.set_flags = ethtool_op_set_flags,
  };

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/sdp_7277_to_2_6_11.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/sdp_7277_to_2_6_11.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/sdp_7277_to_2_6_11.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -6,7 +6,7 @@
 ===================================================================
 --- ofed_kernel-2.6.9_U4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
 +++ ofed_kernel-2.6.9_U4/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -576,7 +576,6 @@ adjudge_to_death:
+@@ -580,7 +580,6 @@ adjudge_to_death:
  		/* TODO: tcp_fin_time to get timeout */
  		sdp_dbg(sk, "%s: entering time wait refcnt %d\n", __func__,
  			atomic_read(&sk->sk_refcnt));
@@ -14,15 +14,16 @@
  	}
  
  	/* TODO: limit number of orphaned sockets.
-@@ -850,7 +849,6 @@ void sdp_cancel_dreq_wait_timeout(struct
+@@ -860,8 +859,6 @@ void sdp_cancel_dreq_wait_timeout(struct
  		/* The timeout hasn't reached - need to clean ref count */
  		sock_put(&ssk->isk.sk, SOCK_REF_DREQ_TO);
  	}
+-
 -	atomic_dec(ssk->isk.sk.sk_prot->orphan_count);
  }
  
  void sdp_destroy_work(struct work_struct *work)
-@@ -890,9 +888,6 @@ void sdp_dreq_wait_timeout_work(struct w
+@@ -901,9 +898,6 @@ void sdp_dreq_wait_timeout_work(struct w
  
  	sdp_sk(sk)->dreq_wait_timeout = 0;
  
@@ -32,7 +33,7 @@
  	sdp_exch_state(sk, TCPF_LAST_ACK | TCPF_FIN_WAIT1, TCP_TIME_WAIT);
  
  	release_sock(sk);
-@@ -2131,7 +2126,6 @@ void sdp_urg(struct sdp_sock *ssk, struc
+@@ -2163,7 +2157,6 @@ void sdp_urg(struct sdp_sock *ssk, struc
  
  static atomic_t sockets_allocated;
  static atomic_t memory_allocated;
@@ -40,7 +41,7 @@
  static int memory_pressure;
  struct proto sdp_proto = {
          .close       = sdp_close,
-@@ -2152,13 +2146,11 @@ struct proto sdp_proto = {
+@@ -2184,13 +2177,11 @@ struct proto sdp_proto = {
  	.sockets_allocated = &sockets_allocated,
  	.memory_allocated = &memory_allocated,
  	.memory_pressure = &memory_pressure,
@@ -56,7 +57,7 @@
  	.name	     = "SDP",
  };
  
-@@ -2517,9 +2509,6 @@ static void __exit sdp_exit(void)
+@@ -2569,9 +2560,6 @@ static void __exit sdp_exit(void)
  	sock_unregister(PF_INET_SDP);
  	proto_unregister(&sdp_proto);
  

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/t3_hw_to_2_6_5-7_244.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/t3_hw_to_2_6_5-7_244.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U4/t3_hw_to_2_6_5-7_244.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,13 +1,8 @@
----
- drivers/net/cxgb3/adapter.h |    1 +
- drivers/net/cxgb3/t3_hw.c   |    6 ++----
- 2 files changed, 3 insertions(+), 4 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/adapter.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/adapter.h
-+++ ofed_kernel/drivers/net/cxgb3/adapter.h
-@@ -194,6 +194,7 @@ struct adapter {
+diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
+index 21dad82..1c4e828 100644
+--- a/drivers/net/cxgb3/adapter.h
++++ b/drivers/net/cxgb3/adapter.h
+@@ -207,6 +207,7 @@ struct adapter {
  	struct list_head adapter_list;
  	void __iomem *regs;
  	struct pci_dev *pdev;
@@ -15,11 +10,11 @@
  	unsigned long registered_device_map;
  	unsigned long open_device_map;
  	unsigned long flags;
-Index: ofed_kernel/drivers/net/cxgb3/t3_hw.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/t3_hw.c
-+++ ofed_kernel/drivers/net/cxgb3/t3_hw.c
-@@ -3494,7 +3494,7 @@ static int t3_reset_adapter(struct adapt
+diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
+index 533fc74..f6c000e 100644
+--- a/drivers/net/cxgb3/t3_hw.c
++++ b/drivers/net/cxgb3/t3_hw.c
+@@ -3534,7 +3534,7 @@ static int t3_reset_adapter(struct adapter *adapter)
  	uint16_t devid = 0;
  
  	if (save_and_restore_pcie)
@@ -28,7 +23,7 @@
  	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
  
  	/*
-@@ -3512,7 +3512,7 @@ static int t3_reset_adapter(struct adapt
+@@ -3552,7 +3552,7 @@ static int t3_reset_adapter(struct adapter *adapter)
  		return -1;
  
  	if (save_and_restore_pcie)
@@ -37,12 +32,12 @@
  	return 0;
  }
  
-@@ -3640,8 +3640,6 @@ int t3_prep_adapter(struct adapter *adap
+@@ -3688,8 +3688,6 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
  
  		memcpy(adapter->port[i]->dev_addr, hw_addr,
  		       ETH_ALEN);
 -		memcpy(adapter->port[i]->perm_addr, hw_addr,
 -		       ETH_ALEN);
- 		init_link_config(&p->link_config, p->port_type->caps);
+ 		init_link_config(&p->link_config, p->phy.caps);
  		p->phy.ops->power_down(&p->phy, 1);
- 		if (!(p->port_type->caps & SUPPORTED_IRQ))
+ 		if (!(p->phy.caps & SUPPORTED_IRQ))

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/core_2_z010_sysfs_to_2.6.18.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/core_2_z010_sysfs_to_2.6.18.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/core_2_z010_sysfs_to_2.6.18.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,32 @@
+--- a/drivers/infiniband/core/device.c	2009-02-24 14:00:50.387541000 +0200
++++ b/drivers/infiniband/core/device.c	2009-02-24 14:00:54.891362000 +0200
+@@ -199,7 +199,7 @@ void ib_dealloc_device(struct ib_device 
+ 
+ 	BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);
+ 
+-	kobject_put(&device->dev.kobj);
++	ib_device_unregister_sysfs(device);
+ }
+ EXPORT_SYMBOL(ib_dealloc_device);
+ 
+@@ -356,8 +356,6 @@ void ib_unregister_device(struct ib_devi
+ 
+ 	mutex_unlock(&device_mutex);
+ 
+-	ib_device_unregister_sysfs(device);
+-
+ 	spin_lock_irqsave(&device->client_data_lock, flags);
+ 	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ 		kfree(context);
+--- a/drivers/infiniband/core/sysfs.c	2009-02-24 13:58:42.509754000 +0200
++++ b/drivers/infiniband/core/sysfs.c	2009-02-24 14:00:54.894368000 +0200
+@@ -863,9 +863,6 @@ void ib_device_unregister_sysfs(struct i
+ 	struct kobject *p, *t;
+ 	struct ib_port *port;
+ 
+-	/* Hold kobject until ib_dealloc_device() */
+-	kobject_get(&device->dev.kobj);
+-
+ 	list_for_each_entry_safe(p, t, &device->port_list, entry) {
+ 		list_del(&p->entry);
+ 		port = container_of(p, struct ib_port, kobj);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/core_z0010_sysfs_race.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/core_z0010_sysfs_race.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/core_z0010_sysfs_race.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,496 @@
+ib_core: avoid race condition between sysfs access and low-level module unload. (V2)
+
+In newer kernels, although a low-level module will not be unloaded (code)
+while its sysfs interface is being accessed, it is possible for the module to
+free all its resources (data) during such access.  This almost always causes
+a kernel Oops.
+
+To avoid this, we protect the device reg_state with a mutex, and perform
+all sysfs operations (show, store) atomically within this mutex.
+
+V2: fix thinko bug in sysfs_state_show changes (found by Ralph Campbell).
+
+Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
+Signed-off-by: Ralph Campbell <ralph.campbell at qlogic.com>
+
+Index: ofed_kernel/drivers/infiniband/core/device.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/device.c	2009-02-22 15:36:34.531430000 +0200
++++ ofed_kernel/drivers/infiniband/core/device.c	2009-02-22 15:38:28.650322000 +0200
+@@ -178,9 +178,14 @@ static int end_port(struct ib_device *de
+  */
+ struct ib_device *ib_alloc_device(size_t size)
+ {
++	struct ib_device *ibdev;
++
+ 	BUG_ON(size < sizeof (struct ib_device));
+ 
+-	return kzalloc(size, GFP_KERNEL);
++	ibdev = kzalloc(size, GFP_KERNEL);
++	if (ibdev)
++		mutex_init(&ibdev->sysfs_mutex);
++	return ibdev;
+ }
+ EXPORT_SYMBOL(ib_alloc_device);
+ 
+@@ -313,9 +318,10 @@ int ib_register_device(struct ib_device 
+ 		goto out;
+ 	}
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	list_add_tail(&device->core_list, &device_list);
+-
+ 	device->reg_state = IB_DEV_REGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ 
+ 	{
+ 		struct ib_client *client;
+@@ -361,7 +367,9 @@ void ib_unregister_device(struct ib_devi
+ 		kfree(context);
+ 	spin_unlock_irqrestore(&device->client_data_lock, flags);
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	device->reg_state = IB_DEV_UNREGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ }
+ EXPORT_SYMBOL(ib_unregister_device);
+ 
+Index: ofed_kernel/drivers/infiniband/core/sysfs.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/sysfs.c	2009-02-22 15:36:35.914354000 +0200
++++ ofed_kernel/drivers/infiniband/core/sysfs.c	2009-02-22 16:12:37.485629000 +0200
+@@ -94,7 +94,7 @@ static ssize_t state_show(struct ib_port
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+ 	static const char *state_name[] = {
+ 		[IB_PORT_NOP]		= "NOP",
+@@ -105,26 +105,33 @@ static ssize_t state_show(struct ib_port
+ 		[IB_PORT_ACTIVE_DEFER]	= "ACTIVE_DEFER"
+ 	};
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%d: %s\n", attr.state,
+-		       attr.state >= 0 && attr.state < ARRAY_SIZE(state_name) ?
+-		       state_name[attr.state] : "UNKNOWN");
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d: %s\n", attr.state,
++				      attr.state >= 0 &&
++				      attr.state < ARRAY_SIZE(state_name) ?
++				      state_name[attr.state] : "UNKNOWN");
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "0x%x\n", attr.lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_mask_count_show(struct ib_port *p,
+@@ -132,52 +139,64 @@ static ssize_t lid_mask_count_show(struc
+ 				   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.lmc);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.lmc);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%x\n", attr.sm_lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.sm_lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused,
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.sm_sl);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.sm_sl);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused,
+ 			     char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
+@@ -186,24 +205,33 @@ static ssize_t rate_show(struct ib_port 
+ 	struct ib_port_attr attr;
+ 	char *speed = "";
+ 	int rate;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	switch (attr.active_speed) {
+-	case 2: speed = " DDR"; break;
+-	case 4: speed = " QDR"; break;
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.active_speed) {
++			case 2: speed = " DDR"; break;
++			case 4: speed = " QDR"; break;
++			}
++
++			rate = 25 * ib_width_enum_to_int(attr.active_width) *
++				attr.active_speed;
++			if (rate < 0) {
++				ret = -EINVAL;
++				goto out;
++			}
++
++			ret = sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
++				      rate / 10, rate % 10 ? ".5" : "",
++				      ib_width_enum_to_int(attr.active_width),
++				      speed);
++		}
+ 	}
+-
+-	rate = 25 * ib_width_enum_to_int(attr.active_width) * attr.active_speed;
+-	if (rate < 0)
+-		return -EINVAL;
+-
+-	return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
+-		       rate / 10, rate % 10 ? ".5" : "",
+-		       ib_width_enum_to_int(attr.active_width), speed);
++out:
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
+@@ -211,22 +239,42 @@ static ssize_t phys_state_show(struct ib
+ {
+ 	struct ib_port_attr attr;
+ 
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	switch (attr.phys_state) {
+-	case 1:  return sprintf(buf, "1: Sleep\n");
+-	case 2:  return sprintf(buf, "2: Polling\n");
+-	case 3:  return sprintf(buf, "3: Disabled\n");
+-	case 4:  return sprintf(buf, "4: PortConfigurationTraining\n");
+-	case 5:  return sprintf(buf, "5: LinkUp\n");
+-	case 6:  return sprintf(buf, "6: LinkErrorRecovery\n");
+-	case 7:  return sprintf(buf, "7: Phy Test\n");
+-	default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.phys_state) {
++			case 1:
++				ret = sprintf(buf, "1: Sleep\n");
++				break;
++			case 2:
++				ret = sprintf(buf, "2: Polling\n");
++				break;
++			case 3:
++				ret = sprintf(buf, "3: Disabled\n");
++				break;
++			case 4:
++				ret = sprintf(buf, "4: PortConfigurationTraining\n");
++				break;
++			case 5:
++				ret = sprintf(buf, "5: LinkUp\n");
++				break;
++			case 6:
++				ret = sprintf(buf, "6: LinkErrorRecovery\n");
++				break;
++			case 7:
++				ret = sprintf(buf, "7: Phy Test\n");
++				break;
++			default:
++				ret = sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++				break;
++			}
++		}
+ 	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static PORT_ATTR_RO(state);
+@@ -256,21 +304,24 @@ static ssize_t show_port_gid(struct ib_p
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	union ib_gid gid;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) gid.raw)[0]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[1]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[2]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[3]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[4]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[5]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[6]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[7]));
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) gid.raw)[0]),
++				      be16_to_cpu(((__be16 *) gid.raw)[1]),
++				      be16_to_cpu(((__be16 *) gid.raw)[2]),
++				      be16_to_cpu(((__be16 *) gid.raw)[3]),
++				      be16_to_cpu(((__be16 *) gid.raw)[4]),
++				      be16_to_cpu(((__be16 *) gid.raw)[5]),
++				      be16_to_cpu(((__be16 *) gid.raw)[6]),
++				      be16_to_cpu(((__be16 *) gid.raw)[7]));
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
+@@ -279,13 +330,16 @@ static ssize_t show_port_pkey(struct ib_
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	u16 pkey;
+-	ssize_t ret;
+-
+-	ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%04x\n", pkey);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
++		if (!ret)
++			ret = sprintf(buf, "0x%04x\n", pkey);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ #define PORT_PMA_ATTR(_name, _counter, _width, _offset)			\
+@@ -308,6 +346,12 @@ static ssize_t show_pma_counter(struct i
+ 	if (!p->ibdev->process_mad)
+ 		return sprintf(buf, "N/A (no PMA)\n");
+ 
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (!ibdev_is_alive(p->ibdev)) {
++		ret = -ENODEV;
++		goto out;
++	}
++
+ 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
+ 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ 	if (!in_mad || !out_mad) {
+@@ -354,7 +414,7 @@ static ssize_t show_pma_counter(struct i
+ out:
+ 	kfree(in_mad);
+ 	kfree(out_mad);
+-
++	mutex_unlock(&p->ibdev->sysfs_mutex);
+ 	return ret;
+ }
+ 
+@@ -594,20 +654,20 @@ static ssize_t show_sys_image_guid(struc
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_attr attr;
+-	ssize_t ret;
+-
+-	if (!ibdev_is_alive(dev))
+-		return -ENODEV;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_device(dev, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_query_device(dev, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_node_guid(struct device *device,
+@@ -639,17 +699,20 @@ static ssize_t set_node_desc(struct devi
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_modify desc = {};
+-	int ret;
++	int ret = -ENODEV;
+ 
+ 	if (!dev->modify_device)
+ 		return -EIO;
+ 
+ 	memcpy(desc.node_desc, buf, min_t(int, count, 64));
+-	ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
+-	if (ret)
+-		return ret;
+-
+-	return count;
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
++		if (!ret)
++			ret = count;
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
+@@ -677,14 +740,18 @@ static ssize_t show_protocol_stat(const 
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	union rdma_protocol_stats stats;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = dev->get_protocol_stats(dev, &stats);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%llu\n",
+-		       (unsigned long long) ((u64 *) &stats)[offset]);
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = dev->get_protocol_stats(dev, &stats);
++		if (!ret)
++			ret = sprintf(buf, "%llu\n",
++				      (unsigned long long)
++				      ((u64 *) &stats)[offset]);
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ /* generate a read-only iwarp statistics attribute */
+Index: ofed_kernel/include/rdma/ib_verbs.h
+===================================================================
+--- ofed_kernel.orig/include/rdma/ib_verbs.h	2009-02-22 15:36:40.252210000 +0200
++++ ofed_kernel/include/rdma/ib_verbs.h	2009-02-22 15:38:28.678322000 +0200
+@@ -1205,6 +1205,7 @@ struct ib_device {
+ 		IB_DEV_REGISTERED,
+ 		IB_DEV_UNREGISTERED
+ 	}                            reg_state;
++	struct mutex		     sysfs_mutex;
+ 
+ 	u64			     uverbs_cmd_mask;
+ 	int			     uverbs_abi_ver;
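
For reference, the pattern the core_z0010_sysfs_race patch above applies to every
sysfs show/store handler boils down to: take the device's new sysfs_mutex,
re-check that the device is still registered, and only then issue the query.
A minimal sketch, assuming the ib_port, ibdev_is_alive() and sysfs_mutex
definitions from the patched sources (the handler name itself is illustrative,
not part of the commit):

	static ssize_t example_lid_show(struct ib_port *p,
					struct port_attribute *unused, char *buf)
	{
		struct ib_port_attr attr;
		ssize_t ret = -ENODEV;	/* returned if the device is already gone */

		mutex_lock(&p->ibdev->sysfs_mutex);
		if (ibdev_is_alive(p->ibdev)) {
			/* Device still registered: safe to query and format */
			ret = ib_query_port(p->ibdev, p->port_num, &attr);
			if (!ret)
				ret = sprintf(buf, "0x%x\n", attr.lid);
		}
		mutex_unlock(&p->ibdev->sysfs_mutex);
		return ret;
	}

Defaulting ret to -ENODEV lets a handler race cleanly with ib_unregister_device():
once reg_state leaves IB_DEV_REGISTERED, the handler returns without touching
driver data that the low-level module may already have freed.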

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/cxgb3_0002_undo_250.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/cxgb3_0002_undo_250.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/cxgb3_0002_undo_250.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,8 @@
-commit 204e2f98c2d13f869b8541f3c57c7314f75cab11
-Author: Divy Le Ray <divy at chelsio.com>
-Date:   Tue May 6 19:26:01 2008 -0700
-
-    cxgb3 - fix EEH
-    
-    Reset the chip when the PCI link goes down.
-    Preserve the napi structure when a sge qset's resources are freed.
-    Replay only HW initialization when the chip comes out of reset.
-    
-    Signed-off-by: Divy Le ray <divy at chelsio.com>
-    Signed-off-by: Jeff Garzik <jgarzik at redhat.com>
-
----
- drivers/net/cxgb3/common.h     |    1 -
- drivers/net/cxgb3/cxgb3_main.c |   10 ++++------
- drivers/net/cxgb3/regs.h       |    8 --------
- drivers/net/cxgb3/sge.c        |   31 ++-----------------------------
- drivers/net/cxgb3/t3_hw.c      |   28 ----------------------------
- 5 files changed, 6 insertions(+), 72 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/common.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/common.h
-+++ ofed_kernel/drivers/net/cxgb3/common.h
-@@ -700,7 +700,6 @@ void mac_prep(struct cmac *mac, struct a
+diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
+index d5e9bf7..dc2c79d 100644
+--- a/drivers/net/cxgb3/common.h
++++ b/drivers/net/cxgb3/common.h
+@@ -726,7 +726,6 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
  void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
  int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
  		    int reset);
@@ -31,11 +10,11 @@
  void t3_led_ready(struct adapter *adapter);
  void t3_fatal_err(struct adapter *adapter);
  void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -2449,6 +2449,9 @@ static pci_ers_result_t t3_io_error_dete
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 8de820e..48fbda6 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -2485,6 +2485,9 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
  	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
  		offload_close(&adapter->tdev);
  
@@ -45,7 +24,7 @@
  	adapter->flags &= ~FULL_INIT_DONE;
  
  	pci_disable_device(pdev);
-@@ -2473,12 +2476,8 @@ static pci_ers_result_t t3_io_slot_reset
+@@ -2509,12 +2512,8 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
  		goto err;
  	}
  	pci_set_master(pdev);
@@ -59,7 +38,7 @@
  		goto err;
  
  	return PCI_ERS_RESULT_RECOVERED;
-@@ -2630,7 +2629,6 @@ static int __devinit init_one(struct pci
+@@ -2666,7 +2665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
  	}
  
  	pci_set_master(pdev);
@@ -67,10 +46,10 @@
  
  	mmio_start = pci_resource_start(pdev, 0);
  	mmio_len = pci_resource_len(pdev, 0);
-Index: ofed_kernel/drivers/net/cxgb3/regs.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/regs.h
-+++ ofed_kernel/drivers/net/cxgb3/regs.h
+diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
+index a035d5c..086cd02 100644
+--- a/drivers/net/cxgb3/regs.h
++++ b/drivers/net/cxgb3/regs.h
 @@ -444,14 +444,6 @@
  
  #define A_PCIE_CFG 0x88
@@ -86,11 +65,11 @@
  #define S_PCIE_CLIDECEN    16
  #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
  #define F_PCIE_CLIDECEN    V_PCIE_CLIDECEN(1U)
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index 1b0861d..912f816 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
  }
  
  /**
@@ -124,7 +103,7 @@
   *	free_qset - free the resources of an SGE queue set
   *	@adapter: the adapter owning the queue set
   *	@q: the queue set
-@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter 
+@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -133,20 +112,11 @@
  }
  
  /**
-@@ -1429,7 +1402,7 @@ static void restart_ctrlq(unsigned long 
-  */
- int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
- {
--	int ret;
-+	int ret;
- 	local_bh_disable();
- 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
- 	local_bh_enable();
-Index: ofed_kernel/drivers/net/cxgb3/t3_hw.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/t3_hw.c
-+++ ofed_kernel/drivers/net/cxgb3/t3_hw.c
-@@ -3265,7 +3265,6 @@ static void config_pcie(struct adapter *
+diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
+index bf5c076..533fc74 100644
+--- a/drivers/net/cxgb3/t3_hw.c
++++ b/drivers/net/cxgb3/t3_hw.c
+@@ -3303,7 +3303,6 @@ static void config_pcie(struct adapter *adap)
  
  	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
  	t3_set_reg_field(adap, A_PCIE_CFG, 0,
@@ -154,15 +124,14 @@
  			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
  }
  
-@@ -3657,30 +3656,3 @@ void t3_led_ready(struct adapter *adapte
- 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+@@ -3706,31 +3705,3 @@ void t3_led_ready(struct adapter *adapter)
  			 F_GPIO0_OUT_VAL);
  }
--
+ 
 -int t3_replay_prep_adapter(struct adapter *adapter)
 -{
 -	const struct adapter_info *ai = adapter->params.info;
--	unsigned int i, j = 0;
+-	unsigned int i, j = -1;
 -	int ret;
 -
 -	early_hw_init(adapter, ai);
@@ -171,15 +140,17 @@
 -		return ret;
 -
 -	for_each_port(adapter, i) {
+-		const struct port_type_info *pti;
 -		struct port_info *p = adap2pinfo(adapter, i);
--		while (!adapter->params.vpd.port_type[j])
--			++j;
 -
--		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
--					ai->mdio_ops);
+-		while (!adapter->params.vpd.port_type[++j])
+-			;
 -
+-		pti = &port_types[adapter->params.vpd.port_type[j]];
+-		ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
+-		if (ret)
+-			return ret;
 -		p->phy.ops->power_down(&p->phy, 1);
--		++j;
 -	}
 -
 -return 0;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/cxgb3_0010_napi.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/cxgb3_0010_napi.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/cxgb3_0010_napi.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,21 @@
----
- drivers/net/cxgb3/adapter.h    |   22 +++--
- drivers/net/cxgb3/cxgb3_main.c |   94 ++++++++++++++++------
- drivers/net/cxgb3/sge.c        |  170 ++++++++++++++++++++++++-----------------
- 3 files changed, 185 insertions(+), 101 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/adapter.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/adapter.h
-+++ ofed_kernel/drivers/net/cxgb3/adapter.h
-@@ -47,13 +47,11 @@
- #include <asm/io.h>
+diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
+index 3b33ecb..21dad82 100644
+--- a/drivers/net/cxgb3/adapter.h
++++ b/drivers/net/cxgb3/adapter.h
+@@ -48,12 +48,10 @@
  
  struct vlan_group;
--struct adapter;
+ struct adapter;
 -struct sge_qset;
  
-+struct adapter;
  struct port_info {
  	struct adapter *adapter;
  	struct vlan_group *vlan_grp;
 -	struct sge_qset *qs;
- 	const struct port_type_info *port_type;
  	u8 port_id;
  	u8 rx_csum_offload;
-@@ -184,8 +182,6 @@ enum {				/* per port SGE statistics */
+ 	u8 nqsets;
+@@ -183,8 +181,6 @@ enum {				/* per port SGE statistics */
  #define T3_MAX_LRO_MAX_PKTS 64
  
  struct sge_qset {		/* an SGE queue set */
@@ -32,7 +24,7 @@
  	struct sge_rspq rspq;
  	struct sge_fl fl[SGE_RXQ_PER_SET];
  	struct sge_txq txq[SGE_TXQ_PER_SET];
-@@ -196,7 +192,7 @@ struct sge_qset {		/* an SGE queue set *
+@@ -195,7 +191,7 @@ struct sge_qset {		/* an SGE queue set */
  	int lro_enabled;
  	int lro_frag_len;
  	void *lro_va;
@@ -41,7 +33,7 @@
  	unsigned long txq_stopped;	/* which Tx queues are stopped */
  	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
  	unsigned long port_stats[SGE_PSTAT_MAX];
-@@ -241,6 +237,12 @@ struct adapter {
+@@ -240,6 +236,12 @@ struct adapter {
  	struct delayed_work adap_check_task;
  	struct work_struct ext_intr_handler_task;
  
@@ -54,7 +46,7 @@
  	struct dentry *debugfs_root;
  
  	struct mutex mdio_lock;
-@@ -267,6 +269,12 @@ static inline struct port_info *adap2pin
+@@ -266,6 +268,12 @@ static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
  	return netdev_priv(adap->port[idx]);
  }
  
@@ -67,7 +59,7 @@
  #define OFFLOAD_DEVMAP_BIT 15
  
  #define tdev2adap(d) container_of(d, struct adapter, tdev)
-@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, str
+@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -76,11 +68,11 @@
  int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
  		unsigned char *data);
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -410,17 +410,49 @@ static void setup_rss(struct adapter *ad
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 0f4c694..342d441 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -435,17 +435,49 @@ static void setup_rss(struct adapter *adap)
  		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
  }
  
@@ -98,9 +90,7 @@
 -	int i;
 +	int i, j, dummy_idx = 0;
 +	struct net_device *nd;
- 
--	for (i = 0; i < SGE_QSETS; i++) {
--		struct sge_qset *qs = &adap->sge.qs[i];
++
 +	for_each_port(adap, i) {
 +		struct net_device *dev = adap->port[i];
 +		const struct port_info *pi = netdev_priv(dev);
@@ -112,7 +102,9 @@
 +				nd = alloc_netdev(sizeof(*p), "", ether_setup);
 +				if (!nd)
 +					goto free_all;
-+
+ 
+-	for (i = 0; i < SGE_QSETS; i++) {
+-		struct sge_qset *qs = &adap->sge.qs[i];
 +				p = netdev_priv(nd);
 +				p->adapter = adap;
 +				nd->weight = 64;
@@ -137,7 +129,7 @@
  }
  
  /*
-@@ -431,18 +463,20 @@ static void init_napi(struct adapter *ad
+@@ -456,18 +488,20 @@ static void init_napi(struct adapter *adap)
  static void quiesce_rx(struct adapter *adap)
  {
  	int i;
@@ -168,7 +160,7 @@
  }
  
  /**
-@@ -455,7 +489,7 @@ static void enable_all_napi(struct adapt
+@@ -480,7 +514,7 @@ static void enable_all_napi(struct adapter *adap)
   */
  static int setup_sge_qsets(struct adapter *adap)
  {
@@ -177,7 +169,7 @@
  	unsigned int ntxq = SGE_TXQ_PER_SET;
  
  	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
-@@ -463,14 +497,15 @@ static int setup_sge_qsets(struct adapte
+@@ -488,14 +522,15 @@ static int setup_sge_qsets(struct adapter *adap)
  
  	for_each_port(adap, i) {
  		struct net_device *dev = adap->port[i];
@@ -196,7 +188,7 @@
  			if (err) {
  				t3_free_sge_resources(adap);
  				return err;
-@@ -884,6 +919,10 @@ static int cxgb_up(struct adapter *adap)
+@@ -909,6 +944,10 @@ static int cxgb_up(struct adapter *adap)
  				goto out;
  		}
  
@@ -207,7 +199,7 @@
  		err = t3_init_hw(adap, 0);
  		if (err)
  			goto out;
-@@ -896,7 +935,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -921,7 +960,6 @@ static int cxgb_up(struct adapter *adap)
  			goto out;
  
  		setup_rss(adap);
@@ -215,7 +207,7 @@
  		adap->flags |= FULL_INIT_DONE;
  	}
  
-@@ -924,7 +962,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -949,7 +987,6 @@ static int cxgb_up(struct adapter *adap)
  				      adap->name, adap)))
  		goto irq_err;
  
@@ -223,7 +215,7 @@
  	t3_sge_start(adap);
  	t3_intr_enable(adap);
  
-@@ -1061,10 +1098,8 @@ static int cxgb_open(struct net_device *
+@@ -1086,10 +1123,8 @@ static int cxgb_open(struct net_device *dev)
  	int other_ports = adapter->open_device_map & PORT_MASK;
  	int err;
  
@@ -235,7 +227,7 @@
  
  	set_bit(pi->port_id, &adapter->open_device_map);
  	if (is_offload(adapter) && !ofld_disable) {
-@@ -2700,6 +2735,7 @@ static int __devinit init_one(struct pci
+@@ -2736,6 +2771,7 @@ static int __devinit init_one(struct pci_dev *pdev,
  #ifdef CONFIG_NET_POLL_CONTROLLER
  		netdev->poll_controller = cxgb_netpoll;
  #endif
@@ -243,7 +235,7 @@
  
  		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
  	}
-@@ -2800,6 +2836,12 @@ static void __devexit remove_one(struct 
+@@ -2836,6 +2872,12 @@ static void __devexit remove_one(struct pci_dev *pdev)
  		t3_free_sge_resources(adapter);
  		cxgb_disable_msi(adapter);
  
@@ -256,11 +248,11 @@
  		for_each_port(adapter, i)
  			if (adapter->port[i])
  				free_netdev(adapter->port[i]);
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter 
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index f6bc6fe..3bbf626 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -270,7 +262,7 @@
  	memset(q, 0, sizeof(*q));
  }
  
-@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, str
+@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
  	unsigned int ndesc, pidx, credits, gen, compl;
  	const struct port_info *pi = netdev_priv(dev);
  	struct adapter *adap = pi->adapter;
@@ -279,7 +271,7 @@
  	struct sge_txq *q = &qs->txq[TXQ_ETH];
  
  	/*
-@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long 
+@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long data)
  	struct sk_buff *skb;
  	struct sge_qset *qs = (struct sge_qset *)data;
  	struct sge_txq *q = &qs->txq[TXQ_CTRL];
@@ -295,7 +287,7 @@
  
  		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
  
-@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long 
+@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long data)
  
  	spin_unlock(&q->lock);
  	wmb();
@@ -304,7 +296,7 @@
  		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
  }
  
-@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struc
+@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
  	else {
  		struct sge_qset *qs = rspq_to_qset(q);
  
@@ -314,7 +306,7 @@
  		q->rx_head = skb;
  	}
  	q->rx_tail = skb;
-@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundl
+@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev,
   *	receive handler.  Batches need to be of modest size as we do prefetches
   *	on the packets in each.
   */
@@ -357,7 +349,7 @@
  			prefetch(head->data);
  			skbs[ngathered] = head;
  			head = head->next;
-@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct 
+@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct *napi, int budget)
  		}
  		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
  	}
@@ -370,7 +362,7 @@
  }
  
  /**
-@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const
+@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const struct rsp_desc *r)
  
  /**
   *	napi_rx_handler - the NAPI handler for Rx processing
@@ -449,7 +441,7 @@
  }
  
  /**
-@@ -2448,7 +2462,8 @@ static inline int handle_responses(struc
+@@ -2448,7 +2462,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
  		return 0;
  	}
@@ -459,7 +451,7 @@
  	return 1;
  }
  
-@@ -2459,7 +2474,8 @@ static inline int handle_responses(struc
+@@ -2459,7 +2474,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -469,7 +461,7 @@
  	struct sge_rspq *q = &qs->rspq;
  
  	spin_lock(&q->lock);
-@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, vo
+@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -484,7 +476,7 @@
  		q->unhandled_irqs++;
  	spin_unlock(&q->lock);
  	return IRQ_HANDLED;
-@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, 
+@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, void *cookie)
  	return IRQ_HANDLED;
  }
  
@@ -502,7 +494,7 @@
  		return 1;
  	}
  	return 0;
-@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int 
+@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
  
  	spin_lock(&q->lock);
  
@@ -515,7 +507,7 @@
  	if (!new_packets && t3_slow_intr_handler(adap) == 0)
  		q->unhandled_irqs++;
  
-@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, voi
+@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, void *cookie)
  static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  {
  	u32 map;
@@ -527,7 +519,7 @@
  
  	t3_write_reg(adap, A_PL_CLI, 0);
  	map = t3_read_reg(adap, A_SG_DATA_INTR);
-@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq
+@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  	if (unlikely(map & F_ERRINTR))
  		t3_slow_intr_handler(adap);
  
@@ -550,7 +542,7 @@
  
  	spin_unlock(&q0->lock);
  	return IRQ_HANDLED;
-@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long d
+@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long data)
  {
  	spinlock_t *lock;
  	struct sge_qset *qs = (struct sge_qset *)data;
@@ -560,7 +552,7 @@
  
  	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
  		reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
-@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long d
+@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long data)
  		spin_unlock(&qs->txq[TXQ_OFLD].lock);
  	}
  	lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
@@ -572,7 +564,7 @@
  			u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
  
  			if (qs->fl[0].credits < qs->fl[0].size)
-@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long d
+@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long data)
   */
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
  {
@@ -586,7 +578,7 @@
  }
  
  /**
-@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_
+@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
   */
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -595,7 +587,7 @@
  {
  	int i, avail, ret = -ENOMEM;
  	struct sge_qset *q = &adapter->sge.qs[id];
-@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *ad
+@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  	}
  
  	spin_unlock_irq(&adapter->sge.reg_lock);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,23 +3,32 @@
  drivers/infiniband/hw/ipath/ipath_user_sdma.c |    6 +++---
  2 files changed, 4 insertions(+), 4 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:37:35.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:41:22.000000000 -0800
 @@ -698,7 +698,7 @@ retry:
  
  	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
  			      tx->map_len, DMA_TO_DEVICE);
--	if (dma_mapping_error(&dd->pcidev->dev, addr)) {
-+	if (dma_mapping_error(addr)) {
- 		ret = -EIO;
- 		goto unlock;
- 	}
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+-	if (dma_mapping_error(&dd->pcidev->dev, addr))
++	if (dma_mapping_error(addr))
+ 		goto ioerr;
+ 
+ 	dwoffset = tx->map_len >> 2;
+@@ -739,7 +739,7 @@ retry:
+ 		dw = (len + 3) >> 2;
+ 		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
+ 				      DMA_TO_DEVICE);
+-		if (dma_mapping_error(&dd->pcidev->dev, addr))
++		if (dma_mapping_error(addr))
+ 			goto unmap;
+ 		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
+ 		/* SDmaUseLargeBuf has to be set in every descriptor */
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:18.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:54.000000000 -0800
 @@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(cons
  
  	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/ipath_0100_iowrite32_copy.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/ipath_0100_iowrite32_copy.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/ipath_0100_iowrite32_copy.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,88 +1,16 @@
-BACKPORT - backport iowrite32_copy to 2.6.15 and earlier
+BACKPORT - backport iowrite32_copy awareness to 2.6.15 and earlier
 
+Signed-off-by: John Gregor <john.gregor@qlogic.com>
 ---
- drivers/infiniband/hw/ipath/Makefile                |    1 
- drivers/infiniband/hw/ipath/iowrite32_copy_x86_64.S |   57 ++++++++++++++++++++
- drivers/infiniband/hw/ipath/ipath_backport.h        |   48 ++++++++++++++++
- drivers/infiniband/hw/ipath/ipath_kernel.h          |    1 
- drivers/infiniband/hw/ipath/ipath_verbs.h           |    2 
- 5 files changed, 109 insertions(+)
+ drivers/infiniband/hw/ipath/ipath_backport.h |   48 +++++++++++++++++++++++++++
+ drivers/infiniband/hw/ipath/ipath_kernel.h   |    1 
+ drivers/infiniband/hw/ipath/ipath_verbs.h    |    2 +
+ 3 files changed, 51 insertions(+)
 
-Index: ofed_kernel/drivers/infiniband/hw/ipath/Makefile
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_backport.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/Makefile
-+++ ofed_kernel/drivers/infiniband/hw/ipath/Makefile
-@@ -39,3 +39,4 @@ ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba6
- 
- ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
- ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o
-+ib_ipath-$(CONFIG_X86_64) += iowrite32_copy_x86_64.o
-Index: ofed_kernel/drivers/infiniband/hw/ipath/iowrite32_copy_x86_64.S
-===================================================================
 --- /dev/null
-+++ ofed_kernel/drivers/infiniband/hw/ipath/iowrite32_copy_x86_64.S
-@@ -0,0 +1,57 @@
-+/*
-+ * Copyright (c) 2003, 2004, 2005. PathScale, Inc. All rights reserved.
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses.  You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ *     Redistribution and use in source and binary forms, with or
-+ *     without modification, are permitted provided that the following
-+ *     conditions are met:
-+ *
-+ *      - Redistributions of source code must retain the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer.
-+ *
-+ *      - Redistributions in binary form must reproduce the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer in the documentation and/or other materials
-+ *        provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ */
-+
-+/**
-+ * __iowrite32_copy - copy a memory block using dword multiple writes
-+ *
-+ * This is primarily for writing to the InfiniPath PIO buffers, which
-+ * only support dword multiple writes, and thus can not use memcpy().
-+ * For this reason, we use nothing smaller than dword writes.
-+ * It is also used as a fast copy routine in some places that have been
-+ * measured to win over memcpy, and the performance delta matters.
-+ *
-+ * Count is number of dwords; might not be a qword multiple.
-+ */
-+
-+ 	.globl __iowrite32_copy
-+	.p2align 4
-+/* rdi	destination, rsi source, rdx count */
-+__iowrite32_copy:
-+	movl %edx,%ecx
-+	shrl $1,%ecx
-+	andl $1,%edx
-+	rep
-+	movsq
-+	movl %edx,%ecx
-+	rep
-+	movsd
-+	ret
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_backport.h
-===================================================================
---- /dev/null
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_backport.h
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_backport.h
 @@ -0,0 +1,48 @@
 +#ifndef _IPATH_BACKPORT_H
 +#define _IPATH_BACKPORT_H
@@ -132,10 +60,10 @@
 +void __iowrite32_copy(void __iomem * dst, const void *src, size_t count);
 +
 +#endif				/* _IPATH_BACKPORT_H */
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_kernel.h
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_kernel.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_kernel.h
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_kernel.h
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_kernel.h
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_kernel.h
 @@ -47,6 +47,7 @@
  #include <asm/io.h>
  #include <rdma/ib_verbs.h>
@@ -144,10 +72,10 @@
  #include "ipath_common.h"
  #include "ipath_debug.h"
  #include "ipath_registers.h"
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_verbs.h
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_verbs.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_verbs.h
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_verbs.h
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_verbs.h
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_verbs.h
 @@ -42,6 +42,8 @@
  #include <rdma/ib_pack.h>
  #include <rdma/ib_user_verbs.h>

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/ipoib_0100_to_2.6.21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/ipoib_0100_to_2.6.21.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/ipoib_0100_to_2.6.21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -12,11 +12,11 @@
  drivers/infiniband/ulp/ipoib/ipoib_multicast.c |   10 ++--
  5 files changed, 61 insertions(+), 64 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:56:08.000000000 +0200
-@@ -279,8 +279,6 @@ struct ipoib_dev_priv {
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:46:20.000000000 +0300
+@@ -277,8 +277,6 @@ struct ipoib_dev_priv {
  
  	struct net_device *dev;
  
@@ -25,7 +25,7 @@
  	unsigned long flags;
  
  	struct mutex vlan_mutex;
-@@ -338,6 +336,8 @@ struct ipoib_dev_priv {
+@@ -336,6 +334,8 @@ struct ipoib_dev_priv {
  
  	struct ib_event_handler event_handler;
  
@@ -34,7 +34,7 @@
  	struct net_device *parent;
  	struct list_head child_intfs;
  	struct list_head list;
-@@ -425,7 +425,7 @@ extern struct workqueue_struct *ipoib_wo
+@@ -423,7 +423,7 @@ extern struct workqueue_struct *ipoib_wo
  
  /* functions */
  
@@ -43,10 +43,10 @@
  void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
  void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:46:20.000000000 +0300
 @@ -593,7 +593,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		ipoib_dbg(priv, "cm recv error "
  			   "(status=%d, wrid=%d vend_err %x)\n",
@@ -56,7 +56,7 @@
  		if (has_srq)
  			goto repost;
  		else {
-@@ -658,7 +658,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
+@@ -646,7 +646,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		 * this packet and reuse the old buffer.
  		 */
  		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
@@ -65,7 +65,7 @@
  		goto repost;
  	}
  
-@@ -676,8 +676,8 @@ copied:
+@@ -664,8 +664,8 @@ copied:
  	skb_pull(skb, IPOIB_ENCAP_LEN);
  
  	dev->last_rx = jiffies;
@@ -76,7 +76,7 @@
  
  	skb->dev = dev;
  	/* XXX get correct PACKET_ type here */
-@@ -726,8 +726,8 @@ void ipoib_cm_send(struct net_device *de
+@@ -714,8 +714,8 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(skb->len > tx->mtu)) {
  		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
  			   skb->len, tx->mtu);
@@ -87,7 +87,7 @@
  		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
  		return;
  	}
-@@ -746,7 +746,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -734,7 +734,7 @@ void ipoib_cm_send(struct net_device *de
  	tx_req->skb = skb;
  	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
  	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
@@ -96,7 +96,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -756,7 +756,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -744,7 +744,7 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
  			       addr, skb->len))) {
  		ipoib_warn(priv, "post_send failed\n");
@@ -105,7 +105,7 @@
  		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
  		dev_kfree_skb_any(skb);
  	} else {
-@@ -793,8 +793,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
+@@ -781,8 +781,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
  	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
  
  	/* FIXME: is this right? Shouldn't we only increment on success? */
@@ -116,10 +116,10 @@
  
  	dev_kfree_skb_any(tx_req->skb);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:43:16.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:49:04.000000000 +0300
 @@ -261,7 +261,7 @@ static void ipoib_ib_handle_rx_wc(struct
  	 * this packet and reuse the old buffer.
  	 */
@@ -263,60 +263,41 @@
  		--priv->tx_outstanding;
  		ipoib_dma_unmap_tx(priv->ca, tx_req);
  		dev_kfree_skb_any(skb);
-@@ -809,6 +812,7 @@ int ipoib_ib_dev_stop(struct net_device 
+@@ -708,8 +711,7 @@ int ipoib_ib_dev_open(struct net_device 
+ 	priv->poll_timer.function = ipoib_ib_tx_timer_func;
+ 	priv->poll_timer.data = (unsigned long)dev;
+ 
+-	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_enable(&priv->napi);
++	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+ 
+ 	return 0;
+ }
+@@ -828,8 +830,8 @@ int ipoib_ib_dev_stop(struct net_device 
+ 	struct ipoib_tx_buf *tx_req;
  	int i;
  
- 	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+-	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_disable(&priv->napi);
++	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 +	netif_poll_disable(dev);
  
  	ipoib_cm_dev_stop(dev);
  
-@@ -893,6 +897,7 @@ timeout:
- 		msleep(1);
- 	}
+@@ -903,6 +905,7 @@ timeout:
  
+ 	ipoib_ah_dev_cleanup(dev);
+ 
 +	netif_poll_enable(dev);
  	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
  
  	return 0;
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:55:35.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:58:18.000000000 +0200
-@@ -106,20 +106,16 @@ int ipoib_open(struct net_device *dev)
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:44:02.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:46:20.000000000 +0300
+@@ -585,7 +585,7 @@ static void neigh_add_path(struct sk_buf
  
- 	ipoib_dbg(priv, "bringing up interface\n");
- 
--	if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
--		napi_enable(&priv->napi);
-+	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- 
- 	if (ipoib_pkey_dev_delay_open(dev))
- 		return 0;
- 
--	if (ipoib_ib_dev_open(dev)) {
--		napi_disable(&priv->napi);
--		return -EINVAL;
--	}
-+	if (ipoib_ib_dev_open(dev))
-+  		return -EINVAL;
- 
- 	if (ipoib_ib_dev_up(dev)) {
- 		ipoib_ib_dev_stop(dev, 1);
--		napi_disable(&priv->napi);
- 		return -EINVAL;
- 	}
- 
-@@ -152,7 +148,6 @@ static int ipoib_stop(struct net_device 
- 	ipoib_dbg(priv, "stopping interface\n");
- 
- 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
--	napi_disable(&priv->napi);
- 
- 	netif_stop_queue(dev);
- 
-@@ -604,7 +599,7 @@ static void neigh_add_path(struct sk_buf
- 
  	neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
  	if (!neigh) {
 -		++dev->stats.tx_dropped;
@@ -324,7 +305,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -667,7 +662,7 @@ err_list:
+@@ -646,7 +646,7 @@ err_list:
  err_path:
  	ipoib_neigh_free(dev, neigh);
  err_drop:
@@ -333,7 +314,7 @@
  	dev_kfree_skb_any(skb);
  
  	spin_unlock_irqrestore(&priv->lock, flags);
-@@ -714,7 +709,7 @@ static void unicast_arp_send(struct sk_b
+@@ -697,7 +697,7 @@ static void unicast_arp_send(struct sk_b
  			} else
  				__path_add(dev, path);
  		} else {
@@ -342,7 +323,7 @@
  			dev_kfree_skb_any(skb);
  		}
  
-@@ -733,7 +728,7 @@ static void unicast_arp_send(struct sk_b
+@@ -716,7 +716,7 @@ static void unicast_arp_send(struct sk_b
  		skb_push(skb, sizeof *phdr);
  		__skb_queue_tail(&path->queue, skb);
  	} else {
@@ -351,7 +332,7 @@
  		dev_kfree_skb_any(skb);
  	}
  
-@@ -790,7 +785,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -773,7 +773,7 @@ static int ipoib_start_xmit(struct sk_bu
  			__skb_queue_tail(&neigh->queue, skb);
  			spin_unlock_irqrestore(&priv->lock, flags);
  		} else {
@@ -360,7 +341,7 @@
  			dev_kfree_skb_any(skb);
  		}
  	} else {
-@@ -816,7 +811,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -799,7 +799,7 @@ static int ipoib_start_xmit(struct sk_bu
  					   IPOIB_QPN(phdr->hwaddr),
  					   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
  				dev_kfree_skb_any(skb);
@@ -369,7 +350,7 @@
  				return NETDEV_TX_OK;
  			}
  
-@@ -842,7 +837,7 @@ static void ipoib_timeout(struct net_dev
+@@ -825,7 +825,7 @@ static void ipoib_timeout(struct net_dev
  static int ipoib_hard_header(struct sk_buff *skb,
  			     struct net_device *dev,
  			     unsigned short type,
@@ -378,7 +359,7 @@
  {
  	struct ipoib_header *header;
  
-@@ -931,9 +926,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
+@@ -914,9 +914,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
  void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
  {
  	struct sk_buff *skb;
@@ -390,7 +371,7 @@
  		dev_kfree_skb_any(skb);
  	}
  	if (ipoib_cm_get(neigh))
-@@ -1008,10 +1004,6 @@ void ipoib_dev_cleanup(struct net_device
+@@ -991,10 +992,6 @@ void ipoib_dev_cleanup(struct net_device
  	priv->tx_ring = NULL;
  }
  
@@ -401,7 +382,7 @@
  static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
  		       void **tcph, u64 *hdr_flags, void *priv)
  {
-@@ -1069,13 +1061,13 @@ static void ipoib_setup(struct net_devic
+@@ -1052,13 +1049,13 @@ static void ipoib_setup(struct net_devic
  	dev->change_mtu		 = ipoib_change_mtu;
  	dev->hard_start_xmit	 = ipoib_start_xmit;
  	dev->tx_timeout		 = ipoib_timeout;
@@ -418,10 +399,10 @@
  
  	dev->watchdog_timeo	 = HZ;
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:46:20.000000000 +0300
 @@ -100,7 +100,7 @@ static void ipoib_mcast_free(struct ipoi
  	}
  
@@ -448,7 +429,7 @@
  			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
  		}
  		netif_tx_unlock_bh(dev);
-@@ -667,7 +668,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -677,7 +678,7 @@ void ipoib_mcast_send(struct net_device 
  	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)		||
  	    !priv->broadcast					||
  	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
@@ -457,7 +438,7 @@
  		dev_kfree_skb_any(skb);
  		goto unlock;
  	}
-@@ -682,7 +683,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -692,7 +693,7 @@ void ipoib_mcast_send(struct net_device 
  		if (!mcast) {
  			ipoib_warn(priv, "unable to allocate memory for "
  				   "multicast structure\n");
@@ -466,7 +447,7 @@
  			dev_kfree_skb_any(skb);
  			goto out;
  		}
-@@ -697,7 +698,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -707,7 +708,7 @@ void ipoib_mcast_send(struct net_device 
  		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
  			skb_queue_tail(&mcast->pkt_queue, skb);
  		else {

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,26 @@
+---
+ drivers/infiniband/ulp/ipoib/ipoib_fs.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+===================================================================
+--- ofa_kernel-1.4.orig/drivers/infiniband/ulp/ipoib/ipoib_fs.c
++++ ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+@@ -134,7 +134,7 @@ static int ipoib_mcg_open(struct inode *
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_mcg_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_mcg_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -240,7 +240,7 @@ static int ipoib_path_open(struct inode 
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_path_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_path_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/ipoib_x_001_2_6_9_disable_coal.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/ipoib_x_001_2_6_9_disable_coal.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/ipoib_x_001_2_6_9_disable_coal.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,33 @@
+For RHEL4, need to disable coalescing when LRO is enabled,
+or get IPoIB performance degradation (TCP stack issue).
+
+Fixes Bugzilla 1494.
+
+Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
+
+Index: ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+===================================================================
+--- ofa_kernel-1.4.orig/drivers/infiniband/ulp/ipoib/ipoib_verbs.c	2009-02-23 11:25:20.000000000 +0200
++++ ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_verbs.c	2009-02-23 11:34:39.000000000 +0200
+@@ -185,10 +185,17 @@
+ 
+ 	coal = kzalloc(sizeof *coal, GFP_KERNEL);
+ 	if (coal) {
+-		coal->rx_coalesce_usecs = 10;
+-		coal->tx_coalesce_usecs = 10;
+-		coal->rx_max_coalesced_frames = 16;
+-		coal->tx_max_coalesced_frames = 16;
++		if (dev->features & NETIF_F_LRO) {
++			coal->rx_coalesce_usecs = 0;
++			coal->tx_coalesce_usecs = 0;
++			coal->rx_max_coalesced_frames = 0;
++			coal->tx_max_coalesced_frames = 0;
++		} else {
++			coal->rx_coalesce_usecs = 10;
++			coal->tx_coalesce_usecs = 10;
++			coal->rx_max_coalesced_frames = 16;
++			coal->tx_max_coalesced_frames = 16;
++		}
+ 		dev->ethtool_ops->set_coalesce(dev, coal);
+ 		kfree(coal);
+ 	}

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/iw_nes_050_to_2_6_24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/iw_nes_050_to_2_6_24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/iw_nes_050_to_2_6_24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:23:23.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -1118,19 +1118,19 @@ static ssize_t nes_show_wqm_quanta(struc
  static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
  					const char *buf, size_t count)

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/iw_nes_100_to_2_6_23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/iw_nes_100_to_2_6_23.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/iw_nes_100_to_2_6_23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/Kconfig nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig
---- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:38:45.000000000 -0500
 @@ -2,7 +2,6 @@ config INFINIBAND_NES
  	tristate "NetEffect RNIC Driver"
  	depends on PCI && INET && INFINIBAND
@@ -10,8 +10,8 @@
  	  This is a low-level driver for NetEffect RDMA enabled
  	  Network Interface Cards (RNIC).
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -360,10 +360,11 @@ struct ib_qp *nes_get_qp(struct ib_devic
   */
  static void nes_print_macaddr(struct net_device *netdev)
@@ -29,9 +29,9 @@
  
  /**
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_cm.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2008-11-07 15:26:56.000000000 -0600
-@@ -998,7 +998,6 @@ static int nes_addr_resolve_neigh(struct
+--- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1086,7 +1086,6 @@ static int nes_addr_resolve_neigh(struct
  	struct flowi fl;
  	struct neighbour *neigh;
  	int rc = -1;
@@ -39,7 +39,7 @@
  
  	memset(&fl, 0, sizeof fl);
  	fl.nl_u.ip4_u.daddr = htonl(dst_ip);
-@@ -1012,8 +1011,11 @@ static int nes_addr_resolve_neigh(struct
+@@ -1100,8 +1099,11 @@ static int nes_addr_resolve_neigh(struct
  	if (neigh) {
  		if (neigh->nud_state & NUD_VALID) {
  			nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -53,7 +53,7 @@
  			nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
  					     dst_ip, NES_ARP_ADD);
  			rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
-@@ -1042,7 +1044,6 @@ static struct nes_cm_node *make_cm_node(
+@@ -1130,7 +1132,6 @@ static struct nes_cm_node *make_cm_node(
  	int arpindex = 0;
  	struct nes_device *nesdev;
  	struct nes_adapter *nesadapter;
@@ -61,7 +61,7 @@
  
  	/* create an hte and cm_node for this instance */
  	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
-@@ -1111,8 +1112,11 @@ static struct nes_cm_node *make_cm_node(
+@@ -1197,8 +1198,11 @@ static struct nes_cm_node *make_cm_node(
  
  	/* copy the mac addr to node context */
  	memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
@@ -76,8 +76,8 @@
  	add_hte_node(cm_core, cm_node);
  	atomic_inc(&cm_nodes_created);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2008-11-07 15:38:18.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:38:45.000000000 -0500
 @@ -38,14 +38,9 @@
  #include <linux/ip.h>
  #include <linux/tcp.h>
@@ -90,10 +90,10 @@
 -module_param(nes_lro_max_aggr, uint, 0444);
 -MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
 -
- static u32 crit_err_count;
- u32 int_mod_timer_init;
- u32 int_mod_cq_depth_256;
-@@ -1540,25 +1535,6 @@ static void nes_rq_wqes_timeout(unsigned
+ static int wide_ppm_offset;
+ module_param(wide_ppm_offset, int, 0644);
+ MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm");
+@@ -1531,25 +1526,6 @@ static void nes_rq_wqes_timeout(unsigned
  }
  
  
@@ -119,7 +119,7 @@
  /**
   * nes_init_nic_qp
   */
-@@ -1783,14 +1759,6 @@ int nes_init_nic_qp(struct nes_device *n
+@@ -1773,14 +1749,6 @@ int nes_init_nic_qp(struct nes_device *n
  			jumbomode = 1;
  		nes_nic_init_timer_defaults(nesdev, jumbomode);
  	}
@@ -134,7 +134,7 @@
  	return 0;
  }
  
-@@ -2541,7 +2509,7 @@ static void nes_nic_napi_ce_handler(stru
+@@ -2556,7 +2524,7 @@ static void nes_nic_napi_ce_handler(stru
  {
  	struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
  
@@ -143,7 +143,7 @@
  }
  
  
-@@ -2576,13 +2544,10 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2591,13 +2559,10 @@ void nes_nic_ce_handler(struct nes_devic
  	u16 pkt_type;
  	u16 rqes_processed = 0;
  	u8 sq_cqes = 0;
@@ -157,7 +157,7 @@
  	do {
  		if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
  				NES_NIC_CQE_VALID) {
-@@ -2714,17 +2679,9 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2729,17 +2694,9 @@ void nes_nic_ce_handler(struct nes_devic
  							>> 16);
  					nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
  							nesvnic->netdev->name, vlan_tag);
@@ -178,7 +178,7 @@
  
  skip_rx_indicate0:
  				nesvnic->netdev->last_rx = jiffies;
-@@ -2755,8 +2712,6 @@ skip_rx_indicate0:
+@@ -2770,8 +2727,6 @@ skip_rx_indicate0:
  
  	} while (1);
  
@@ -188,18 +188,18 @@
  		barrier();
  		/* restart the queue if it had been stopped */
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.h nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:38:45.000000000 -0500
 @@ -33,8 +33,6 @@
  #ifndef __NES_HW_H
  #define __NES_HW_H
  
 -#include <linux/inet_lro.h>
 -
+ #define NES_PHY_TYPE_CX4       1
  #define NES_PHY_TYPE_1G        2
  #define NES_PHY_TYPE_IRIS      3
- #define NES_PHY_TYPE_ARGUS     4
-@@ -991,8 +989,6 @@ struct nes_hw_tune_timer {
+@@ -1005,8 +1003,6 @@ struct nes_hw_tune_timer {
  #define NES_TIMER_ENABLE_LIMIT      4
  #define NES_MAX_LINK_INTERRUPTS     128
  #define NES_MAX_LINK_CHECK          200
@@ -208,7 +208,7 @@
  
  struct nes_adapter {
  	u64              fw_ver;
-@@ -1168,7 +1164,6 @@ struct nes_vnic {
+@@ -1194,7 +1190,6 @@ struct nes_vnic {
  	u32               msg_enable;
  	/* u32 tx_avail; */
  	__be32            local_ipaddr;
@@ -216,7 +216,7 @@
  	spinlock_t           tx_lock;	/* could use netdev tx lock? */
  	struct timer_list    rq_wqes_timer;
  	u32                  nic_mem_size;
-@@ -1196,9 +1191,6 @@ struct nes_vnic {
+@@ -1222,9 +1217,6 @@ struct nes_vnic {
  	u8  of_device_registered;
  	u8  rdma_enabled;
  	u8  rx_checksum_disabled;
@@ -227,8 +227,8 @@
  
  struct nes_ib_device {
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
 @@ -96,35 +96,38 @@ static int nics_per_function = 1;
  /**
   * nes_netdev_poll
@@ -292,7 +292,7 @@
  	netif_stop_queue(netdev);
  	list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
  		first_nesvnic = container_of(list_pos, struct nes_vnic, list);
-@@ -797,14 +798,16 @@ static int nes_netdev_set_mac_address(st
+@@ -775,14 +776,16 @@ static int nes_netdev_set_mac_address(st
  	int i;
  	u32 macaddr_low;
  	u16 macaddr_high;
@@ -312,7 +312,7 @@
  	macaddr_high  = ((u16)netdev->dev_addr[0]) << 8;
  	macaddr_high += (u16)netdev->dev_addr[1];
  	macaddr_low   = ((u32)netdev->dev_addr[2]) << 24;
-@@ -909,11 +912,11 @@ static void nes_netdev_set_multicast_lis
+@@ -887,11 +890,11 @@ static void nes_netdev_set_multicast_lis
  			if (mc_index >= max_pft_entries_avaiable)
  				break;
  			if (multicast_addr) {
@@ -329,7 +329,7 @@
  				macaddr_high  = ((u16)multicast_addr->dmi_addr[0]) << 8;
  				macaddr_high += (u16)multicast_addr->dmi_addr[1];
  				macaddr_low   = ((u32)multicast_addr->dmi_addr[2]) << 24;
-@@ -1063,9 +1066,6 @@ static const char nes_ethtool_stringset[
+@@ -1040,9 +1043,6 @@ static const char nes_ethtool_stringset[
  	"CQ Depth 32",
  	"CQ Depth 128",
  	"CQ Depth 256",
@@ -339,17 +339,17 @@
  };
  
  #define NES_ETHTOOL_STAT_COUNT  ARRAY_SIZE(nes_ethtool_stringset)
-@@ -1263,9 +1263,6 @@ static void nes_netdev_get_ethtool_stats
- 	target_stat_values[52] = int_mod_cq_depth_32;
- 	target_stat_values[53] = int_mod_cq_depth_128;
- 	target_stat_values[54] = int_mod_cq_depth_256;
--	target_stat_values[55] = nesvnic->lro_mgr.stats.aggregated;
--	target_stat_values[56] = nesvnic->lro_mgr.stats.flushed;
--	target_stat_values[57] = nesvnic->lro_mgr.stats.no_desc;
+@@ -1240,9 +1240,6 @@ static void nes_netdev_get_ethtool_stats
+ 	target_stat_values[++index] = int_mod_cq_depth_32;
+ 	target_stat_values[++index] = int_mod_cq_depth_128;
+ 	target_stat_values[++index] = int_mod_cq_depth_256;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
  
  }
  
-@@ -1542,8 +1539,6 @@ static struct ethtool_ops nes_ethtool_op
+@@ -1525,8 +1522,6 @@ static struct ethtool_ops nes_ethtool_op
  	.set_sg = ethtool_op_set_sg,
  	.get_tso = ethtool_op_get_tso,
  	.set_tso = ethtool_op_set_tso,
@@ -358,7 +358,7 @@
  };
  
  
-@@ -1615,7 +1610,8 @@ struct net_device *nes_netdev_init(struc
+@@ -1598,7 +1593,8 @@ struct net_device *nes_netdev_init(struc
  	netdev->type = ARPHRD_ETHER;
  	netdev->features = NETIF_F_HIGHDMA;
  	netdev->ethtool_ops = &nes_ethtool_ops;
@@ -368,7 +368,7 @@
  	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
  	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
  	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
-@@ -1648,7 +1644,6 @@ struct net_device *nes_netdev_init(struc
+@@ -1630,7 +1626,6 @@ struct net_device *nes_netdev_init(struc
  
  	if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
  		netdev->features |= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/iw_nes_200_to_2_6_22.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/iw_nes_200_to_2_6_22.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/iw_nes_200_to_2_6_22.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:52:27.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -593,16 +593,22 @@ static int __devinit nes_probe(struct pc
  						nesdev->nesadapter->port_count;
  	}
@@ -35,9 +35,9 @@
  
  	tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.h nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h
---- linux-2.6/drivers/infiniband/hw/nes/nes.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2008-11-07 15:52:27.000000000 -0600
-@@ -276,14 +276,7 @@ static inline __le32 get_crc_value(struc
+--- linux-2.6/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:38:45.000000000 -0500
+@@ -274,14 +274,7 @@ static inline __le32 get_crc_value(struc
  	u32 crc_value;
  	crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad));
  

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/iw_nes_210_to_2_6_21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/iw_nes_210_to_2_6_21.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/iw_nes_210_to_2_6_21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,23 @@
+diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1549,6 +1549,11 @@ static void nes_netdev_vlan_rx_register(
+ 	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ }
+ 
++static void nes_netdev_vlan_rx_kill_vid(struct net_device *netdev, 
++					unsigned short vid)
++{
++}
++
+ 
+ /**
+  * nes_netdev_init - initialize network device
+@@ -1598,6 +1603,7 @@ struct net_device *nes_netdev_init(struc
+ 	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
+ 	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ 	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
++	netdev->vlan_rx_kill_vid = nes_netdev_vlan_rx_kill_vid;
+ 
+ 	/* Fill in the port structure */
+ 	nesvnic->netdev = netdev;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/iw_nes_300_to_2_6_13.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/iw_nes_300_to_2_6_13.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/iw_nes_300_to_2_6_13.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,7 +1,7 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_13_patch/drivers/infiniband/hw/nes/nes_nic.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_13_patch/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:53:09.000000000 -0600
-@@ -1640,7 +1640,6 @@ struct net_device *nes_netdev_init(struc
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_13_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1628,7 +1628,6 @@ struct net_device *nes_netdev_init(struc
  	netdev->dev_addr[3] = (u8)(u64temp>>16);
  	netdev->dev_addr[4] = (u8)(u64temp>>8);
  	netdev->dev_addr[5] = (u8)u64temp;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/iw_nes_400_to_2_6_9.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/iw_nes_400_to_2_6_9.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/iw_nes_400_to_2_6_9.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,7 +1,7 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_verbs.c nes.2_6_9_patch/drivers/infiniband/hw/nes/nes_verbs.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_verbs.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_9_patch/drivers/infiniband/hw/nes/nes_verbs.c	2008-11-07 15:53:36.000000000 -0600
-@@ -1120,6 +1120,8 @@ static int nes_setup_mmap_qp(struct nes_
+--- linux-2.6/drivers/infiniband/hw/nes/nes_verbs.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_9_patch/drivers/infiniband/hw/nes/nes_verbs.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1017,6 +1017,8 @@ static int nes_setup_mmap_qp(struct nes_
  {
  	void *mem;
  	struct nes_device *nesdev = nesvnic->nesdev;
@@ -10,7 +10,7 @@
  
  	nesqp->qp_mem_size = (sizeof(struct nes_hw_qp_wqe) * sq_size) +
  			(sizeof(struct nes_hw_qp_wqe) * rq_size) +
-@@ -1137,6 +1139,17 @@ static int nes_setup_mmap_qp(struct nes_
+@@ -1034,6 +1036,17 @@ static int nes_setup_mmap_qp(struct nes_
  			"host descriptor rings located @ %p (pa = 0x%08lX.) size = %u.\n",
  			mem, (unsigned long)nesqp->hwqp.sq_pbase, nesqp->qp_mem_size);
  
@@ -28,7 +28,7 @@
  	memset(mem, 0, nesqp->qp_mem_size);
  
  	nesqp->hwqp.sq_vbase = mem;
-@@ -1509,6 +1522,8 @@ static int nes_destroy_qp(struct ib_qp *
+@@ -1409,6 +1422,8 @@ static int nes_destroy_qp(struct ib_qp *
  	/* struct nes_vnic *nesvnic = to_nesvnic(ibqp->device); */
  	struct nes_ucontext *nes_ucontext;
  	struct ib_qp_attr attr;
@@ -37,7 +37,7 @@
  	struct iw_cm_id *cm_id;
  	struct iw_cm_event cm_event;
  	int ret;
-@@ -1552,6 +1567,17 @@ static int nes_destroy_qp(struct ib_qp *
+@@ -1452,6 +1467,17 @@ static int nes_destroy_qp(struct ib_qp *
  			if (nes_ucontext->first_free_wq > nesqp->mmap_sq_db_index) {
  				nes_ucontext->first_free_wq = nesqp->mmap_sq_db_index;
  			}

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/mlx4_0060_sysfs.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/mlx4_0060_sysfs.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/mlx4_0060_sysfs.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,18 +1,18 @@
-From 70e57d4e8e8df5b452a13f9b6a3c07f2df09e8a4 Mon Sep 17 00:00:00 2001
+From 20f52545712c4d0b91fb96df72ea5b1818685bc5 Mon Sep 17 00:00:00 2001
 From: Yevgeny Petrilin <yevgenyp@mellanox.co.il>
-Date: Thu, 4 Dec 2008 13:40:39 +0200
+Date: Thu, 22 Jan 2009 09:41:18 +0200
 Subject: [PATCH] mlx4: Sysfs backport for RHAS4
 
 Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.co.il>
 ---
- drivers/net/mlx4/main.c |   90 ++++++++++++++++++++++++++++++++--------------
- 1 files changed, 62 insertions(+), 28 deletions(-)
+ drivers/net/mlx4/main.c |   68 +++++++++++++++++++++++++++++++++++-----------
+ 1 files changed, 51 insertions(+), 17 deletions(-)
 
 diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
-index 7df678d..1c01c38 100644
+index f87ebbc..fce2589 100644
 --- a/drivers/net/mlx4/main.c
 +++ b/drivers/net/mlx4/main.c
-@@ -399,18 +399,13 @@ out:
+@@ -397,18 +397,13 @@ out:
  	return err;
  }
  
@@ -34,7 +34,7 @@
  		sprintf(buf, "auto (%s)\n", type);
  	else
  		sprintf(buf, "%s\n", type);
-@@ -418,14 +413,33 @@ static ssize_t show_port_type(struct device *dev,
+@@ -416,14 +411,33 @@ static ssize_t show_port_type(struct device *dev,
  	return strlen(buf);
  }
  
@@ -42,11 +42,7 @@
 -			     struct device_attribute *attr,
 -			     const char *buf, size_t count)
 +static ssize_t show_port_type1(struct device *dev, char *buf)
- {
--	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
--						   port_attr);
--	struct mlx4_dev *mdev = info->dev;
--	struct mlx4_priv *priv = mlx4_priv(mdev);
++{
 +	struct pci_dev *pdev = to_pci_dev(dev);
 +	struct mlx4_dev *mdev = pci_get_drvdata(pdev);
 +
@@ -67,63 +63,18 @@
 +		return -ENODEV;
 +}
 +
-+static ssize_t store_port(struct mlx4_dev *dev, int port,
++static ssize_t store_port(struct mlx4_dev *mdev, int port,
 +			  const char *buf, size_t count)
-+{
-+	struct mlx4_priv *priv = mlx4_priv(dev);
+ {
+-	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
+-						   port_attr);
+-	struct mlx4_dev *mdev = info->dev;
+ 	struct mlx4_priv *priv = mlx4_priv(mdev);
 +	struct mlx4_port_info *info = &priv->port[port];
  	enum mlx4_port_type types[MLX4_MAX_PORTS];
- 	enum mlx4_port_type tmp_type;
+ 	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
  	int i;
-@@ -438,50 +452,70 @@ static ssize_t set_port_type(struct device *dev,
- 	else if (!strcmp(buf, "auto\n"))
- 		info->tmp_type = MLX4_PORT_TYPE_AUTO;
- 	else {
--		mlx4_err(mdev, "%s is not supported port type\n", buf);
-+		mlx4_err(dev, "%s is not supported port type\n", buf);
- 		return -EINVAL;
- 	}
- 
- 	mutex_lock(&priv->port_mutex);
--	mdev->caps.possible_type[info->port] = info->tmp_type;
-+	dev->caps.possible_type[info->port] = info->tmp_type; 
- 	if (info->tmp_type == MLX4_PORT_TYPE_AUTO) {
--		err = mlx4_SENSE_PORT(mdev, info->port, &tmp_type);
-+		err = mlx4_SENSE_PORT(dev, info->port, &tmp_type);
- 		if (!err && (tmp_type == MLX4_PORT_TYPE_ETH ||
- 			     tmp_type == MLX4_PORT_TYPE_IB)) {
- 			info->tmp_type = tmp_type;
- 		}
- 	}
- 
--	for (i = 0; i < mdev->caps.num_ports; i++)
-+	for (i = 0; i < dev->caps.num_ports; i++)
- 		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
--					mdev->caps.possible_type[i+1];
-+					dev->caps.possible_type[i+1];
- 
- 	if (priv->trig) {
--		if (++priv->changed_ports < mdev->caps.num_ports)
-+		if (++priv->changed_ports < dev->caps.num_ports)
- 			goto out;
- 		else
- 			priv->trig = priv->changed_ports = 0;
- 	}
--	err = mlx4_check_port_params(mdev, types);
-+	err = mlx4_check_port_params(dev, types);
- 	if (err)
- 		goto out;
- 
--	for (i = 0; i < mdev->caps.num_ports; i++) {
-+	for (i = 0; i < dev->caps.num_ports; i++) {
- 		priv->port[i + 1].tmp_type = 0;
- 	}
- 
--	err = mlx4_change_port_types(mdev, types);
-+	err = mlx4_change_port_types(dev, types);
- 
- out:
- 	mutex_unlock(&priv->port_mutex);
+@@ -482,6 +496,24 @@ out:
  	return err ? err : count;
  }
  
@@ -145,9 +96,11 @@
 +	return store_port(mdev, 2, buf, count);
 +}
 +
-+
- static ssize_t trigger_port(struct device *dev,
--			    struct device_attribute *attr,
+ /*
+  * This function is invoked if user wants to modify all port types
+  * at once. We will wait for all the ports to be assigned new values,
+@@ -491,7 +523,9 @@ static ssize_t trigger_port(struct device *dev,
+ 			    struct device_attribute *attr,
  			    const char *buf, size_t count)
  {
 -	struct mlx4_priv *priv = container_of(attr, struct mlx4_priv, trigger_attr);
@@ -157,7 +110,7 @@
  	if (!priv)
  		return -ENODEV;
  
-@@ -1121,8 +1155,8 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
+@@ -1131,8 +1165,8 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
  
  	sprintf(info->dev_name, "mlx4_port%d", port);
  	memcpy(&info->port_attr.attr, &attr, sizeof(attr));
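
The hunks above retarget the mlx4 port-type attributes at the older two-argument sysfs show/store prototypes, recovering the device from PCI driver data instead of container_of() on a struct device_attribute. A minimal sketch of the converted pair for port 1, assembled from the hunks; the attribute registration and the exact port-type formatting are not in this diff and are assumptions:

/* Sketch only: the two-argument prototypes carry no attribute pointer, so
 * each port gets its own wrapper and the device comes from pci_get_drvdata()
 * as in the hunks above.  The body of the show handler is an assumption. */
static ssize_t show_port_type1(struct device *dev, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct mlx4_dev *mdev = pci_get_drvdata(pdev);

	if (!mdev)
		return -ENODEV;
	sprintf(buf, "%s\n",
		mdev->caps.port_type[1] == MLX4_PORT_TYPE_IB ? "ib" : "eth");
	return strlen(buf);
}

static ssize_t store_port_type1(struct device *dev,
				const char *buf, size_t count)
{
	struct mlx4_dev *mdev = pci_get_drvdata(to_pci_dev(dev));

	if (!mdev)
		return -ENODEV;
	return store_port(mdev, 1, buf, count);	/* store_port() is in the hunk */
}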

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/mlx4_en_0099_no_multiqueue.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/mlx4_en_0099_no_multiqueue.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/mlx4_en_0099_no_multiqueue.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,73 @@
+diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
+index bce700a..a9ee0d7 100644
+--- a/drivers/net/mlx4/en_netdev.c
++++ b/drivers/net/mlx4/en_netdev.c
+@@ -963,7 +963,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	int i;
+ 	int err;
+ 
+-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
++	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+ 	if (dev == NULL) {
+ 		mlx4_err(mdev, "Net device allocation failed\n");
+ 		return -ENOMEM;
+@@ -1036,7 +1036,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	dev->open = &mlx4_en_open;
+ 	dev->stop = &mlx4_en_close;
+ 	dev->hard_start_xmit = &mlx4_en_xmit;
+-	dev->select_queue = &mlx4_en_select_queue;
+ 	dev->get_stats = &mlx4_en_get_stats;
+ 	dev->set_multicast_list = &mlx4_en_set_multicast;
+ 	dev->set_mac_address = &mlx4_en_set_mac;
+diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
+index 3d8246f..72e166f 100644
+--- a/drivers/net/mlx4/en_tx.c
++++ b/drivers/net/mlx4/en_tx.c
+@@ -392,7 +392,7 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+ 			 *   transmission on that ring would stop the queue.
+ 			 */
+ 			ring->blocked = 0;
+-			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
++			netif_wake_queue(dev);
+ 			priv->port_stats.wake_queue++;
+ 		}
+ 	}
+@@ -612,7 +612,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
+ 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ }
+ 
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
++static int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	u16 vlan_tag = 0;
+@@ -703,7 +703,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+-	tx_ind = skb->queue_mapping;
++	tx_ind = mlx4_en_select_queue(dev, skb);
+ 	ring = &priv->tx_ring[tx_ind];
+ 	if (priv->vlgrp && vlan_tx_tag_present(skb))
+ 		vlan_tag = vlan_tx_tag_get(skb);
+@@ -713,7 +713,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ 		/* every full Tx ring stops queue.
+ 		 * TODO: implement multi-queue support (per-queue stop) */
+-		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
++		netif_stop_queue(dev);
+ 		ring->blocked = 1;
+ 		priv->port_stats.queue_stopped++;
+ 
+diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
+index adef17c..995e318 100644
+--- a/drivers/net/mlx4/mlx4_en.h
++++ b/drivers/net/mlx4/mlx4_en.h
+@@ -521,7 +521,6 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq);
+ void mlx4_en_poll_tx_cq(unsigned long data);
+ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+ 
+ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+ 			   u32 size, u16 stride);
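
The new 0099 patch strips the multiqueue netdev API (alloc_etherdev_mq(), netif_tx_stop_queue()/netif_tx_wake_queue(), dev->select_queue) for kernels that predate it: the ring is chosen inside the transmit handler and the single global queue is stopped and woken. A rough sketch of the resulting transmit path, with everything outside the hunks (the ring-full test, descriptor posting, the busy return) treated as assumptions:

/* Sketch only: with one netdev queue the ring index comes from
 * mlx4_en_select_queue() (now static, as in the hunk) and flow control uses
 * netif_stop_queue()/netif_wake_queue() on the whole device. */
static int mlx4_en_xmit_sketch(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int tx_ind = mlx4_en_select_queue(dev, skb);	/* was skb->queue_mapping */
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];

	if (unlikely(ring_full(ring))) {		/* assumed helper */
		netif_stop_queue(dev);			/* one queue for all rings */
		ring->blocked = 1;
		priv->port_stats.queue_stopped++;
		return NETDEV_TX_BUSY;
	}

	/* ... build and post the descriptor as in mlx4_en_xmit() ... */
	return NETDEV_TX_OK;
}

On the completion side the same substitution appears in mlx4_en_process_tx_cq(), where netif_wake_queue() on the device replaces the per-ring netif_tx_wake_queue().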

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/mlx4_en_0100_to_2.6.24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/mlx4_en_0100_to_2.6.24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/mlx4_en_0100_to_2.6.24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,33 +1,5 @@
-From 6d0a6cc6981404e95297924c9a8f169b3447a98e Mon Sep 17 00:00:00 2001
-From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
-Date: Thu, 2 Oct 2008 11:26:30 +0300
-Subject: [PATCH] mlx4_en: LRO backport patch
-
-Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
----
- drivers/net/mlx4/Makefile    |    2 +-
- drivers/net/mlx4/en_cq.c     |   23 ++-
- drivers/net/mlx4/en_lro.c    |  529 ++++++++++++++++++++++++++++++++++++++++++
- drivers/net/mlx4/en_netdev.c |    5 +-
- drivers/net/mlx4/en_params.c |   19 +--
- drivers/net/mlx4/en_rx.c     |  121 +++-------
- drivers/net/mlx4/en_tx.c     |    1 +
- drivers/net/mlx4/mlx4_en.h   |   60 +++++-
- 8 files changed, 646 insertions(+), 114 deletions(-)
- create mode 100644 drivers/net/mlx4/en_lro.c
-
-diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
-index acb1b91..e6bfdec 100644
---- a/drivers/net/mlx4/Makefile
-+++ b/drivers/net/mlx4/Makefile
-@@ -5,4 +5,4 @@ mlx4_core-y :=  alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-
- obj-$(CONFIG_MLX4_EN)                  += mlx4_en.o
-
--mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o
-+mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o en_lro.o
 diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
-index cf9c5df..2348bfc 100644
+index a4d6e46..440633a 100644
 --- a/drivers/net/mlx4/en_cq.c
 +++ b/drivers/net/mlx4/en_cq.c
 @@ -34,6 +34,7 @@
@@ -38,7 +10,7 @@
  
  #include "mlx4_en.h"
  
-@@ -100,8 +101,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -106,8 +107,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  		cq->timer.function = mlx4_en_poll_tx_cq;
  		cq->timer.data = (unsigned long) cq;
  	} else {
@@ -58,7 +30,7 @@
  	}
  
  	return 0;
-@@ -123,8 +133,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -129,8 +139,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  
  	if (cq->is_tx)
  		del_timer(&cq->timer);
@@ -74,548 +46,8 @@
  
  	mlx4_cq_free(mdev->dev, &cq->mcq);
  }
-diff --git a/drivers/net/mlx4/en_lro.c b/drivers/net/mlx4/en_lro.c
-new file mode 100644
-index 0000000..9d17376
---- /dev/null
-+++ b/drivers/net/mlx4/en_lro.c
-@@ -0,0 +1,534 @@
-+/*
-+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses.  You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ *     Redistribution and use in source and binary forms, with or
-+ *     without modification, are permitted provided that the following
-+ *     conditions are met:
-+ *
-+ *      - Redistributions of source code must retain the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer.
-+ *
-+ *      - Redistributions in binary form must reproduce the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer in the documentation and/or other materials
-+ *        provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ */
-+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <net/tcp.h>
-+#include <linux/if_vlan.h>
-+#include <linux/delay.h>
-+
-+#include "mlx4_en.h"
-+
-+/* LRO hash function - using sum of source and destination port LSBs is
-+ * good enough */
-+#define LRO_INDEX(th, size) \
-+	((*((u8*) &th->source + 1) + *((u8*) &th->dest + 1)) & (size - 1))
-+
-+/* #define CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+static void mlx4_en_lro_validate(struct mlx4_en_priv* priv, struct mlx4_en_lro *lro)
-+{
-+	int i;
-+	int size, size2;
-+	struct sk_buff *skb = lro->skb;
-+	skb_frag_t *frags;
-+	int len, len2;
-+	int cur_skb = 0;
-+
-+	/* Sum fragment sizes of first skb */
-+	len = skb->len;
-+	size = skb_headlen(skb);
-+	frags = skb_shinfo(skb)->frags;
-+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+		size += frags[i].size;
-+	}
-+
-+	/* Add in fragments of linked skb's */
-+	skb = skb_shinfo(skb)->frag_list;
-+	while (skb) {
-+		cur_skb++;
-+		len2 = skb->len;
-+		if (skb_headlen(skb)) {
-+			mlx4_err(priv->mdev, "Bad LRO format: non-zero headlen "
-+				  "in fraglist (skb:%d)\n", cur_skb);
-+			return;
-+		}
-+
-+		size2 = 0;
-+		frags = skb_shinfo(skb)->frags;
-+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+			size2 += frags[i].size;
-+		}
-+
-+		if (size2 != len2) {
-+			mlx4_err(priv->mdev, "Bad skb size:%d in LRO fraglist. "
-+			          "Expected:%d (skb:%d)\n", size2, len2, cur_skb);
-+			return;
-+		}
-+		size += size2;
-+		skb = skb->next;
-+	}
-+
-+	if (size != len)
-+		mlx4_err(priv->mdev, "Bad LRO size:%d expected:%d\n", size, len);
-+}
-+#endif /* MLX4_EN_DEBUG_LRO */
-+
-+static void mlx4_en_lro_flush_single(struct mlx4_en_priv* priv,
-+		   struct mlx4_en_rx_ring* ring, struct mlx4_en_lro *lro)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct sk_buff *skb = lro->skb;
-+	struct iphdr *iph = (struct iphdr *) skb->data;
-+	struct tcphdr *th = (struct tcphdr *)(iph + 1);
-+	unsigned int headlen = skb_headlen(skb);
-+	__wsum tcp_hdr_csum;
-+	u32 *ts;
-+
-+	/* Update IP length and checksum */
-+	iph->tot_len = htons(lro->tot_len);
-+	iph->check = 0;
-+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-+
-+	/* Update latest TCP ack, window, psh, and timestamp */
-+	th->ack_seq = lro->ack_seq;
-+	th->window = lro->window;
-+	th->psh = !!lro->psh;
-+	if (lro->has_timestamp) {
-+		ts = (u32 *) (th + 1);
-+		ts[1] = htonl(lro->tsval);
-+		ts[2] = lro->tsecr;
-+	}
-+	th->check = 0;
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, 0);
-+	lro->data_csum = csum_add(lro->data_csum, tcp_hdr_csum);
-+	th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
-+				      lro->tot_len - (iph->ihl << 2),
-+				      IPPROTO_TCP, lro->data_csum);
-+
-+	/* Update skb */
-+	skb->len = lro->tot_len;
-+	skb->data_len = lro->tot_len - headlen;
-+	skb->truesize = skb->len + sizeof(struct sk_buff);
-+	skb_shinfo(skb)->gso_size = lro->mss;
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+	mlx4_en_lro_validate(priv, lro);
-+#endif /* CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+	/* Push it up the stack */
-+	if (priv->vlgrp && lro->has_vlan)
-+		vlan_hwaccel_receive_skb(skb, priv->vlgrp,
-+					be16_to_cpu(lro->vlan_prio));
-+	else
-+		netif_receive_skb(skb);
-+	priv->dev->last_rx = jiffies;
-+
-+	/* Increment stats */
-+	priv->port_stats.lro_flushed++;
-+
-+	/* Move session back to the free list */
-+	hlist_del(&lro->node);
-+	hlist_del(&lro->flush_node);
-+	hlist_add_head(&lro->node, &ring->lro_free);
-+}
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_flush, flush_node) {
-+		if (all || time_after(jiffies, lro->expires))
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+	}
-+}
-+
-+static inline int mlx4_en_lro_append(struct mlx4_en_priv *priv,
-+				   struct mlx4_en_lro *lro,
-+				   struct mlx4_en_rx_desc *rx_desc,
-+				   struct skb_frag_struct *skb_frags,
-+				   struct mlx4_en_rx_alloc *page_alloc,
-+				   unsigned int data_len,
-+				   int hlen)
-+{
-+	struct sk_buff *skb = lro->skb_last;
-+	struct skb_shared_info *info;
-+	struct skb_frag_struct *frags_copy;
-+	int nr_frags;
-+
-+	if (skb_shinfo(skb)->nr_frags + priv->num_frags > MAX_SKB_FRAGS)
-+		return -ENOMEM;
-+
-+	info = skb_shinfo(skb);
-+
-+	/* Copy fragments from descriptor ring to skb */
-+	frags_copy = info->frags + info->nr_frags;
-+	nr_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
-+						frags_copy,
-+						page_alloc,
-+						data_len + hlen);
-+	if (!nr_frags) {
-+		mlx4_dbg(DRV, priv, "Failed completing rx desc during LRO append\n");
-+		return -ENOMEM;
-+	}
-+
-+	/* Skip over headers */
-+	frags_copy[0].page_offset += hlen;
-+
-+	if (nr_frags == 1)
-+		frags_copy[0].size = data_len;
-+	else {
-+		/* Adjust size of last fragment to match packet length.
-+		 * Note: if this fragment is also the first one, the
-+		 *       operation is completed in the next line */
-+		frags_copy[nr_frags - 1].size = hlen + data_len -
-+				priv->frag_info[nr_frags - 1].frag_prefix_size;
-+
-+		/* Adjust size of first fragment */
-+		frags_copy[0].size -= hlen;
-+	}
-+
-+	/* Update skb bookkeeping */
-+	skb->len += data_len;
-+	skb->data_len += data_len;
-+	info->nr_frags += nr_frags;
-+	return 0;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_find_session(struct mlx4_en_dev *mdev,
-+						       struct mlx4_en_rx_ring *ring,
-+						       struct iphdr *iph,
-+						       struct tcphdr *th)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node;
-+	int index = LRO_INDEX(th, mdev->profile.num_lro);
-+	struct hlist_head *list = &ring->lro_hash[index];
-+
-+	hlist_for_each_entry(lro, node, list, node) {
-+		if (lro->sport_dport == *((u32*) &th->source) &&
-+		    lro->saddr == iph->saddr &&
-+		    lro->daddr == iph->daddr)
-+			return lro;
-+	}
-+	return NULL;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_alloc_session(struct mlx4_en_priv *priv,
-+							struct mlx4_en_rx_ring *ring)
-+{
-+	return hlist_empty(&ring->lro_free) ? NULL :
-+		hlist_entry(ring->lro_free.first, struct mlx4_en_lro, node);
-+}
-+
-+static __wsum mlx4_en_lro_tcp_data_csum(struct iphdr *iph,
-+					struct tcphdr *th, int len)
-+{
-+	__wsum tcp_csum;
-+	__wsum tcp_hdr_csum;
-+	__wsum tcp_ps_hdr_csum;
-+
-+	tcp_csum = ~csum_unfold(th->check);
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, tcp_csum);
-+
-+	tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-+					     len + (th->doff << 2),
-+					     IPPROTO_TCP, 0);
-+
-+	return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
-+			tcp_ps_hdr_csum);
-+}
-+
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+					  struct mlx4_en_rx_desc *rx_desc,
-+					  struct skb_frag_struct *skb_frags,
-+					  unsigned int length,
-+					  struct mlx4_cqe *cqe)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct mlx4_en_lro *lro;
-+	struct sk_buff *skb;
-+	struct iphdr *iph;
-+	struct tcphdr *th;
-+	dma_addr_t dma;
-+	int tcp_hlen;
-+	int tcp_data_len;
-+	int hlen;
-+	u16 ip_len;
-+	void *va;
-+	u32 *ts;
-+	u32 seq;
-+	u32 tsval = (u32) ~0UL;
-+	u32 tsecr = 0;
-+	u32 ack_seq;
-+	u16 window;
-+
-+	/* This packet is eligible for LRO if it is:
-+	 * - DIX Ethernet (type interpretation)
-+	 * - TCP/IP (v4)
-+	 * - without IP options
-+	 * - not an IP fragment */
-+	if (!mlx4_en_can_lro(cqe->status))
-+			return -1;
-+
-+	/* Get pointer to TCP header. We already know that the packet is DIX Ethernet/IPv4/TCP
-+	 * with no VLAN (HW stripped it) and no IP options */
-+	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
-+	iph = va + ETH_HLEN;
-+	th = (struct tcphdr *)(iph + 1);
-+
-+	/* Synchronsize headers for processing */
-+	dma = be64_to_cpu(rx_desc->data[0].addr);
-+#define MAX_LRO_HEADER		(ETH_HLEN + \
-+				 sizeof(*iph) + \
-+				 sizeof(*th) + \
-+				 TCPOLEN_TSTAMP_ALIGNED)
-+	dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
-+				      MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+
-+	/* We only handle aligned timestamp options */
-+	tcp_hlen = (th->doff << 2);
-+	if (tcp_hlen == sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) {
-+		ts = (u32*) (th + 1);
-+		if (unlikely(*ts != htonl((TCPOPT_NOP << 24) |
-+					  (TCPOPT_NOP << 16) |
-+					  (TCPOPT_TIMESTAMP << 8) |
-+					  TCPOLEN_TIMESTAMP)))
-+			goto sync_device;
-+		tsval = ntohl(ts[1]);
-+		tsecr = ts[2];
-+	} else if (tcp_hlen != sizeof(*th))
-+		goto sync_device;
-+	
-+
-+	/* At this point we know we have a TCP packet that is likely to be
-+	 * eligible for LRO. Therefore, see now if we have an oustanding
-+	 * session that corresponds to this packet so we could flush it if
-+	 * something still prevents LRO */
-+	lro = mlx4_en_lro_find_session(mdev, ring, iph, th);
-+
-+	/* ensure no bits set besides ack or psh */
-+	if (th->fin || th->syn || th->rst || th->urg || th->ece ||
-+	    th->cwr || !th->ack) {
-+		if (lro) {
-+			/* First flush session to keep packets in-order */
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		}
-+		goto sync_device;
-+	}
-+
-+	/* Get ip length and verify that the frame is big enough */
-+	ip_len = ntohs(iph->tot_len);
-+	if (unlikely(length < ETH_HLEN + ip_len)) {
-+		mlx4_warn(mdev, "Cannot LRO - ip payload exceeds frame!\n");
-+		goto sync_device;
-+	}
-+
-+	/* Get TCP payload length */
-+	tcp_data_len = ip_len - tcp_hlen - sizeof(struct iphdr);
-+	seq = ntohl(th->seq);
-+	if (!tcp_data_len)
-+		goto flush_session;
-+
-+	if (lro) {
-+		/* Check VLAN tag */
-+		if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+			if (cqe->sl_vid != lro->vlan_prio || !lro->has_vlan) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else if (lro->has_vlan) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* Check sequence number */
-+		if (unlikely(seq != lro->next_seq)) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* If the cummulative IP length is over 64K, flush and start
-+		 * a new session */
-+		if (lro->tot_len + tcp_data_len > 0xffff) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto new_session;
-+		}
-+
-+		/* Check timestamps */
-+		if (tcp_hlen != sizeof(*th)) {
-+			if (unlikely(lro->tsval > tsval || !tsecr))
-+				goto sync_device;
-+		}
-+
-+		window = th->window;
-+		ack_seq = th->ack_seq;
-+		if (likely(tcp_data_len)) {
-+			/* Append the data! */
-+			hlen = ETH_HLEN + sizeof(struct iphdr) + tcp_hlen;
-+			if (mlx4_en_lro_append(priv, lro, rx_desc, skb_frags,
-+							ring->page_alloc,
-+							tcp_data_len, hlen)) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else {
-+			/* No data */
-+			dma_sync_single_range_for_device(&mdev->dev->pdev->dev, dma,
-+							 0, MAX_LRO_HEADER,
-+							 DMA_FROM_DEVICE);
-+		}
-+
-+		/* Update session */
-+		lro->psh |= th->psh;
-+		lro->next_seq += tcp_data_len;
-+		lro->data_csum = csum_block_add(lro->data_csum,
-+					mlx4_en_lro_tcp_data_csum(iph, th,
-+								  tcp_data_len),
-+					lro->tot_len);
-+		lro->tot_len += tcp_data_len;
-+		lro->tsval = tsval;
-+		lro->tsecr = tsecr;
-+		lro->ack_seq = ack_seq;
-+		lro->window = window;
-+		if (tcp_data_len > lro->mss)
-+			lro->mss = tcp_data_len;
-+		priv->port_stats.lro_aggregated++;
-+		if (th->psh)
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		return 0;
-+	}
-+
-+new_session:
-+	if (th->psh)
-+		goto sync_device;
-+	lro = mlx4_en_lro_alloc_session(priv, ring);
-+	if (lro) {
-+		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags, ring->page_alloc,
-+							     ETH_HLEN + ip_len);
-+		if (skb) {
-+			int index;
-+
-+			/* Add in the skb */
-+			lro->skb = skb;
-+			lro->skb_last = skb;
-+			skb->protocol = eth_type_trans(skb, priv->dev);
-+			skb->ip_summed = CHECKSUM_UNNECESSARY;
-+
-+			/* Initialize session */
-+			lro->saddr = iph->saddr;
-+			lro->daddr = iph->daddr;
-+			lro->sport_dport = *((u32*) &th->source);
-+
-+			lro->next_seq = seq + tcp_data_len;
-+			lro->tot_len = ip_len;
-+			lro->psh = th->psh;
-+			lro->ack_seq = th->ack_seq;
-+			lro->window = th->window;
-+			lro->mss = tcp_data_len;
-+			lro->data_csum = mlx4_en_lro_tcp_data_csum(iph, th,
-+						tcp_data_len);
-+
-+			/* Handle vlans */
-+			if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+				lro->vlan_prio = cqe->sl_vid;
-+				lro->has_vlan = 1;
-+			} else
-+				lro->has_vlan = 0;
-+
-+			/* Handle timestamps */
-+			if (tcp_hlen != sizeof(*th)) {
-+				lro->tsval = tsval;
-+				lro->tsecr = tsecr;
-+				lro->has_timestamp = 1;
-+			} else {
-+				lro->tsval = (u32) ~0UL;
-+				lro->has_timestamp = 0;
-+			}
-+
-+			/* Activate this session */
-+			lro->expires = jiffies + HZ / 25;
-+			hlist_del(&lro->node);
-+			index = LRO_INDEX(th, mdev->profile.num_lro);
-+
-+			hlist_add_head(&lro->node, &ring->lro_hash[index]);
-+			hlist_add_head(&lro->flush_node, &ring->lro_flush);
-+			priv->port_stats.lro_aggregated++;
-+			return 0;
-+		}
-+	} else {
-+		priv->port_stats.lro_no_desc++;
-+	}
-+
-+flush_session:
-+	if (lro)
-+		mlx4_en_lro_flush_single(priv, ring, lro);
-+sync_device:
-+	dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
-+					 MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+	return -1;
-+}
-+
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_free, node) {
-+		hlist_del(&lro->node);
-+		kfree(lro);
-+	}
-+	kfree(ring->lro_hash);
-+}
-+
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro)
-+{
-+	struct mlx4_en_lro *lro;
-+	int i;
-+
-+	INIT_HLIST_HEAD(&ring->lro_free);
-+	INIT_HLIST_HEAD(&ring->lro_flush);
-+	ring->lro_hash = kmalloc(sizeof(struct hlist_head) * num_lro,
-+				 GFP_KERNEL);
-+	if (!ring->lro_hash)
-+		return -ENOMEM;
-+
-+	for (i = 0; i < num_lro; i++) {
-+		INIT_HLIST_HEAD(&ring->lro_hash[i]);
-+		lro = kzalloc(sizeof(struct mlx4_en_lro), GFP_KERNEL);
-+		if (!lro) {
-+			mlx4_en_lro_destroy(ring);
-+			return -ENOMEM;
-+		}
-+		INIT_HLIST_NODE(&lro->node);
-+		INIT_HLIST_NODE(&lro->flush_node);
-+		hlist_add_head(&lro->node, &ring->lro_free);
-+	}
-+	return 0;
-+}
-+
-+
 diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
-index 6199f4c..92f91d2 100644
+index 2d57ae6..3b394d7 100644
 --- a/drivers/net/mlx4/en_netdev.c
 +++ b/drivers/net/mlx4/en_netdev.c
 @@ -333,7 +333,8 @@ void mlx4_en_netpoll(struct net_device *dev)
@@ -625,10 +57,10 @@
 -		napi_synchronize(&cq->napi);
 +		while (test_bit(__LINK_STATE_RX_SCHED, &cq->poll_dev->state))
 +			msleep(1);
- 		mlx4_en_process_rx_cq(dev, cq, 0);
- 		spin_unlock_irqrestore(&cq->lock, flags);
- 	}
-@@ -757,8 +758,6 @@ void mlx4_en_stop_port(struct net_device *dev)
+ 		if (priv->rx_ring[i].use_frags)
+ 			mlx4_en_process_rx_cq(dev, cq, 0);
+ 		else
+@@ -755,8 +756,6 @@ void mlx4_en_stop_port(struct net_device *dev)
  	/* Free RX Rings */
  	for (i = 0; i < priv->rx_ring_num; i++) {
  		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -637,226 +69,11 @@
  		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
  	}
  }
-diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
-index 6e96d0a..2b0ca84 100644
---- a/drivers/net/mlx4/en_params.c
-+++ b/drivers/net/mlx4/en_params.c
-@@ -142,21 +142,6 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
-  * Ethtool support
-  */
- 
--static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
--{
--	int i;
--
--	priv->port_stats.lro_aggregated = 0;
--	priv->port_stats.lro_flushed = 0;
--	priv->port_stats.lro_no_desc = 0;
--
--	for (i = 0; i < priv->rx_ring_num; i++) {
--		priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
--		priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
--		priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
--	}
--}
--
- static void
- mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
- {
-@@ -266,8 +251,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
- 
- 	spin_lock_bh(&priv->stats_lock);
- 
--	mlx4_en_update_lro_stats(priv);
--
- 	for (i = 0; i < NUM_MAIN_STATS; i++)
- 		data[index++] = ((unsigned long *) &priv->stats)[i];
- 	for (i = 0; i < NUM_PORT_STATS; i++)
-@@ -457,7 +440,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
- 	.get_rx_csum = mlx4_en_get_rx_csum,
- 	.set_rx_csum = mlx4_en_set_rx_csum,
- 	.get_tx_csum = ethtool_op_get_tx_csum,
--	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
-+	.set_tx_csum = ethtool_op_set_tx_csum,
- 	.get_strings = mlx4_en_get_strings,
- 	.get_sset_count = mlx4_en_get_sset_count,
- 	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
 diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
-index 5a95d74..16f07bc 100644
+index 9d144cd..14b5faf 100644
 --- a/drivers/net/mlx4/en_rx.c
 +++ b/drivers/net/mlx4/en_rx.c
-@@ -37,6 +37,7 @@
- #include <linux/if_ether.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/etherdevice.h>
- 
- #include "mlx4_en.h"
- 
-@@ -51,18 +52,6 @@ static void mlx4_en_srq_event(struct mlx
- 	return;
- }
- 
--static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
--				   void **ip_hdr, void **tcpudp_hdr,
--				   u64 *hdr_flags, void *priv)
--{
--	*mac_hdr = page_address(frags->page) + frags->page_offset;
--	*ip_hdr = *mac_hdr + ETH_HLEN;
--	*tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
--	*hdr_flags = LRO_IPV4 | LRO_TCP;
--
--	return 0;
--}
--
- static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
- 			      struct mlx4_en_rx_desc *rx_desc,
- 			      struct skb_frag_struct *skb_frags,
-@@ -372,23 +361,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- 	}
- 	ring->buf = ring->wqres.buf.direct.buf;
- 
--	/* Configure lro mngr */
--	memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
--	ring->lro.dev = priv->dev;
--	ring->lro.features = LRO_F_NAPI;
--	ring->lro.frag_align_pad = NET_IP_ALIGN;
--	ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
--	ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
--	ring->lro.max_desc = mdev->profile.num_lro;
--	ring->lro.max_aggr = MAX_SKB_FRAGS;
--	ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
--				    sizeof(struct net_lro_desc),
--				    GFP_KERNEL);
--	if (!ring->lro.lro_arr) {
--		mlx4_err(mdev, "Failed to allocate lro array\n");
-+	/* Allocate LRO sessions */
-+	if (mlx4_en_lro_init(ring, mdev->profile.num_lro)) {
-+		mlx4_err(mdev, "Failed allocating lro sessions\n");
- 		goto err_map;
- 	}
--	ring->lro.get_frag_header = mlx4_en_get_frag_header;
- 
- 	return 0;
- 
-@@ -501,7 +478,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 
--	kfree(ring->lro.lro_arr);
-+	mlx4_en_lro_destroy(ring);
- 	mlx4_en_unmap_buffer(&ring->wqres.buf);
- 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
- 	vfree(ring->rx_info);
-@@ -522,12 +499,12 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- 
- 
- /* Unmap a completed descriptor and free unused pages */
--static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
--				    struct mlx4_en_rx_desc *rx_desc,
--				    struct skb_frag_struct *skb_frags,
--				    struct skb_frag_struct *skb_frags_rx,
--				    struct mlx4_en_rx_alloc *page_alloc,
--				    int length)
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct mlx4_en_frag_info *frag_info;
-@@ -570,11 +547,11 @@ fail:
- }
- 
- 
--static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
--				      struct mlx4_en_rx_desc *rx_desc,
--				      struct skb_frag_struct *skb_frags,
--				      struct mlx4_en_rx_alloc *page_alloc,
--				      unsigned int length)
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct sk_buff *skb;
-@@ -663,11 +640,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 	struct mlx4_cqe *cqe;
- 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
- 	struct skb_frag_struct *skb_frags;
--	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
- 	struct mlx4_en_rx_desc *rx_desc;
- 	struct sk_buff *skb;
- 	int index;
--	int nr;
- 	unsigned int length;
- 	int polled = 0;
- 	int ip_summed;
-@@ -718,38 +693,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 			if ((cqe->status & MLX4_CQE_STATUS_IPOK) &&
- 			    (cqe->checksum == 0xffff)) {
- 				priv->port_stats.rx_chksum_good++;
--				/* This packet is eligible for LRO if it is:
--				 * - DIX Ethernet (type interpretation)
--				 * - TCP/IP (v4)
--				 * - without IP options
--				 * - not an IP fragment */
--				if (mlx4_en_can_lro(cqe->status) &&
--				    dev->features & NETIF_F_LRO) {
--
--					nr = mlx4_en_complete_rx_desc(
--						priv, rx_desc,
--						skb_frags, lro_frags,
--						ring->page_alloc, length);
--					if (!nr)
--						goto next;
--
--					if (priv->vlgrp && (cqe->vlan_my_qpn &
--						MLX4_CQE_VLAN_PRESENT_MASK)) {
--						lro_vlan_hwaccel_receive_frags(
--						       &ring->lro, lro_frags,
--						       length, length,
--						       priv->vlgrp,
--						       be16_to_cpu(cqe->sl_vid),
--						       NULL, 0);
--					} else
--						lro_receive_frags(&ring->lro,
--								  lro_frags,
--								  length,
--								  length,
--								  NULL, 0);
--
-+				if (mdev->profile.num_lro &&
-+				    !mlx4_en_lro_rx(priv, ring, rx_desc,
-+						    skb_frags, length, cqe))
- 					goto next;
--				}
- 
- 				/* LRO not possible, complete processing here */
- 				ip_summed = CHECKSUM_UNNECESSARY;
-@@ -790,13 +737,15 @@ next:
- 		if (++polled == budget) {
- 			/* We are here because we reached the NAPI budget -
- 			 * flush only pending LRO sessions */
--			lro_flush_all(&ring->lro);
-+			if (mdev->profile.num_lro)
-+				mlx4_en_lro_flush(priv, ring, 0);
- 			goto out;
- 		}
- 	}
- 
- 	/* If CQ is empty flush all LRO sessions unconditionally */
--	lro_flush_all(&ring->lro);
-+	if (mdev->profile.num_lro)
-+		mlx4_en_lro_flush(priv, ring, 1);
- 
- out:
- 	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
-@@ -820,30 +769,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
+@@ -1041,30 +1041,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
  	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
  
  	if (priv->port_up)
@@ -877,8 +94,8 @@
  	int done;
 +	int work = min(*budget, poll_dev->quota);
  
--	done = mlx4_en_process_rx_cq(dev, cq, budget);
-+	done = mlx4_en_process_rx_cq(dev, cq, work);
+-	done = cq->process_cq(dev, cq, budget);
++	done = cq->process_cq(dev, cq, work);
 +	dev->quota -= done;
 +	*budget -= done;
  
@@ -901,76 +118,11 @@
  }
  
  
-diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
-index 2297fec..59f8772 100644
---- a/drivers/net/mlx4/en_tx.c
-+++ b/drivers/net/mlx4/en_tx.c
-@@ -37,6 +37,7 @@
- #include <linux/skbuff.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/tcp.h>
- 
- #include "mlx4_en.h"
- 
 diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
-index 1011803..7c8c489 100644
+index ebaed67..456cd91 100644
 --- a/drivers/net/mlx4/mlx4_en.h
 +++ b/drivers/net/mlx4/mlx4_en.h
-@@ -38,7 +38,7 @@
- #include <linux/list.h>
- #include <linux/mutex.h>
- #include <linux/netdevice.h>
--#include <linux/inet_lro.h>
-+#include <net/checksum.h>
- 
- #include <linux/mlx4/device.h>
- #include <linux/mlx4/qp.h>
-@@ -268,11 +268,42 @@ struct mlx4_en_rx_desc {
- 	struct mlx4_wqe_data_seg data[0];
- };
- 
-+struct mlx4_en_lro {
-+	struct hlist_node node;
-+	struct hlist_node flush_node;
-+
-+	/* Id fields come first: */
-+	u32 saddr;
-+	u32 daddr;
-+	u32 sport_dport;
-+	u32 next_seq;
-+	u16 tot_len;
-+	u8 psh;
-+
-+	u32 tsval;
-+	u32 tsecr;
-+	u32 ack_seq;
-+	u16 window;
-+	__be16 vlan_prio;
-+	u16 has_vlan;
-+	u16 has_timestamp;
-+	u16 mss;
-+	__wsum  data_csum;
-+
-+	unsigned long expires;
-+	struct sk_buff *skb;
-+	struct sk_buff *skb_last;
-+};
-+
-+
- struct mlx4_en_rx_ring {
- 	struct mlx4_srq srq;
- 	struct mlx4_hwq_resources wqres;
- 	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
--	struct net_lro_mgr lro;
-+	struct mlx4_en_lro lro;
-+	struct hlist_head *lro_hash;
-+	struct hlist_head lro_free;
-+	struct hlist_head lro_flush;
- 	u32 size ;	/* number of Rx descs*/
- 	u32 actual_size;
- 	u32 size_mask;
-@@ -311,7 +342,7 @@ struct mlx4_en_cq {
+@@ -328,7 +328,7 @@ struct mlx4_en_cq {
  	int                     ring;
  	spinlock_t              lock;
  	struct net_device      *dev;
@@ -979,43 +131,12 @@
  	/* Per-core Tx cq processing support */
  	struct timer_list timer;
  	int size;
-@@ -541,7 +572,7 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- int mlx4_en_process_rx_cq(struct net_device *dev,
- 			  struct mlx4_en_cq *cq,
- 			  int budget);
+@@ -571,7 +571,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
+ int mlx4_en_process_rx_cq_skb(struct net_device *dev,
+ 			      struct mlx4_en_cq *cq,
+ 			      int budget);
 -int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
 +int mlx4_en_poll_rx_cq(struct net_device *poll_dev, int *budget);;
  void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
  			     int is_tx, int rss, int qpn, int cqn, int srqn,
  			     struct mlx4_qp_context *context);
-@@ -552,6 +583,27 @@ void mlx4_en_calc_rx_buf(struct net_device *dev);
- void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
- 				 struct mlx4_en_rss_map *rss_map,
- 				 int num_entries, int num_rings);
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all);
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+		   struct mlx4_en_rx_desc *rx_desc,
-+		   struct skb_frag_struct *skb_frags,
-+		   unsigned int length, struct mlx4_cqe *cqe);
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring);
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro);
-+
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv* priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length);
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv* priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length);
-+
- void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
- int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
- void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
--- 
-1.5.4
-
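
What remains of the 0100 patch after dropping the LRO hunks adapts the driver to the pre-2.6.24 polling interface: ->poll() receives the net_device being polled and a *budget, and completed work must be charged against both dev->quota and *budget. A compressed sketch of that shape; how the cq is recovered from poll_dev and the completion handling are assumptions, while the quota/budget accounting mirrors the hunk:

/* Sketch of the old-style poll handler built from the hunks above. */
static int mlx4_en_poll_rx_cq(struct net_device *poll_dev, int *budget)
{
	struct mlx4_en_cq *cq = poll_dev->priv;	/* assumed: cq stashed at init */
	struct net_device *dev = cq->dev;
	int work = min(*budget, poll_dev->quota);
	int done = cq->process_cq(dev, cq, work);

	dev->quota -= done;
	*budget -= done;

	if (done < work) {
		netif_rx_complete(poll_dev);	/* no more work: re-enable IRQ */
		return 0;
	}
	return 1;				/* ask to be polled again */
}

The matching netpoll hunk replaces napi_synchronize() with a busy wait on __LINK_STATE_RX_SCHED of the dummy poll device, since the old interface has nothing equivalent.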

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/mlx4_en_0110_no_set_flags.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/mlx4_en_0110_no_set_flags.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/mlx4_en_0110_no_set_flags.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -7,9 +7,9 @@
 --- ofed_kernel.orig/drivers/net/mlx4/en_params.c
 +++ ofed_kernel/drivers/net/mlx4/en_params.c
 @@ -593,8 +593,6 @@ const struct ethtool_ops mlx4_en_ethtool
- 	.get_pauseparam = mlx4_en_get_pauseparam,
  	.set_pauseparam = mlx4_en_set_pauseparam,
  	.get_ringparam = mlx4_en_get_ringparam,
+ 	.set_ringparam = mlx4_en_set_ringparam,
 -	.get_flags = ethtool_op_get_flags,
 -	.set_flags = ethtool_op_set_flags,
  };

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/sdp_7277_to_2_6_11.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/sdp_7277_to_2_6_11.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/sdp_7277_to_2_6_11.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -6,7 +6,7 @@
 ===================================================================
 --- ofed_kernel-2.6.9_U4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
 +++ ofed_kernel-2.6.9_U4/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -576,7 +576,6 @@ adjudge_to_death:
+@@ -580,7 +580,6 @@ adjudge_to_death:
  		/* TODO: tcp_fin_time to get timeout */
  		sdp_dbg(sk, "%s: entering time wait refcnt %d\n", __func__,
  			atomic_read(&sk->sk_refcnt));
@@ -14,15 +14,16 @@
  	}
  
  	/* TODO: limit number of orphaned sockets.
-@@ -850,7 +849,6 @@ void sdp_cancel_dreq_wait_timeout(struct
+@@ -860,8 +859,6 @@ void sdp_cancel_dreq_wait_timeout(struct
  		/* The timeout hasn't reached - need to clean ref count */
  		sock_put(&ssk->isk.sk, SOCK_REF_DREQ_TO);
  	}
+-
 -	atomic_dec(ssk->isk.sk.sk_prot->orphan_count);
  }
  
  void sdp_destroy_work(struct work_struct *work)
-@@ -890,9 +888,6 @@ void sdp_dreq_wait_timeout_work(struct w
+@@ -901,9 +898,6 @@ void sdp_dreq_wait_timeout_work(struct w
  
  	sdp_sk(sk)->dreq_wait_timeout = 0;
  
@@ -32,7 +33,7 @@
  	sdp_exch_state(sk, TCPF_LAST_ACK | TCPF_FIN_WAIT1, TCP_TIME_WAIT);
  
  	release_sock(sk);
-@@ -2131,7 +2126,6 @@ void sdp_urg(struct sdp_sock *ssk, struc
+@@ -2163,7 +2157,6 @@ void sdp_urg(struct sdp_sock *ssk, struc
  
  static atomic_t sockets_allocated;
  static atomic_t memory_allocated;
@@ -40,7 +41,7 @@
  static int memory_pressure;
  struct proto sdp_proto = {
          .close       = sdp_close,
-@@ -2152,13 +2146,11 @@ struct proto sdp_proto = {
+@@ -2184,13 +2177,11 @@ struct proto sdp_proto = {
  	.sockets_allocated = &sockets_allocated,
  	.memory_allocated = &memory_allocated,
  	.memory_pressure = &memory_pressure,
@@ -56,7 +57,7 @@
  	.name	     = "SDP",
  };
  
-@@ -2517,9 +2509,6 @@ static void __exit sdp_exit(void)
+@@ -2569,9 +2560,6 @@ static void __exit sdp_exit(void)
  	sock_unregister(PF_INET_SDP);
  	proto_unregister(&sdp_proto);
  

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/t3_hw_to_2_6_5-7_244.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/t3_hw_to_2_6_5-7_244.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U5/t3_hw_to_2_6_5-7_244.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,13 +1,8 @@
----
- drivers/net/cxgb3/adapter.h |    1 +
- drivers/net/cxgb3/t3_hw.c   |    6 ++----
- 2 files changed, 3 insertions(+), 4 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/adapter.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/adapter.h
-+++ ofed_kernel/drivers/net/cxgb3/adapter.h
-@@ -194,6 +194,7 @@ struct adapter {
+diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
+index 21dad82..1c4e828 100644
+--- a/drivers/net/cxgb3/adapter.h
++++ b/drivers/net/cxgb3/adapter.h
+@@ -207,6 +207,7 @@ struct adapter {
  	struct list_head adapter_list;
  	void __iomem *regs;
  	struct pci_dev *pdev;
@@ -15,11 +10,11 @@
  	unsigned long registered_device_map;
  	unsigned long open_device_map;
  	unsigned long flags;
-Index: ofed_kernel/drivers/net/cxgb3/t3_hw.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/t3_hw.c
-+++ ofed_kernel/drivers/net/cxgb3/t3_hw.c
-@@ -3494,7 +3494,7 @@ static int t3_reset_adapter(struct adapt
+diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
+index 533fc74..f6c000e 100644
+--- a/drivers/net/cxgb3/t3_hw.c
++++ b/drivers/net/cxgb3/t3_hw.c
+@@ -3534,7 +3534,7 @@ static int t3_reset_adapter(struct adapter *adapter)
  	uint16_t devid = 0;
  
  	if (save_and_restore_pcie)
@@ -28,7 +23,7 @@
  	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
  
  	/*
-@@ -3512,7 +3512,7 @@ static int t3_reset_adapter(struct adapt
+@@ -3552,7 +3552,7 @@ static int t3_reset_adapter(struct adapter *adapter)
  		return -1;
  
  	if (save_and_restore_pcie)
@@ -37,12 +32,12 @@
  	return 0;
  }
  
-@@ -3640,8 +3640,6 @@ int t3_prep_adapter(struct adapter *adap
+@@ -3688,8 +3688,6 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
  
  		memcpy(adapter->port[i]->dev_addr, hw_addr,
  		       ETH_ALEN);
 -		memcpy(adapter->port[i]->perm_addr, hw_addr,
 -		       ETH_ALEN);
- 		init_link_config(&p->link_config, p->port_type->caps);
+ 		init_link_config(&p->link_config, p->phy.caps);
  		p->phy.ops->power_down(&p->phy, 1);
- 		if (!(p->port_type->caps & SUPPORTED_IRQ))
+ 		if (!(p->phy.caps & SUPPORTED_IRQ))

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/core_2_z010_sysfs_to_2.6.18.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/core_2_z010_sysfs_to_2.6.18.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/core_2_z010_sysfs_to_2.6.18.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,32 @@
+--- a/drivers/infiniband/core/device.c	2009-02-24 14:00:50.387541000 +0200
++++ b/drivers/infiniband/core/device.c	2009-02-24 14:00:54.891362000 +0200
+@@ -199,7 +199,7 @@ void ib_dealloc_device(struct ib_device 
+ 
+ 	BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);
+ 
+-	kobject_put(&device->dev.kobj);
++	ib_device_unregister_sysfs(device);
+ }
+ EXPORT_SYMBOL(ib_dealloc_device);
+ 
+@@ -356,8 +356,6 @@ void ib_unregister_device(struct ib_devi
+ 
+ 	mutex_unlock(&device_mutex);
+ 
+-	ib_device_unregister_sysfs(device);
+-
+ 	spin_lock_irqsave(&device->client_data_lock, flags);
+ 	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ 		kfree(context);
+--- a/drivers/infiniband/core/sysfs.c	2009-02-24 13:58:42.509754000 +0200
++++ b/drivers/infiniband/core/sysfs.c	2009-02-24 14:00:54.894368000 +0200
+@@ -863,9 +863,6 @@ void ib_device_unregister_sysfs(struct i
+ 	struct kobject *p, *t;
+ 	struct ib_port *port;
+ 
+-	/* Hold kobject until ib_dealloc_device() */
+-	kobject_get(&device->dev.kobj);
+-
+ 	list_for_each_entry_safe(p, t, &device->port_list, entry) {
+ 		list_del(&p->entry);
+ 		port = container_of(p, struct ib_port, kobj);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/core_z0010_sysfs_race.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/core_z0010_sysfs_race.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/core_z0010_sysfs_race.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,496 @@
+ib_core: avoid race condition between sysfs access and low-level module unload. (V2)
+
+In newer kernels, although a low-level module will not be unloaded (code)
+while its sysfs interface is being accessed, it is possible for the module to
+free all its resources (data) during such access.  This almost always causes
+a kernel Oops.
+
+To avoid this, we protect the device reg_state with a mutex, and perform
+all sysfs operations (show, store) atomically within this mutex.
+
+V2: fix thinko bug in sysfs_state_show changes(found by Ralph Campbell).
+
+Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
+Signed-off-by: Ralph Campbell <ralph.campbell at qlogic.com>
+
+Index: ofed_kernel/drivers/infiniband/core/device.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/device.c	2009-02-22 15:36:34.531430000 +0200
++++ ofed_kernel/drivers/infiniband/core/device.c	2009-02-22 15:38:28.650322000 +0200
+@@ -178,9 +178,14 @@ static int end_port(struct ib_device *de
+  */
+ struct ib_device *ib_alloc_device(size_t size)
+ {
++	struct ib_device *ibdev;
++
+ 	BUG_ON(size < sizeof (struct ib_device));
+ 
+-	return kzalloc(size, GFP_KERNEL);
++	ibdev = kzalloc(size, GFP_KERNEL);
++	if (ibdev)
++		mutex_init(&ibdev->sysfs_mutex);
++	return ibdev;
+ }
+ EXPORT_SYMBOL(ib_alloc_device);
+ 
+@@ -313,9 +318,10 @@ int ib_register_device(struct ib_device 
+ 		goto out;
+ 	}
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	list_add_tail(&device->core_list, &device_list);
+-
+ 	device->reg_state = IB_DEV_REGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ 
+ 	{
+ 		struct ib_client *client;
+@@ -361,7 +367,9 @@ void ib_unregister_device(struct ib_devi
+ 		kfree(context);
+ 	spin_unlock_irqrestore(&device->client_data_lock, flags);
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	device->reg_state = IB_DEV_UNREGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ }
+ EXPORT_SYMBOL(ib_unregister_device);
+ 
+Index: ofed_kernel/drivers/infiniband/core/sysfs.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/sysfs.c	2009-02-22 15:36:35.914354000 +0200
++++ ofed_kernel/drivers/infiniband/core/sysfs.c	2009-02-22 16:12:37.485629000 +0200
+@@ -94,7 +94,7 @@ static ssize_t state_show(struct ib_port
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+ 	static const char *state_name[] = {
+ 		[IB_PORT_NOP]		= "NOP",
+@@ -105,26 +105,33 @@ static ssize_t state_show(struct ib_port
+ 		[IB_PORT_ACTIVE_DEFER]	= "ACTIVE_DEFER"
+ 	};
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%d: %s\n", attr.state,
+-		       attr.state >= 0 && attr.state < ARRAY_SIZE(state_name) ?
+-		       state_name[attr.state] : "UNKNOWN");
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d: %s\n", attr.state,
++				      attr.state >= 0 &&
++				      attr.state < ARRAY_SIZE(state_name) ?
++				      state_name[attr.state] : "UNKNOWN");
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "0x%x\n", attr.lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_mask_count_show(struct ib_port *p,
+@@ -132,52 +139,64 @@ static ssize_t lid_mask_count_show(struc
+ 				   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.lmc);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.lmc);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%x\n", attr.sm_lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.sm_lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused,
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.sm_sl);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.sm_sl);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused,
+ 			     char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
+@@ -186,24 +205,33 @@ static ssize_t rate_show(struct ib_port 
+ 	struct ib_port_attr attr;
+ 	char *speed = "";
+ 	int rate;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	switch (attr.active_speed) {
+-	case 2: speed = " DDR"; break;
+-	case 4: speed = " QDR"; break;
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.active_speed) {
++			case 2: speed = " DDR"; break;
++			case 4: speed = " QDR"; break;
++			}
++
++			rate = 25 * ib_width_enum_to_int(attr.active_width) *
++				attr.active_speed;
++			if (rate < 0) {
++				ret = -EINVAL;
++				goto out;
++			}
++
++			ret = sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
++				      rate / 10, rate % 10 ? ".5" : "",
++				      ib_width_enum_to_int(attr.active_width),
++				      speed);
++		}
+ 	}
+-
+-	rate = 25 * ib_width_enum_to_int(attr.active_width) * attr.active_speed;
+-	if (rate < 0)
+-		return -EINVAL;
+-
+-	return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
+-		       rate / 10, rate % 10 ? ".5" : "",
+-		       ib_width_enum_to_int(attr.active_width), speed);
++out:
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
+@@ -211,22 +239,42 @@ static ssize_t phys_state_show(struct ib
+ {
+ 	struct ib_port_attr attr;
+ 
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	switch (attr.phys_state) {
+-	case 1:  return sprintf(buf, "1: Sleep\n");
+-	case 2:  return sprintf(buf, "2: Polling\n");
+-	case 3:  return sprintf(buf, "3: Disabled\n");
+-	case 4:  return sprintf(buf, "4: PortConfigurationTraining\n");
+-	case 5:  return sprintf(buf, "5: LinkUp\n");
+-	case 6:  return sprintf(buf, "6: LinkErrorRecovery\n");
+-	case 7:  return sprintf(buf, "7: Phy Test\n");
+-	default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.phys_state) {
++			case 1:
++				ret = sprintf(buf, "1: Sleep\n");
++				break;
++			case 2:
++				ret = sprintf(buf, "2: Polling\n");
++				break;
++			case 3:
++				ret = sprintf(buf, "3: Disabled\n");
++				break;
++			case 4:
++				ret = sprintf(buf, "4: PortConfigurationTraining\n");
++				break;
++			case 5:
++				ret = sprintf(buf, "5: LinkUp\n");
++				break;
++			case 6:
++				ret = sprintf(buf, "6: LinkErrorRecovery\n");
++				break;
++			case 7:
++				ret = sprintf(buf, "7: Phy Test\n");
++				break;
++			default:
++				ret = sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++				break;
++			}
++		}
+ 	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static PORT_ATTR_RO(state);
+@@ -256,21 +304,24 @@ static ssize_t show_port_gid(struct ib_p
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	union ib_gid gid;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) gid.raw)[0]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[1]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[2]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[3]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[4]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[5]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[6]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[7]));
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) gid.raw)[0]),
++				      be16_to_cpu(((__be16 *) gid.raw)[1]),
++				      be16_to_cpu(((__be16 *) gid.raw)[2]),
++				      be16_to_cpu(((__be16 *) gid.raw)[3]),
++				      be16_to_cpu(((__be16 *) gid.raw)[4]),
++				      be16_to_cpu(((__be16 *) gid.raw)[5]),
++				      be16_to_cpu(((__be16 *) gid.raw)[6]),
++				      be16_to_cpu(((__be16 *) gid.raw)[7]));
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
+@@ -279,13 +330,16 @@ static ssize_t show_port_pkey(struct ib_
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	u16 pkey;
+-	ssize_t ret;
+-
+-	ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%04x\n", pkey);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
++		if (!ret)
++			ret = sprintf(buf, "0x%04x\n", pkey);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ #define PORT_PMA_ATTR(_name, _counter, _width, _offset)			\
+@@ -308,6 +346,12 @@ static ssize_t show_pma_counter(struct i
+ 	if (!p->ibdev->process_mad)
+ 		return sprintf(buf, "N/A (no PMA)\n");
+ 
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (!ibdev_is_alive(p->ibdev)) {
++		ret = -ENODEV;
++		goto out;
++	}
++
+ 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
+ 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ 	if (!in_mad || !out_mad) {
+@@ -354,7 +414,7 @@ static ssize_t show_pma_counter(struct i
+ out:
+ 	kfree(in_mad);
+ 	kfree(out_mad);
+-
++	mutex_unlock(&p->ibdev->sysfs_mutex);
+ 	return ret;
+ }
+ 
+@@ -594,20 +654,20 @@ static ssize_t show_sys_image_guid(struc
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_attr attr;
+-	ssize_t ret;
+-
+-	if (!ibdev_is_alive(dev))
+-		return -ENODEV;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_device(dev, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_query_device(dev, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_node_guid(struct device *device,
+@@ -639,17 +699,20 @@ static ssize_t set_node_desc(struct devi
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_modify desc = {};
+-	int ret;
++	int ret = -ENODEV;
+ 
+ 	if (!dev->modify_device)
+ 		return -EIO;
+ 
+ 	memcpy(desc.node_desc, buf, min_t(int, count, 64));
+-	ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
+-	if (ret)
+-		return ret;
+-
+-	return count;
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
++		if (!ret)
++			ret = count;
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
+@@ -677,14 +740,18 @@ static ssize_t show_protocol_stat(const 
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	union rdma_protocol_stats stats;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = dev->get_protocol_stats(dev, &stats);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%llu\n",
+-		       (unsigned long long) ((u64 *) &stats)[offset]);
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = dev->get_protocol_stats(dev, &stats);
++		if (!ret)
++			ret = sprintf(buf, "%llu\n",
++				      (unsigned long long)
++				      ((u64 *) &stats)[offset]);
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ /* generate a read-only iwarp statistics attribute */
+Index: ofed_kernel/include/rdma/ib_verbs.h
+===================================================================
+--- ofed_kernel.orig/include/rdma/ib_verbs.h	2009-02-22 15:36:40.252210000 +0200
++++ ofed_kernel/include/rdma/ib_verbs.h	2009-02-22 15:38:28.678322000 +0200
+@@ -1205,6 +1205,7 @@ struct ib_device {
+ 		IB_DEV_REGISTERED,
+ 		IB_DEV_UNREGISTERED
+ 	}                            reg_state;
++	struct mutex		     sysfs_mutex;
+ 
+ 	u64			     uverbs_cmd_mask;
+ 	int			     uverbs_abi_ver;

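The hunks above all apply the same guard pattern to the sysfs show/store handlers: initialize the return value to -ENODEV, take the new per-device sysfs_mutex, and only query the hardware while ibdev_is_alive() still holds. The following is a minimal user-space sketch of that pattern, not the kernel code itself; fake_ibdev, device_is_alive() and query_pkey() are hypothetical stand-ins for the real structures and helpers.

/*
 * Minimal user-space sketch of the guard pattern used in the hunks above:
 * default to -ENODEV, take the per-device mutex, re-check that the device
 * is still alive, and only then perform the query.  All names here are
 * illustrative stand-ins, not the kernel symbols.
 */
#include <pthread.h>
#include <stdio.h>
#include <errno.h>

struct fake_ibdev {
	pthread_mutex_t sysfs_mutex;	/* plays the role of ibdev->sysfs_mutex */
	int registered;			/* plays the role of ibdev_is_alive()   */
	unsigned short pkey;		/* value a real query would read from HW */
};

static int device_is_alive(const struct fake_ibdev *dev)
{
	return dev->registered;
}

static int query_pkey(const struct fake_ibdev *dev, unsigned short *pkey)
{
	*pkey = dev->pkey;		/* stand-in for ib_query_pkey() */
	return 0;
}

/* Mirrors the rewritten show_port_pkey(): nothing touches the device
 * outside the mutex, and a dead device simply yields -ENODEV. */
static int show_pkey(struct fake_ibdev *dev, char *buf, size_t len)
{
	int ret = -ENODEV;
	unsigned short pkey;

	pthread_mutex_lock(&dev->sysfs_mutex);
	if (device_is_alive(dev)) {
		ret = query_pkey(dev, &pkey);
		if (!ret)
			ret = snprintf(buf, len, "0x%04x\n", pkey);
	}
	pthread_mutex_unlock(&dev->sysfs_mutex);
	return ret;
}

int main(void)
{
	struct fake_ibdev dev = {
		.sysfs_mutex = PTHREAD_MUTEX_INITIALIZER,
		.registered  = 1,
		.pkey        = 0xffff,
	};
	char buf[16];

	if (show_pkey(&dev, buf, sizeof(buf)) > 0)
		fputs(buf, stdout);		/* prints 0xffff */

	dev.registered = 0;			/* simulate device removal */
	printf("after unregister: %d\n", show_pkey(&dev, buf, sizeof(buf)));
	return 0;
}

Initializing ret to -ENODEV up front is what lets every early path (device gone, query failed) fall through to a single unlock-and-return, which is the shape the patch gives every converted handler.
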
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/cxgb3_0002_undo_250.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/cxgb3_0002_undo_250.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/cxgb3_0002_undo_250.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,8 @@
-commit 204e2f98c2d13f869b8541f3c57c7314f75cab11
-Author: Divy Le Ray <divy at chelsio.com>
-Date:   Tue May 6 19:26:01 2008 -0700
-
-    cxgb3 - fix EEH
-    
-    Reset the chip when the PCI link goes down.
-    Preserve the napi structure when a sge qset's resources are freed.
-    Replay only HW initialization when the chip comes out of reset.
-    
-    Signed-off-by: Divy Le ray <divy at chelsio.com>
-    Signed-off-by: Jeff Garzik <jgarzik at redhat.com>
-
----
- drivers/net/cxgb3/common.h     |    1 -
- drivers/net/cxgb3/cxgb3_main.c |   10 ++++------
- drivers/net/cxgb3/regs.h       |    8 --------
- drivers/net/cxgb3/sge.c        |   31 ++-----------------------------
- drivers/net/cxgb3/t3_hw.c      |   28 ----------------------------
- 5 files changed, 6 insertions(+), 72 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/common.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/common.h
-+++ ofed_kernel/drivers/net/cxgb3/common.h
-@@ -700,7 +700,6 @@ void mac_prep(struct cmac *mac, struct a
+diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
+index d5e9bf7..dc2c79d 100644
+--- a/drivers/net/cxgb3/common.h
++++ b/drivers/net/cxgb3/common.h
+@@ -726,7 +726,6 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
  void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
  int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
  		    int reset);
@@ -31,11 +10,11 @@
  void t3_led_ready(struct adapter *adapter);
  void t3_fatal_err(struct adapter *adapter);
  void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -2449,6 +2449,9 @@ static pci_ers_result_t t3_io_error_dete
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 8de820e..48fbda6 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -2485,6 +2485,9 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
  	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
  		offload_close(&adapter->tdev);
  
@@ -45,7 +24,7 @@
  	adapter->flags &= ~FULL_INIT_DONE;
  
  	pci_disable_device(pdev);
-@@ -2473,12 +2476,8 @@ static pci_ers_result_t t3_io_slot_reset
+@@ -2509,12 +2512,8 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
  		goto err;
  	}
  	pci_set_master(pdev);
@@ -59,7 +38,7 @@
  		goto err;
  
  	return PCI_ERS_RESULT_RECOVERED;
-@@ -2630,7 +2629,6 @@ static int __devinit init_one(struct pci
+@@ -2666,7 +2665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
  	}
  
  	pci_set_master(pdev);
@@ -67,10 +46,10 @@
  
  	mmio_start = pci_resource_start(pdev, 0);
  	mmio_len = pci_resource_len(pdev, 0);
-Index: ofed_kernel/drivers/net/cxgb3/regs.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/regs.h
-+++ ofed_kernel/drivers/net/cxgb3/regs.h
+diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
+index a035d5c..086cd02 100644
+--- a/drivers/net/cxgb3/regs.h
++++ b/drivers/net/cxgb3/regs.h
 @@ -444,14 +444,6 @@
  
  #define A_PCIE_CFG 0x88
@@ -86,11 +65,11 @@
  #define S_PCIE_CLIDECEN    16
  #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
  #define F_PCIE_CLIDECEN    V_PCIE_CLIDECEN(1U)
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index 1b0861d..912f816 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
  }
  
  /**
@@ -124,7 +103,7 @@
   *	free_qset - free the resources of an SGE queue set
   *	@adapter: the adapter owning the queue set
   *	@q: the queue set
-@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter 
+@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -133,20 +112,11 @@
  }
  
  /**
-@@ -1429,7 +1402,7 @@ static void restart_ctrlq(unsigned long 
-  */
- int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
- {
--	int ret;
-+	int ret;
- 	local_bh_disable();
- 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
- 	local_bh_enable();
-Index: ofed_kernel/drivers/net/cxgb3/t3_hw.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/t3_hw.c
-+++ ofed_kernel/drivers/net/cxgb3/t3_hw.c
-@@ -3265,7 +3265,6 @@ static void config_pcie(struct adapter *
+diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
+index bf5c076..533fc74 100644
+--- a/drivers/net/cxgb3/t3_hw.c
++++ b/drivers/net/cxgb3/t3_hw.c
+@@ -3303,7 +3303,6 @@ static void config_pcie(struct adapter *adap)
  
  	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
  	t3_set_reg_field(adap, A_PCIE_CFG, 0,
@@ -154,15 +124,14 @@
  			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
  }
  
-@@ -3657,30 +3656,3 @@ void t3_led_ready(struct adapter *adapte
- 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+@@ -3706,31 +3705,3 @@ void t3_led_ready(struct adapter *adapter)
  			 F_GPIO0_OUT_VAL);
  }
--
+ 
 -int t3_replay_prep_adapter(struct adapter *adapter)
 -{
 -	const struct adapter_info *ai = adapter->params.info;
--	unsigned int i, j = 0;
+-	unsigned int i, j = -1;
 -	int ret;
 -
 -	early_hw_init(adapter, ai);
@@ -171,15 +140,17 @@
 -		return ret;
 -
 -	for_each_port(adapter, i) {
+-		const struct port_type_info *pti;
 -		struct port_info *p = adap2pinfo(adapter, i);
--		while (!adapter->params.vpd.port_type[j])
--			++j;
 -
--		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
--					ai->mdio_ops);
+-		while (!adapter->params.vpd.port_type[++j])
+-			;
 -
+-		pti = &port_types[adapter->params.vpd.port_type[j]];
+-		ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
+-		if (ret)
+-			return ret;
 -		p->phy.ops->power_down(&p->phy, 1);
--		++j;
 -	}
 -
 -return 0;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/cxgb3_0010_napi.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/cxgb3_0010_napi.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/cxgb3_0010_napi.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,21 @@
----
- drivers/net/cxgb3/adapter.h    |   22 +++--
- drivers/net/cxgb3/cxgb3_main.c |   94 ++++++++++++++++------
- drivers/net/cxgb3/sge.c        |  170 ++++++++++++++++++++++++-----------------
- 3 files changed, 185 insertions(+), 101 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/adapter.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/adapter.h
-+++ ofed_kernel/drivers/net/cxgb3/adapter.h
-@@ -47,13 +47,11 @@
- #include <asm/io.h>
+diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
+index 3b33ecb..21dad82 100644
+--- a/drivers/net/cxgb3/adapter.h
++++ b/drivers/net/cxgb3/adapter.h
+@@ -48,12 +48,10 @@
  
  struct vlan_group;
--struct adapter;
+ struct adapter;
 -struct sge_qset;
  
-+struct adapter;
  struct port_info {
  	struct adapter *adapter;
  	struct vlan_group *vlan_grp;
 -	struct sge_qset *qs;
- 	const struct port_type_info *port_type;
  	u8 port_id;
  	u8 rx_csum_offload;
-@@ -184,8 +182,6 @@ enum {				/* per port SGE statistics */
+ 	u8 nqsets;
+@@ -183,8 +181,6 @@ enum {				/* per port SGE statistics */
  #define T3_MAX_LRO_MAX_PKTS 64
  
  struct sge_qset {		/* an SGE queue set */
@@ -32,7 +24,7 @@
  	struct sge_rspq rspq;
  	struct sge_fl fl[SGE_RXQ_PER_SET];
  	struct sge_txq txq[SGE_TXQ_PER_SET];
-@@ -196,7 +192,7 @@ struct sge_qset {		/* an SGE queue set *
+@@ -195,7 +191,7 @@ struct sge_qset {		/* an SGE queue set */
  	int lro_enabled;
  	int lro_frag_len;
  	void *lro_va;
@@ -41,7 +33,7 @@
  	unsigned long txq_stopped;	/* which Tx queues are stopped */
  	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
  	unsigned long port_stats[SGE_PSTAT_MAX];
-@@ -241,6 +237,12 @@ struct adapter {
+@@ -240,6 +236,12 @@ struct adapter {
  	struct delayed_work adap_check_task;
  	struct work_struct ext_intr_handler_task;
  
@@ -54,7 +46,7 @@
  	struct dentry *debugfs_root;
  
  	struct mutex mdio_lock;
-@@ -267,6 +269,12 @@ static inline struct port_info *adap2pin
+@@ -266,6 +268,12 @@ static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
  	return netdev_priv(adap->port[idx]);
  }
  
@@ -67,7 +59,7 @@
  #define OFFLOAD_DEVMAP_BIT 15
  
  #define tdev2adap(d) container_of(d, struct adapter, tdev)
-@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, str
+@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -76,11 +68,11 @@
  int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
  		unsigned char *data);
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -410,17 +410,49 @@ static void setup_rss(struct adapter *ad
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 0f4c694..342d441 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -435,17 +435,49 @@ static void setup_rss(struct adapter *adap)
  		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
  }
  
@@ -98,9 +90,7 @@
 -	int i;
 +	int i, j, dummy_idx = 0;
 +	struct net_device *nd;
- 
--	for (i = 0; i < SGE_QSETS; i++) {
--		struct sge_qset *qs = &adap->sge.qs[i];
++
 +	for_each_port(adap, i) {
 +		struct net_device *dev = adap->port[i];
 +		const struct port_info *pi = netdev_priv(dev);
@@ -112,7 +102,9 @@
 +				nd = alloc_netdev(sizeof(*p), "", ether_setup);
 +				if (!nd)
 +					goto free_all;
-+
+ 
+-	for (i = 0; i < SGE_QSETS; i++) {
+-		struct sge_qset *qs = &adap->sge.qs[i];
 +				p = netdev_priv(nd);
 +				p->adapter = adap;
 +				nd->weight = 64;
@@ -137,7 +129,7 @@
  }
  
  /*
-@@ -431,18 +463,20 @@ static void init_napi(struct adapter *ad
+@@ -456,18 +488,20 @@ static void init_napi(struct adapter *adap)
  static void quiesce_rx(struct adapter *adap)
  {
  	int i;
@@ -168,7 +160,7 @@
  }
  
  /**
-@@ -455,7 +489,7 @@ static void enable_all_napi(struct adapt
+@@ -480,7 +514,7 @@ static void enable_all_napi(struct adapter *adap)
   */
  static int setup_sge_qsets(struct adapter *adap)
  {
@@ -177,7 +169,7 @@
  	unsigned int ntxq = SGE_TXQ_PER_SET;
  
  	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
-@@ -463,14 +497,15 @@ static int setup_sge_qsets(struct adapte
+@@ -488,14 +522,15 @@ static int setup_sge_qsets(struct adapter *adap)
  
  	for_each_port(adap, i) {
  		struct net_device *dev = adap->port[i];
@@ -196,7 +188,7 @@
  			if (err) {
  				t3_free_sge_resources(adap);
  				return err;
-@@ -884,6 +919,10 @@ static int cxgb_up(struct adapter *adap)
+@@ -909,6 +944,10 @@ static int cxgb_up(struct adapter *adap)
  				goto out;
  		}
  
@@ -207,7 +199,7 @@
  		err = t3_init_hw(adap, 0);
  		if (err)
  			goto out;
-@@ -896,7 +935,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -921,7 +960,6 @@ static int cxgb_up(struct adapter *adap)
  			goto out;
  
  		setup_rss(adap);
@@ -215,7 +207,7 @@
  		adap->flags |= FULL_INIT_DONE;
  	}
  
-@@ -924,7 +962,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -949,7 +987,6 @@ static int cxgb_up(struct adapter *adap)
  				      adap->name, adap)))
  		goto irq_err;
  
@@ -223,7 +215,7 @@
  	t3_sge_start(adap);
  	t3_intr_enable(adap);
  
-@@ -1061,10 +1098,8 @@ static int cxgb_open(struct net_device *
+@@ -1086,10 +1123,8 @@ static int cxgb_open(struct net_device *dev)
  	int other_ports = adapter->open_device_map & PORT_MASK;
  	int err;
  
@@ -235,7 +227,7 @@
  
  	set_bit(pi->port_id, &adapter->open_device_map);
  	if (is_offload(adapter) && !ofld_disable) {
-@@ -2700,6 +2735,7 @@ static int __devinit init_one(struct pci
+@@ -2736,6 +2771,7 @@ static int __devinit init_one(struct pci_dev *pdev,
  #ifdef CONFIG_NET_POLL_CONTROLLER
  		netdev->poll_controller = cxgb_netpoll;
  #endif
@@ -243,7 +235,7 @@
  
  		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
  	}
-@@ -2800,6 +2836,12 @@ static void __devexit remove_one(struct 
+@@ -2836,6 +2872,12 @@ static void __devexit remove_one(struct pci_dev *pdev)
  		t3_free_sge_resources(adapter);
  		cxgb_disable_msi(adapter);
  
@@ -256,11 +248,11 @@
  		for_each_port(adapter, i)
  			if (adapter->port[i])
  				free_netdev(adapter->port[i]);
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter 
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index f6bc6fe..3bbf626 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -270,7 +262,7 @@
  	memset(q, 0, sizeof(*q));
  }
  
-@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, str
+@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
  	unsigned int ndesc, pidx, credits, gen, compl;
  	const struct port_info *pi = netdev_priv(dev);
  	struct adapter *adap = pi->adapter;
@@ -279,7 +271,7 @@
  	struct sge_txq *q = &qs->txq[TXQ_ETH];
  
  	/*
-@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long 
+@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long data)
  	struct sk_buff *skb;
  	struct sge_qset *qs = (struct sge_qset *)data;
  	struct sge_txq *q = &qs->txq[TXQ_CTRL];
@@ -295,7 +287,7 @@
  
  		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
  
-@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long 
+@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long data)
  
  	spin_unlock(&q->lock);
  	wmb();
@@ -304,7 +296,7 @@
  		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
  }
  
-@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struc
+@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
  	else {
  		struct sge_qset *qs = rspq_to_qset(q);
  
@@ -314,7 +306,7 @@
  		q->rx_head = skb;
  	}
  	q->rx_tail = skb;
-@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundl
+@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev,
   *	receive handler.  Batches need to be of modest size as we do prefetches
   *	on the packets in each.
   */
@@ -357,7 +349,7 @@
  			prefetch(head->data);
  			skbs[ngathered] = head;
  			head = head->next;
-@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct 
+@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct *napi, int budget)
  		}
  		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
  	}
@@ -370,7 +362,7 @@
  }
  
  /**
-@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const
+@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const struct rsp_desc *r)
  
  /**
   *	napi_rx_handler - the NAPI handler for Rx processing
@@ -449,7 +441,7 @@
  }
  
  /**
-@@ -2448,7 +2462,8 @@ static inline int handle_responses(struc
+@@ -2448,7 +2462,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
  		return 0;
  	}
@@ -459,7 +451,7 @@
  	return 1;
  }
  
-@@ -2459,7 +2474,8 @@ static inline int handle_responses(struc
+@@ -2459,7 +2474,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -469,7 +461,7 @@
  	struct sge_rspq *q = &qs->rspq;
  
  	spin_lock(&q->lock);
-@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, vo
+@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -484,7 +476,7 @@
  		q->unhandled_irqs++;
  	spin_unlock(&q->lock);
  	return IRQ_HANDLED;
-@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, 
+@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, void *cookie)
  	return IRQ_HANDLED;
  }
  
@@ -502,7 +494,7 @@
  		return 1;
  	}
  	return 0;
-@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int 
+@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
  
  	spin_lock(&q->lock);
  
@@ -515,7 +507,7 @@
  	if (!new_packets && t3_slow_intr_handler(adap) == 0)
  		q->unhandled_irqs++;
  
-@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, voi
+@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, void *cookie)
  static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  {
  	u32 map;
@@ -527,7 +519,7 @@
  
  	t3_write_reg(adap, A_PL_CLI, 0);
  	map = t3_read_reg(adap, A_SG_DATA_INTR);
-@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq
+@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  	if (unlikely(map & F_ERRINTR))
  		t3_slow_intr_handler(adap);
  
@@ -550,7 +542,7 @@
  
  	spin_unlock(&q0->lock);
  	return IRQ_HANDLED;
-@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long d
+@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long data)
  {
  	spinlock_t *lock;
  	struct sge_qset *qs = (struct sge_qset *)data;
@@ -560,7 +552,7 @@
  
  	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
  		reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
-@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long d
+@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long data)
  		spin_unlock(&qs->txq[TXQ_OFLD].lock);
  	}
  	lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
@@ -572,7 +564,7 @@
  			u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
  
  			if (qs->fl[0].credits < qs->fl[0].size)
-@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long d
+@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long data)
   */
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
  {
@@ -586,7 +578,7 @@
  }
  
  /**
-@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_
+@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
   */
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -595,7 +587,7 @@
  {
  	int i, avail, ret = -ENOMEM;
  	struct sge_qset *q = &adapter->sge.qs[id];
-@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *ad
+@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  	}
  
  	spin_unlock_irq(&adapter->sge.reg_lock);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,23 +3,32 @@
  drivers/infiniband/hw/ipath/ipath_user_sdma.c |    6 +++---
  2 files changed, 4 insertions(+), 4 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:37:35.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:41:22.000000000 -0800
 @@ -698,7 +698,7 @@ retry:
  
  	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
  			      tx->map_len, DMA_TO_DEVICE);
--	if (dma_mapping_error(&dd->pcidev->dev, addr)) {
-+	if (dma_mapping_error(addr)) {
- 		ret = -EIO;
- 		goto unlock;
- 	}
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+-	if (dma_mapping_error(&dd->pcidev->dev, addr))
++	if (dma_mapping_error(addr))
+ 		goto ioerr;
+ 
+ 	dwoffset = tx->map_len >> 2;
+@@ -739,7 +739,7 @@ retry:
+ 		dw = (len + 3) >> 2;
+ 		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
+ 				      DMA_TO_DEVICE);
+-		if (dma_mapping_error(&dd->pcidev->dev, addr))
++		if (dma_mapping_error(addr))
+ 			goto unmap;
+ 		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
+ 		/* SDmaUseLargeBuf has to be set in every descriptor */
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:18.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:54.000000000 -0800
 @@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(cons
  
  	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/ipath_0100_iowrite32_copy.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/ipath_0100_iowrite32_copy.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/ipath_0100_iowrite32_copy.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,88 +1,16 @@
-BACKPORT - backport iowrite32_copy to 2.6.15 and earlier
+BACKPORT - backport iowrite32_copy awareness to 2.6.15 and earlier
 
+Signed-off-by: John Gregor <john.gregor at qlogic.com>
 ---
- drivers/infiniband/hw/ipath/Makefile                |    1 
- drivers/infiniband/hw/ipath/iowrite32_copy_x86_64.S |   57 ++++++++++++++++++++
- drivers/infiniband/hw/ipath/ipath_backport.h        |   48 ++++++++++++++++
- drivers/infiniband/hw/ipath/ipath_kernel.h          |    1 
- drivers/infiniband/hw/ipath/ipath_verbs.h           |    2 
- 5 files changed, 109 insertions(+)
+ drivers/infiniband/hw/ipath/ipath_backport.h |   48 +++++++++++++++++++++++++++
+ drivers/infiniband/hw/ipath/ipath_kernel.h   |    1 
+ drivers/infiniband/hw/ipath/ipath_verbs.h    |    2 +
+ 3 files changed, 51 insertions(+)
 
-Index: ofed_kernel/drivers/infiniband/hw/ipath/Makefile
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_backport.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/Makefile
-+++ ofed_kernel/drivers/infiniband/hw/ipath/Makefile
-@@ -39,3 +39,4 @@ ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba6
- 
- ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
- ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o
-+ib_ipath-$(CONFIG_X86_64) += iowrite32_copy_x86_64.o
-Index: ofed_kernel/drivers/infiniband/hw/ipath/iowrite32_copy_x86_64.S
-===================================================================
 --- /dev/null
-+++ ofed_kernel/drivers/infiniband/hw/ipath/iowrite32_copy_x86_64.S
-@@ -0,0 +1,57 @@
-+/*
-+ * Copyright (c) 2003, 2004, 2005. PathScale, Inc. All rights reserved.
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses.  You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ *     Redistribution and use in source and binary forms, with or
-+ *     without modification, are permitted provided that the following
-+ *     conditions are met:
-+ *
-+ *      - Redistributions of source code must retain the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer.
-+ *
-+ *      - Redistributions in binary form must reproduce the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer in the documentation and/or other materials
-+ *        provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ */
-+
-+/**
-+ * __iowrite32_copy - copy a memory block using dword multiple writes
-+ *
-+ * This is primarily for writing to the InfiniPath PIO buffers, which
-+ * only support dword multiple writes, and thus can not use memcpy().
-+ * For this reason, we use nothing smaller than dword writes.
-+ * It is also used as a fast copy routine in some places that have been
-+ * measured to win over memcpy, and the performance delta matters.
-+ *
-+ * Count is number of dwords; might not be a qword multiple.
-+ */
-+
-+ 	.globl __iowrite32_copy
-+	.p2align 4
-+/* rdi	destination, rsi source, rdx count */
-+__iowrite32_copy:
-+	movl %edx,%ecx
-+	shrl $1,%ecx
-+	andl $1,%edx
-+	rep
-+	movsq
-+	movl %edx,%ecx
-+	rep
-+	movsd
-+	ret
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_backport.h
-===================================================================
---- /dev/null
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_backport.h
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_backport.h
 @@ -0,0 +1,48 @@
 +#ifndef _IPATH_BACKPORT_H
 +#define _IPATH_BACKPORT_H
@@ -132,10 +60,10 @@
 +void __iowrite32_copy(void __iomem * dst, const void *src, size_t count);
 +
 +#endif				/* _IPATH_BACKPORT_H */
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_kernel.h
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_kernel.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_kernel.h
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_kernel.h
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_kernel.h
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_kernel.h
 @@ -47,6 +47,7 @@
  #include <asm/io.h>
  #include <rdma/ib_verbs.h>
@@ -144,10 +72,10 @@
  #include "ipath_common.h"
  #include "ipath_debug.h"
  #include "ipath_registers.h"
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_verbs.h
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_verbs.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_verbs.h
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_verbs.h
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_verbs.h
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_verbs.h
 @@ -42,6 +42,8 @@
  #include <rdma/ib_pack.h>
  #include <rdma/ib_user_verbs.h>

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/ipoib_0100_to_2.6.21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/ipoib_0100_to_2.6.21.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/ipoib_0100_to_2.6.21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -12,11 +12,11 @@
  drivers/infiniband/ulp/ipoib/ipoib_multicast.c |   10 ++--
  5 files changed, 61 insertions(+), 64 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:56:08.000000000 +0200
-@@ -279,8 +279,6 @@ struct ipoib_dev_priv {
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:46:20.000000000 +0300
+@@ -277,8 +277,6 @@ struct ipoib_dev_priv {
  
  	struct net_device *dev;
  
@@ -25,7 +25,7 @@
  	unsigned long flags;
  
  	struct mutex vlan_mutex;
-@@ -338,6 +336,8 @@ struct ipoib_dev_priv {
+@@ -336,6 +334,8 @@ struct ipoib_dev_priv {
  
  	struct ib_event_handler event_handler;
  
@@ -34,7 +34,7 @@
  	struct net_device *parent;
  	struct list_head child_intfs;
  	struct list_head list;
-@@ -425,7 +425,7 @@ extern struct workqueue_struct *ipoib_wo
+@@ -423,7 +423,7 @@ extern struct workqueue_struct *ipoib_wo
  
  /* functions */
  
@@ -43,10 +43,10 @@
  void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
  void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:46:20.000000000 +0300
 @@ -593,7 +593,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		ipoib_dbg(priv, "cm recv error "
  			   "(status=%d, wrid=%d vend_err %x)\n",
@@ -56,7 +56,7 @@
  		if (has_srq)
  			goto repost;
  		else {
-@@ -658,7 +658,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
+@@ -646,7 +646,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		 * this packet and reuse the old buffer.
  		 */
  		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
@@ -65,7 +65,7 @@
  		goto repost;
  	}
  
-@@ -676,8 +676,8 @@ copied:
+@@ -664,8 +664,8 @@ copied:
  	skb_pull(skb, IPOIB_ENCAP_LEN);
  
  	dev->last_rx = jiffies;
@@ -76,7 +76,7 @@
  
  	skb->dev = dev;
  	/* XXX get correct PACKET_ type here */
-@@ -726,8 +726,8 @@ void ipoib_cm_send(struct net_device *de
+@@ -714,8 +714,8 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(skb->len > tx->mtu)) {
  		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
  			   skb->len, tx->mtu);
@@ -87,7 +87,7 @@
  		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
  		return;
  	}
-@@ -746,7 +746,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -734,7 +734,7 @@ void ipoib_cm_send(struct net_device *de
  	tx_req->skb = skb;
  	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
  	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
@@ -96,7 +96,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -756,7 +756,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -744,7 +744,7 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
  			       addr, skb->len))) {
  		ipoib_warn(priv, "post_send failed\n");
@@ -105,7 +105,7 @@
  		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
  		dev_kfree_skb_any(skb);
  	} else {
-@@ -793,8 +793,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
+@@ -781,8 +781,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
  	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
  
  	/* FIXME: is this right? Shouldn't we only increment on success? */
@@ -116,10 +116,10 @@
  
  	dev_kfree_skb_any(tx_req->skb);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:43:16.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:49:04.000000000 +0300
 @@ -261,7 +261,7 @@ static void ipoib_ib_handle_rx_wc(struct
  	 * this packet and reuse the old buffer.
  	 */
@@ -263,60 +263,41 @@
  		--priv->tx_outstanding;
  		ipoib_dma_unmap_tx(priv->ca, tx_req);
  		dev_kfree_skb_any(skb);
-@@ -809,6 +812,7 @@ int ipoib_ib_dev_stop(struct net_device 
+@@ -708,8 +711,7 @@ int ipoib_ib_dev_open(struct net_device 
+ 	priv->poll_timer.function = ipoib_ib_tx_timer_func;
+ 	priv->poll_timer.data = (unsigned long)dev;
+ 
+-	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_enable(&priv->napi);
++	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+ 
+ 	return 0;
+ }
+@@ -828,8 +830,8 @@ int ipoib_ib_dev_stop(struct net_device 
+ 	struct ipoib_tx_buf *tx_req;
  	int i;
  
- 	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+-	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_disable(&priv->napi);
++	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 +	netif_poll_disable(dev);
  
  	ipoib_cm_dev_stop(dev);
  
-@@ -893,6 +897,7 @@ timeout:
- 		msleep(1);
- 	}
+@@ -903,6 +905,7 @@ timeout:
  
+ 	ipoib_ah_dev_cleanup(dev);
+ 
 +	netif_poll_enable(dev);
  	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
  
  	return 0;
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:55:35.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:58:18.000000000 +0200
-@@ -106,20 +106,16 @@ int ipoib_open(struct net_device *dev)
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:44:02.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:46:20.000000000 +0300
+@@ -585,7 +585,7 @@ static void neigh_add_path(struct sk_buf
  
- 	ipoib_dbg(priv, "bringing up interface\n");
- 
--	if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
--		napi_enable(&priv->napi);
-+	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- 
- 	if (ipoib_pkey_dev_delay_open(dev))
- 		return 0;
- 
--	if (ipoib_ib_dev_open(dev)) {
--		napi_disable(&priv->napi);
--		return -EINVAL;
--	}
-+	if (ipoib_ib_dev_open(dev))
-+  		return -EINVAL;
- 
- 	if (ipoib_ib_dev_up(dev)) {
- 		ipoib_ib_dev_stop(dev, 1);
--		napi_disable(&priv->napi);
- 		return -EINVAL;
- 	}
- 
-@@ -152,7 +148,6 @@ static int ipoib_stop(struct net_device 
- 	ipoib_dbg(priv, "stopping interface\n");
- 
- 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
--	napi_disable(&priv->napi);
- 
- 	netif_stop_queue(dev);
- 
-@@ -604,7 +599,7 @@ static void neigh_add_path(struct sk_buf
- 
  	neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
  	if (!neigh) {
 -		++dev->stats.tx_dropped;
@@ -324,7 +305,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -667,7 +662,7 @@ err_list:
+@@ -646,7 +646,7 @@ err_list:
  err_path:
  	ipoib_neigh_free(dev, neigh);
  err_drop:
@@ -333,7 +314,7 @@
  	dev_kfree_skb_any(skb);
  
  	spin_unlock_irqrestore(&priv->lock, flags);
-@@ -714,7 +709,7 @@ static void unicast_arp_send(struct sk_b
+@@ -697,7 +697,7 @@ static void unicast_arp_send(struct sk_b
  			} else
  				__path_add(dev, path);
  		} else {
@@ -342,7 +323,7 @@
  			dev_kfree_skb_any(skb);
  		}
  
-@@ -733,7 +728,7 @@ static void unicast_arp_send(struct sk_b
+@@ -716,7 +716,7 @@ static void unicast_arp_send(struct sk_b
  		skb_push(skb, sizeof *phdr);
  		__skb_queue_tail(&path->queue, skb);
  	} else {
@@ -351,7 +332,7 @@
  		dev_kfree_skb_any(skb);
  	}
  
-@@ -790,7 +785,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -773,7 +773,7 @@ static int ipoib_start_xmit(struct sk_bu
  			__skb_queue_tail(&neigh->queue, skb);
  			spin_unlock_irqrestore(&priv->lock, flags);
  		} else {
@@ -360,7 +341,7 @@
  			dev_kfree_skb_any(skb);
  		}
  	} else {
-@@ -816,7 +811,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -799,7 +799,7 @@ static int ipoib_start_xmit(struct sk_bu
  					   IPOIB_QPN(phdr->hwaddr),
  					   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
  				dev_kfree_skb_any(skb);
@@ -369,7 +350,7 @@
  				return NETDEV_TX_OK;
  			}
  
-@@ -842,7 +837,7 @@ static void ipoib_timeout(struct net_dev
+@@ -825,7 +825,7 @@ static void ipoib_timeout(struct net_dev
  static int ipoib_hard_header(struct sk_buff *skb,
  			     struct net_device *dev,
  			     unsigned short type,
@@ -378,7 +359,7 @@
  {
  	struct ipoib_header *header;
  
-@@ -931,9 +926,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
+@@ -914,9 +914,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
  void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
  {
  	struct sk_buff *skb;
@@ -390,7 +371,7 @@
  		dev_kfree_skb_any(skb);
  	}
  	if (ipoib_cm_get(neigh))
-@@ -1008,10 +1004,6 @@ void ipoib_dev_cleanup(struct net_device
+@@ -991,10 +992,6 @@ void ipoib_dev_cleanup(struct net_device
  	priv->tx_ring = NULL;
  }
  
@@ -401,7 +382,7 @@
  static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
  		       void **tcph, u64 *hdr_flags, void *priv)
  {
-@@ -1069,13 +1061,13 @@ static void ipoib_setup(struct net_devic
+@@ -1052,13 +1049,13 @@ static void ipoib_setup(struct net_devic
  	dev->change_mtu		 = ipoib_change_mtu;
  	dev->hard_start_xmit	 = ipoib_start_xmit;
  	dev->tx_timeout		 = ipoib_timeout;
@@ -418,10 +399,10 @@
  
  	dev->watchdog_timeo	 = HZ;
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:46:20.000000000 +0300
 @@ -100,7 +100,7 @@ static void ipoib_mcast_free(struct ipoi
  	}
  
@@ -448,7 +429,7 @@
  			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
  		}
  		netif_tx_unlock_bh(dev);
-@@ -667,7 +668,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -677,7 +678,7 @@ void ipoib_mcast_send(struct net_device 
  	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)		||
  	    !priv->broadcast					||
  	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
@@ -457,7 +438,7 @@
  		dev_kfree_skb_any(skb);
  		goto unlock;
  	}
-@@ -682,7 +683,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -692,7 +693,7 @@ void ipoib_mcast_send(struct net_device 
  		if (!mcast) {
  			ipoib_warn(priv, "unable to allocate memory for "
  				   "multicast structure\n");
@@ -466,7 +447,7 @@
  			dev_kfree_skb_any(skb);
  			goto out;
  		}
-@@ -697,7 +698,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -707,7 +708,7 @@ void ipoib_mcast_send(struct net_device 
  		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
  			skb_queue_tail(&mcast->pkt_queue, skb);
  		else {

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,26 @@
+---
+ drivers/infiniband/ulp/ipoib/ipoib_fs.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+===================================================================
+--- ofa_kernel-1.4.orig/drivers/infiniband/ulp/ipoib/ipoib_fs.c
++++ ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+@@ -134,7 +134,7 @@ static int ipoib_mcg_open(struct inode *
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_mcg_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_mcg_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -240,7 +240,7 @@ static int ipoib_path_open(struct inode 
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_path_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_path_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/ipoib_x_001_2_6_9_disable_coal.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/ipoib_x_001_2_6_9_disable_coal.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/ipoib_x_001_2_6_9_disable_coal.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,33 @@
+For RHEL4, need to disable coalescing when LRO is enabled,
+or get IPoIB performance degradation (TCP stack issue).
+
+Fixes Bugzilla 1494.
+
+Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
+
+Index: ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+===================================================================
+--- ofa_kernel-1.4.orig/drivers/infiniband/ulp/ipoib/ipoib_verbs.c	2009-02-23 11:25:20.000000000 +0200
++++ ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_verbs.c	2009-02-23 11:34:39.000000000 +0200
+@@ -185,10 +185,17 @@
+ 
+ 	coal = kzalloc(sizeof *coal, GFP_KERNEL);
+ 	if (coal) {
+-		coal->rx_coalesce_usecs = 10;
+-		coal->tx_coalesce_usecs = 10;
+-		coal->rx_max_coalesced_frames = 16;
+-		coal->tx_max_coalesced_frames = 16;
++		if (dev->features & NETIF_F_LRO) {
++			coal->rx_coalesce_usecs = 0;
++			coal->tx_coalesce_usecs = 0;
++			coal->rx_max_coalesced_frames = 0;
++			coal->tx_max_coalesced_frames = 0;
++		} else {
++			coal->rx_coalesce_usecs = 10;
++			coal->tx_coalesce_usecs = 10;
++			coal->rx_max_coalesced_frames = 16;
++			coal->tx_max_coalesced_frames = 16;
++		}
+ 		dev->ethtool_ops->set_coalesce(dev, coal);
+ 		kfree(coal);
+ 	}

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/iw_nes_050_to_2_6_24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/iw_nes_050_to_2_6_24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/iw_nes_050_to_2_6_24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:23:23.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -1118,19 +1118,19 @@ static ssize_t nes_show_wqm_quanta(struc
  static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
  					const char *buf, size_t count)

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/iw_nes_100_to_2_6_23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/iw_nes_100_to_2_6_23.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/iw_nes_100_to_2_6_23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/Kconfig nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig
---- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:38:45.000000000 -0500
 @@ -2,7 +2,6 @@ config INFINIBAND_NES
  	tristate "NetEffect RNIC Driver"
  	depends on PCI && INET && INFINIBAND
@@ -10,8 +10,8 @@
  	  This is a low-level driver for NetEffect RDMA enabled
  	  Network Interface Cards (RNIC).
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -360,10 +360,11 @@ struct ib_qp *nes_get_qp(struct ib_devic
   */
  static void nes_print_macaddr(struct net_device *netdev)
@@ -29,9 +29,9 @@
  
  /**
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_cm.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2008-11-07 15:26:56.000000000 -0600
-@@ -998,7 +998,6 @@ static int nes_addr_resolve_neigh(struct
+--- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1086,7 +1086,6 @@ static int nes_addr_resolve_neigh(struct
  	struct flowi fl;
  	struct neighbour *neigh;
  	int rc = -1;
@@ -39,7 +39,7 @@
  
  	memset(&fl, 0, sizeof fl);
  	fl.nl_u.ip4_u.daddr = htonl(dst_ip);
-@@ -1012,8 +1011,11 @@ static int nes_addr_resolve_neigh(struct
+@@ -1100,8 +1099,11 @@ static int nes_addr_resolve_neigh(struct
  	if (neigh) {
  		if (neigh->nud_state & NUD_VALID) {
  			nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -53,7 +53,7 @@
  			nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
  					     dst_ip, NES_ARP_ADD);
  			rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
-@@ -1042,7 +1044,6 @@ static struct nes_cm_node *make_cm_node(
+@@ -1130,7 +1132,6 @@ static struct nes_cm_node *make_cm_node(
  	int arpindex = 0;
  	struct nes_device *nesdev;
  	struct nes_adapter *nesadapter;
@@ -61,7 +61,7 @@
  
  	/* create an hte and cm_node for this instance */
  	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
-@@ -1111,8 +1112,11 @@ static struct nes_cm_node *make_cm_node(
+@@ -1197,8 +1198,11 @@ static struct nes_cm_node *make_cm_node(
  
  	/* copy the mac addr to node context */
  	memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
@@ -76,8 +76,8 @@
  	add_hte_node(cm_core, cm_node);
  	atomic_inc(&cm_nodes_created);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2008-11-07 15:38:18.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:38:45.000000000 -0500
 @@ -38,14 +38,9 @@
  #include <linux/ip.h>
  #include <linux/tcp.h>
@@ -90,10 +90,10 @@
 -module_param(nes_lro_max_aggr, uint, 0444);
 -MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
 -
- static u32 crit_err_count;
- u32 int_mod_timer_init;
- u32 int_mod_cq_depth_256;
-@@ -1540,25 +1535,6 @@ static void nes_rq_wqes_timeout(unsigned
+ static int wide_ppm_offset;
+ module_param(wide_ppm_offset, int, 0644);
+ MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm");
+@@ -1531,25 +1526,6 @@ static void nes_rq_wqes_timeout(unsigned
  }
  
  
@@ -119,7 +119,7 @@
  /**
   * nes_init_nic_qp
   */
-@@ -1783,14 +1759,6 @@ int nes_init_nic_qp(struct nes_device *n
+@@ -1773,14 +1749,6 @@ int nes_init_nic_qp(struct nes_device *n
  			jumbomode = 1;
  		nes_nic_init_timer_defaults(nesdev, jumbomode);
  	}
@@ -134,7 +134,7 @@
  	return 0;
  }
  
-@@ -2541,7 +2509,7 @@ static void nes_nic_napi_ce_handler(stru
+@@ -2556,7 +2524,7 @@ static void nes_nic_napi_ce_handler(stru
  {
  	struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
  
@@ -143,7 +143,7 @@
  }
  
  
-@@ -2576,13 +2544,10 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2591,13 +2559,10 @@ void nes_nic_ce_handler(struct nes_devic
  	u16 pkt_type;
  	u16 rqes_processed = 0;
  	u8 sq_cqes = 0;
@@ -157,7 +157,7 @@
  	do {
  		if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
  				NES_NIC_CQE_VALID) {
-@@ -2714,17 +2679,9 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2729,17 +2694,9 @@ void nes_nic_ce_handler(struct nes_devic
  							>> 16);
  					nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
  							nesvnic->netdev->name, vlan_tag);
@@ -178,7 +178,7 @@
  
  skip_rx_indicate0:
  				nesvnic->netdev->last_rx = jiffies;
-@@ -2755,8 +2712,6 @@ skip_rx_indicate0:
+@@ -2770,8 +2727,6 @@ skip_rx_indicate0:
  
  	} while (1);
  
@@ -188,18 +188,18 @@
  		barrier();
  		/* restart the queue if it had been stopped */
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.h nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:38:45.000000000 -0500
 @@ -33,8 +33,6 @@
  #ifndef __NES_HW_H
  #define __NES_HW_H
  
 -#include <linux/inet_lro.h>
 -
+ #define NES_PHY_TYPE_CX4       1
  #define NES_PHY_TYPE_1G        2
  #define NES_PHY_TYPE_IRIS      3
- #define NES_PHY_TYPE_ARGUS     4
-@@ -991,8 +989,6 @@ struct nes_hw_tune_timer {
+@@ -1005,8 +1003,6 @@ struct nes_hw_tune_timer {
  #define NES_TIMER_ENABLE_LIMIT      4
  #define NES_MAX_LINK_INTERRUPTS     128
  #define NES_MAX_LINK_CHECK          200
@@ -208,7 +208,7 @@
  
  struct nes_adapter {
  	u64              fw_ver;
-@@ -1168,7 +1164,6 @@ struct nes_vnic {
+@@ -1194,7 +1190,6 @@ struct nes_vnic {
  	u32               msg_enable;
  	/* u32 tx_avail; */
  	__be32            local_ipaddr;
@@ -216,7 +216,7 @@
  	spinlock_t           tx_lock;	/* could use netdev tx lock? */
  	struct timer_list    rq_wqes_timer;
  	u32                  nic_mem_size;
-@@ -1196,9 +1191,6 @@ struct nes_vnic {
+@@ -1222,9 +1217,6 @@ struct nes_vnic {
  	u8  of_device_registered;
  	u8  rdma_enabled;
  	u8  rx_checksum_disabled;
@@ -227,8 +227,8 @@
  
  struct nes_ib_device {
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
 @@ -96,35 +96,38 @@ static int nics_per_function = 1;
  /**
   * nes_netdev_poll
@@ -292,7 +292,7 @@
  	netif_stop_queue(netdev);
  	list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
  		first_nesvnic = container_of(list_pos, struct nes_vnic, list);
-@@ -797,14 +798,16 @@ static int nes_netdev_set_mac_address(st
+@@ -775,14 +776,16 @@ static int nes_netdev_set_mac_address(st
  	int i;
  	u32 macaddr_low;
  	u16 macaddr_high;
@@ -312,7 +312,7 @@
  	macaddr_high  = ((u16)netdev->dev_addr[0]) << 8;
  	macaddr_high += (u16)netdev->dev_addr[1];
  	macaddr_low   = ((u32)netdev->dev_addr[2]) << 24;
-@@ -909,11 +912,11 @@ static void nes_netdev_set_multicast_lis
+@@ -887,11 +890,11 @@ static void nes_netdev_set_multicast_lis
  			if (mc_index >= max_pft_entries_avaiable)
  				break;
  			if (multicast_addr) {
@@ -329,7 +329,7 @@
  				macaddr_high  = ((u16)multicast_addr->dmi_addr[0]) << 8;
  				macaddr_high += (u16)multicast_addr->dmi_addr[1];
  				macaddr_low   = ((u32)multicast_addr->dmi_addr[2]) << 24;
-@@ -1063,9 +1066,6 @@ static const char nes_ethtool_stringset[
+@@ -1040,9 +1043,6 @@ static const char nes_ethtool_stringset[
  	"CQ Depth 32",
  	"CQ Depth 128",
  	"CQ Depth 256",
@@ -339,17 +339,17 @@
  };
  
  #define NES_ETHTOOL_STAT_COUNT  ARRAY_SIZE(nes_ethtool_stringset)
-@@ -1263,9 +1263,6 @@ static void nes_netdev_get_ethtool_stats
- 	target_stat_values[52] = int_mod_cq_depth_32;
- 	target_stat_values[53] = int_mod_cq_depth_128;
- 	target_stat_values[54] = int_mod_cq_depth_256;
--	target_stat_values[55] = nesvnic->lro_mgr.stats.aggregated;
--	target_stat_values[56] = nesvnic->lro_mgr.stats.flushed;
--	target_stat_values[57] = nesvnic->lro_mgr.stats.no_desc;
+@@ -1240,9 +1240,6 @@ static void nes_netdev_get_ethtool_stats
+ 	target_stat_values[++index] = int_mod_cq_depth_32;
+ 	target_stat_values[++index] = int_mod_cq_depth_128;
+ 	target_stat_values[++index] = int_mod_cq_depth_256;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
  
  }
  
-@@ -1542,8 +1539,6 @@ static struct ethtool_ops nes_ethtool_op
+@@ -1525,8 +1522,6 @@ static struct ethtool_ops nes_ethtool_op
  	.set_sg = ethtool_op_set_sg,
  	.get_tso = ethtool_op_get_tso,
  	.set_tso = ethtool_op_set_tso,
@@ -358,7 +358,7 @@
  };
  
  
-@@ -1615,7 +1610,8 @@ struct net_device *nes_netdev_init(struc
+@@ -1598,7 +1593,8 @@ struct net_device *nes_netdev_init(struc
  	netdev->type = ARPHRD_ETHER;
  	netdev->features = NETIF_F_HIGHDMA;
  	netdev->ethtool_ops = &nes_ethtool_ops;
@@ -368,7 +368,7 @@
  	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
  	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
  	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
-@@ -1648,7 +1644,6 @@ struct net_device *nes_netdev_init(struc
+@@ -1630,7 +1626,6 @@ struct net_device *nes_netdev_init(struc
  
  	if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
  		netdev->features |= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/iw_nes_200_to_2_6_22.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/iw_nes_200_to_2_6_22.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/iw_nes_200_to_2_6_22.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:52:27.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -593,16 +593,22 @@ static int __devinit nes_probe(struct pc
  						nesdev->nesadapter->port_count;
  	}
@@ -35,9 +35,9 @@
  
  	tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.h nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h
---- linux-2.6/drivers/infiniband/hw/nes/nes.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2008-11-07 15:52:27.000000000 -0600
-@@ -276,14 +276,7 @@ static inline __le32 get_crc_value(struc
+--- linux-2.6/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:38:45.000000000 -0500
+@@ -274,14 +274,7 @@ static inline __le32 get_crc_value(struc
  	u32 crc_value;
  	crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad));
  

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/iw_nes_210_to_2_6_21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/iw_nes_210_to_2_6_21.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/iw_nes_210_to_2_6_21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,23 @@
+diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1549,6 +1549,11 @@ static void nes_netdev_vlan_rx_register(
+ 	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ }
+ 
++static void nes_netdev_vlan_rx_kill_vid(struct net_device *netdev, 
++					unsigned short vid)
++{
++}
++
+ 
+ /**
+  * nes_netdev_init - initialize network device
+@@ -1598,6 +1603,7 @@ struct net_device *nes_netdev_init(struc
+ 	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
+ 	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ 	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
++	netdev->vlan_rx_kill_vid = nes_netdev_vlan_rx_kill_vid;
+ 
+ 	/* Fill in the port structure */
+ 	nesvnic->netdev = netdev;

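Note on the iw_nes_210_to_2_6_21.patch added above: it registers an empty vlan_rx_kill_vid handler alongside vlan_rx_register. A minimal, hypothetical sketch of that backport pattern follows (the foo_* names are invented; the rationale, that older 8021q code may invoke the callback without a NULL check, is an assumption rather than something stated in the patch):

#include <linux/netdevice.h>
#include <linux/if_vlan.h>

struct foo_priv {
        struct vlan_group *vlgrp;       /* VLAN group handed in by the 8021q layer */
};

static void foo_vlan_rx_register(struct net_device *netdev,
                                 struct vlan_group *grp)
{
        struct foo_priv *priv = netdev_priv(netdev);

        priv->vlgrp = grp;              /* remembered so the RX path can do VLAN accel */
}

/* No per-VID hardware filter on this device, so removal is a no-op; the
 * handler exists only so the VLAN core always has something to call. */
static void foo_vlan_rx_kill_vid(struct net_device *netdev, unsigned short vid)
{
}

static void foo_setup_vlan_ops(struct net_device *netdev)
{
        netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
        netdev->vlan_rx_register = foo_vlan_rx_register;
        netdev->vlan_rx_kill_vid = foo_vlan_rx_kill_vid;
}
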
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/iw_nes_300_to_2_6_13.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/iw_nes_300_to_2_6_13.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/iw_nes_300_to_2_6_13.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,7 +1,7 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_13_patch/drivers/infiniband/hw/nes/nes_nic.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_13_patch/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:53:09.000000000 -0600
-@@ -1640,7 +1640,6 @@ struct net_device *nes_netdev_init(struc
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_13_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1628,7 +1628,6 @@ struct net_device *nes_netdev_init(struc
  	netdev->dev_addr[3] = (u8)(u64temp>>16);
  	netdev->dev_addr[4] = (u8)(u64temp>>8);
  	netdev->dev_addr[5] = (u8)u64temp;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/iw_nes_400_to_2_6_9.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/iw_nes_400_to_2_6_9.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/iw_nes_400_to_2_6_9.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,7 +1,7 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_verbs.c nes.2_6_9_patch/drivers/infiniband/hw/nes/nes_verbs.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_verbs.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_9_patch/drivers/infiniband/hw/nes/nes_verbs.c	2008-11-07 15:53:36.000000000 -0600
-@@ -1120,6 +1120,8 @@ static int nes_setup_mmap_qp(struct nes_
+--- linux-2.6/drivers/infiniband/hw/nes/nes_verbs.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_9_patch/drivers/infiniband/hw/nes/nes_verbs.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1017,6 +1017,8 @@ static int nes_setup_mmap_qp(struct nes_
  {
  	void *mem;
  	struct nes_device *nesdev = nesvnic->nesdev;
@@ -10,7 +10,7 @@
  
  	nesqp->qp_mem_size = (sizeof(struct nes_hw_qp_wqe) * sq_size) +
  			(sizeof(struct nes_hw_qp_wqe) * rq_size) +
-@@ -1137,6 +1139,17 @@ static int nes_setup_mmap_qp(struct nes_
+@@ -1034,6 +1036,17 @@ static int nes_setup_mmap_qp(struct nes_
  			"host descriptor rings located @ %p (pa = 0x%08lX.) size = %u.\n",
  			mem, (unsigned long)nesqp->hwqp.sq_pbase, nesqp->qp_mem_size);
  
@@ -28,7 +28,7 @@
  	memset(mem, 0, nesqp->qp_mem_size);
  
  	nesqp->hwqp.sq_vbase = mem;
-@@ -1509,6 +1522,8 @@ static int nes_destroy_qp(struct ib_qp *
+@@ -1409,6 +1422,8 @@ static int nes_destroy_qp(struct ib_qp *
  	/* struct nes_vnic *nesvnic = to_nesvnic(ibqp->device); */
  	struct nes_ucontext *nes_ucontext;
  	struct ib_qp_attr attr;
@@ -37,7 +37,7 @@
  	struct iw_cm_id *cm_id;
  	struct iw_cm_event cm_event;
  	int ret;
-@@ -1552,6 +1567,17 @@ static int nes_destroy_qp(struct ib_qp *
+@@ -1452,6 +1467,17 @@ static int nes_destroy_qp(struct ib_qp *
  			if (nes_ucontext->first_free_wq > nesqp->mmap_sq_db_index) {
  				nes_ucontext->first_free_wq = nesqp->mmap_sq_db_index;
  			}

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/mlx4_0060_sysfs.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/mlx4_0060_sysfs.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/mlx4_0060_sysfs.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,18 +1,18 @@
-From 70e57d4e8e8df5b452a13f9b6a3c07f2df09e8a4 Mon Sep 17 00:00:00 2001
+From 20f52545712c4d0b91fb96df72ea5b1818685bc5 Mon Sep 17 00:00:00 2001
 From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
-Date: Thu, 4 Dec 2008 13:40:39 +0200
+Date: Thu, 22 Jan 2009 09:41:18 +0200
 Subject: [PATCH] mlx4: Sysfs backport for RHAS4
 
 Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
 ---
- drivers/net/mlx4/main.c |   90 ++++++++++++++++++++++++++++++++--------------
- 1 files changed, 62 insertions(+), 28 deletions(-)
+ drivers/net/mlx4/main.c |   68 +++++++++++++++++++++++++++++++++++-----------
+ 1 files changed, 51 insertions(+), 17 deletions(-)
 
 diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
-index 7df678d..1c01c38 100644
+index f87ebbc..fce2589 100644
 --- a/drivers/net/mlx4/main.c
 +++ b/drivers/net/mlx4/main.c
-@@ -399,18 +399,13 @@ out:
+@@ -397,18 +397,13 @@ out:
  	return err;
  }
  
@@ -34,7 +34,7 @@
  		sprintf(buf, "auto (%s)\n", type);
  	else
  		sprintf(buf, "%s\n", type);
-@@ -418,14 +413,33 @@ static ssize_t show_port_type(struct device *dev,
+@@ -416,14 +411,33 @@ static ssize_t show_port_type(struct device *dev,
  	return strlen(buf);
  }
  
@@ -42,11 +42,7 @@
 -			     struct device_attribute *attr,
 -			     const char *buf, size_t count)
 +static ssize_t show_port_type1(struct device *dev, char *buf)
- {
--	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
--						   port_attr);
--	struct mlx4_dev *mdev = info->dev;
--	struct mlx4_priv *priv = mlx4_priv(mdev);
++{
 +	struct pci_dev *pdev = to_pci_dev(dev);
 +	struct mlx4_dev *mdev = pci_get_drvdata(pdev);
 +
@@ -67,63 +63,18 @@
 +		return -ENODEV;
 +}
 +
-+static ssize_t store_port(struct mlx4_dev *dev, int port,
++static ssize_t store_port(struct mlx4_dev *mdev, int port,
 +			  const char *buf, size_t count)
-+{
-+	struct mlx4_priv *priv = mlx4_priv(dev);
+ {
+-	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
+-						   port_attr);
+-	struct mlx4_dev *mdev = info->dev;
+ 	struct mlx4_priv *priv = mlx4_priv(mdev);
 +	struct mlx4_port_info *info = &priv->port[port];
  	enum mlx4_port_type types[MLX4_MAX_PORTS];
- 	enum mlx4_port_type tmp_type;
+ 	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
  	int i;
-@@ -438,50 +452,70 @@ static ssize_t set_port_type(struct device *dev,
- 	else if (!strcmp(buf, "auto\n"))
- 		info->tmp_type = MLX4_PORT_TYPE_AUTO;
- 	else {
--		mlx4_err(mdev, "%s is not supported port type\n", buf);
-+		mlx4_err(dev, "%s is not supported port type\n", buf);
- 		return -EINVAL;
- 	}
- 
- 	mutex_lock(&priv->port_mutex);
--	mdev->caps.possible_type[info->port] = info->tmp_type;
-+	dev->caps.possible_type[info->port] = info->tmp_type; 
- 	if (info->tmp_type == MLX4_PORT_TYPE_AUTO) {
--		err = mlx4_SENSE_PORT(mdev, info->port, &tmp_type);
-+		err = mlx4_SENSE_PORT(dev, info->port, &tmp_type);
- 		if (!err && (tmp_type == MLX4_PORT_TYPE_ETH ||
- 			     tmp_type == MLX4_PORT_TYPE_IB)) {
- 			info->tmp_type = tmp_type;
- 		}
- 	}
- 
--	for (i = 0; i < mdev->caps.num_ports; i++)
-+	for (i = 0; i < dev->caps.num_ports; i++)
- 		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
--					mdev->caps.possible_type[i+1];
-+					dev->caps.possible_type[i+1];
- 
- 	if (priv->trig) {
--		if (++priv->changed_ports < mdev->caps.num_ports)
-+		if (++priv->changed_ports < dev->caps.num_ports)
- 			goto out;
- 		else
- 			priv->trig = priv->changed_ports = 0;
- 	}
--	err = mlx4_check_port_params(mdev, types);
-+	err = mlx4_check_port_params(dev, types);
- 	if (err)
- 		goto out;
- 
--	for (i = 0; i < mdev->caps.num_ports; i++) {
-+	for (i = 0; i < dev->caps.num_ports; i++) {
- 		priv->port[i + 1].tmp_type = 0;
- 	}
- 
--	err = mlx4_change_port_types(mdev, types);
-+	err = mlx4_change_port_types(dev, types);
- 
- out:
- 	mutex_unlock(&priv->port_mutex);
+@@ -482,6 +496,24 @@ out:
  	return err ? err : count;
  }
  
@@ -145,9 +96,11 @@
 +	return store_port(mdev, 2, buf, count);
 +}
 +
-+
- static ssize_t trigger_port(struct device *dev,
--			    struct device_attribute *attr,
+ /*
+  * This function is invoked if user wants to modify all port types
+  * at once. We will wait for all the ports to be assigned new values,
+@@ -491,7 +523,9 @@ static ssize_t trigger_port(struct device *dev,
+ 			    struct device_attribute *attr,
  			    const char *buf, size_t count)
  {
 -	struct mlx4_priv *priv = container_of(attr, struct mlx4_priv, trigger_attr);
@@ -157,7 +110,7 @@
  	if (!priv)
  		return -ENODEV;
  
-@@ -1121,8 +1155,8 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
+@@ -1131,8 +1165,8 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
  
  	sprintf(info->dev_name, "mlx4_port%d", port);
  	memcpy(&info->port_attr.attr, &attr, sizeof(attr));

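Note on the mlx4_0060_sysfs.patch changes above: the RHAS4-era device attribute API passes show()/store() only the struct device pointer and a buffer, with no struct device_attribute argument, so the backport recovers its state via pci_get_drvdata(to_pci_dev(dev)) and adds one thin wrapper per port (show_port_type1, store_port with an explicit port number). A self-contained sketch of the same pattern, with invented foo_* names and a made-up port_type field:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/stat.h>

struct foo_dev {
        int port_type[3];               /* 1-based port index, as in mlx4 */
};

/* Common helper: without an attribute pointer there is no container_of()
 * back to a per-port structure, so the driver data comes from the pci_dev. */
static ssize_t foo_show_port(struct device *dev, int port, char *buf)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct foo_dev *fdev = pci_get_drvdata(pdev);

        if (!fdev)
                return -ENODEV;
        return sprintf(buf, "%d\n", fdev->port_type[port]);
}

/* One wrapper per port is the price of the old show() signature. */
static ssize_t foo_show_port1(struct device *dev, char *buf)
{
        return foo_show_port(dev, 1, buf);
}

static ssize_t foo_show_port2(struct device *dev, char *buf)
{
        return foo_show_port(dev, 2, buf);
}

static DEVICE_ATTR(port1_type, S_IRUGO, foo_show_port1, NULL);
static DEVICE_ATTR(port2_type, S_IRUGO, foo_show_port2, NULL);
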
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/mlx4_en_0099_no_multiqueue.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/mlx4_en_0099_no_multiqueue.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/mlx4_en_0099_no_multiqueue.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,73 @@
+diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
+index bce700a..a9ee0d7 100644
+--- a/drivers/net/mlx4/en_netdev.c
++++ b/drivers/net/mlx4/en_netdev.c
+@@ -963,7 +963,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	int i;
+ 	int err;
+ 
+-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
++	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+ 	if (dev == NULL) {
+ 		mlx4_err(mdev, "Net device allocation failed\n");
+ 		return -ENOMEM;
+@@ -1036,7 +1036,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	dev->open = &mlx4_en_open;
+ 	dev->stop = &mlx4_en_close;
+ 	dev->hard_start_xmit = &mlx4_en_xmit;
+-	dev->select_queue = &mlx4_en_select_queue;
+ 	dev->get_stats = &mlx4_en_get_stats;
+ 	dev->set_multicast_list = &mlx4_en_set_multicast;
+ 	dev->set_mac_address = &mlx4_en_set_mac;
+diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
+index 3d8246f..72e166f 100644
+--- a/drivers/net/mlx4/en_tx.c
++++ b/drivers/net/mlx4/en_tx.c
+@@ -392,7 +392,7 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+ 			 *   transmission on that ring would stop the queue.
+ 			 */
+ 			ring->blocked = 0;
+-			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
++			netif_wake_queue(dev);
+ 			priv->port_stats.wake_queue++;
+ 		}
+ 	}
+@@ -612,7 +612,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
+ 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ }
+ 
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
++static int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	u16 vlan_tag = 0;
+@@ -703,7 +703,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+-	tx_ind = skb->queue_mapping;
++	tx_ind = mlx4_en_select_queue(dev, skb);
+ 	ring = &priv->tx_ring[tx_ind];
+ 	if (priv->vlgrp && vlan_tx_tag_present(skb))
+ 		vlan_tag = vlan_tx_tag_get(skb);
+@@ -713,7 +713,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ 		/* every full Tx ring stops queue.
+ 		 * TODO: implement multi-queue support (per-queue stop) */
+-		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
++		netif_stop_queue(dev);
+ 		ring->blocked = 1;
+ 		priv->port_stats.queue_stopped++;
+ 
+diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
+index adef17c..995e318 100644
+--- a/drivers/net/mlx4/mlx4_en.h
++++ b/drivers/net/mlx4/mlx4_en.h
+@@ -521,7 +521,6 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq);
+ void mlx4_en_poll_tx_cq(unsigned long data);
+ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+ 
+ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+ 			   u32 size, u16 stride);

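Note on the mlx4_en_0099_no_multiqueue.patch added above: kernels without multiqueue TX support lack alloc_etherdev_mq(), skb->queue_mapping and the per-queue netif_tx_{stop,wake}_queue() helpers, so the patch falls back to alloc_etherdev(), has the driver pick its TX ring itself, and stops/wakes the single device queue whenever any ring fills or drains. A rough, self-contained sketch of that fallback (foo_* names are invented and the ring-full test is a placeholder):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct foo_tx_ring {
        int blocked;                    /* this ring filled up and stopped the device */
};

struct foo_priv {
        struct foo_tx_ring *tx_ring;
        int tx_ring_num;
};

static int foo_ring_full(struct foo_tx_ring *ring)
{
        return 0;                       /* placeholder: real code checks descriptor headroom */
}

/* Driver-private ring selection stands in for skb->queue_mapping. */
static int foo_select_ring(struct foo_priv *priv, struct sk_buff *skb)
{
        return (u32)skb->protocol % priv->tx_ring_num;
}

static int foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct foo_priv *priv = netdev_priv(dev);
        struct foo_tx_ring *ring = &priv->tx_ring[foo_select_ring(priv, skb)];

        if (foo_ring_full(ring)) {
                netif_stop_queue(dev);  /* only one kernel queue, so everything stalls */
                ring->blocked = 1;
                return NETDEV_TX_BUSY;
        }
        /* ... post descriptors to the chosen ring ... */
        return NETDEV_TX_OK;
}

/* TX completion path: wake the single device queue once the blocked ring drains. */
static void foo_tx_done(struct net_device *dev, struct foo_tx_ring *ring)
{
        if (ring->blocked && !foo_ring_full(ring)) {
                ring->blocked = 0;
                netif_wake_queue(dev);
        }
}
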
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/mlx4_en_0100_to_2.6.24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/mlx4_en_0100_to_2.6.24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/mlx4_en_0100_to_2.6.24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,33 +1,5 @@
-From 6d0a6cc6981404e95297924c9a8f169b3447a98e Mon Sep 17 00:00:00 2001
-From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
-Date: Thu, 2 Oct 2008 11:26:30 +0300
-Subject: [PATCH] mlx4_en: LRO backport patch
-
-Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
----
- drivers/net/mlx4/Makefile    |    2 +-
- drivers/net/mlx4/en_cq.c     |   23 ++-
- drivers/net/mlx4/en_lro.c    |  529 ++++++++++++++++++++++++++++++++++++++++++
- drivers/net/mlx4/en_netdev.c |    5 +-
- drivers/net/mlx4/en_params.c |   19 +--
- drivers/net/mlx4/en_rx.c     |  121 +++-------
- drivers/net/mlx4/en_tx.c     |    1 +
- drivers/net/mlx4/mlx4_en.h   |   60 +++++-
- 8 files changed, 646 insertions(+), 114 deletions(-)
- create mode 100644 drivers/net/mlx4/en_lro.c
-
-diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
-index acb1b91..e6bfdec 100644
---- a/drivers/net/mlx4/Makefile
-+++ b/drivers/net/mlx4/Makefile
-@@ -5,4 +5,4 @@ mlx4_core-y :=  alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-
- obj-$(CONFIG_MLX4_EN)                  += mlx4_en.o
-
--mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o
-+mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o en_lro.o
 diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
-index cf9c5df..2348bfc 100644
+index a4d6e46..440633a 100644
 --- a/drivers/net/mlx4/en_cq.c
 +++ b/drivers/net/mlx4/en_cq.c
 @@ -34,6 +34,7 @@
@@ -38,7 +10,7 @@
  
  #include "mlx4_en.h"
  
-@@ -100,8 +101,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -106,8 +107,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  		cq->timer.function = mlx4_en_poll_tx_cq;
  		cq->timer.data = (unsigned long) cq;
  	} else {
@@ -58,7 +30,7 @@
  	}
  
  	return 0;
-@@ -123,8 +133,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -129,8 +139,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  
  	if (cq->is_tx)
  		del_timer(&cq->timer);
@@ -74,548 +46,8 @@
  
  	mlx4_cq_free(mdev->dev, &cq->mcq);
  }
-diff --git a/drivers/net/mlx4/en_lro.c b/drivers/net/mlx4/en_lro.c
-new file mode 100644
-index 0000000..9d17376
---- /dev/null
-+++ b/drivers/net/mlx4/en_lro.c
-@@ -0,0 +1,534 @@
-+/*
-+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses.  You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ *     Redistribution and use in source and binary forms, with or
-+ *     without modification, are permitted provided that the following
-+ *     conditions are met:
-+ *
-+ *      - Redistributions of source code must retain the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer.
-+ *
-+ *      - Redistributions in binary form must reproduce the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer in the documentation and/or other materials
-+ *        provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ */
-+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <net/tcp.h>
-+#include <linux/if_vlan.h>
-+#include <linux/delay.h>
-+
-+#include "mlx4_en.h"
-+
-+/* LRO hash function - using sum of source and destination port LSBs is
-+ * good enough */
-+#define LRO_INDEX(th, size) \
-+	((*((u8*) &th->source + 1) + *((u8*) &th->dest + 1)) & (size - 1))
-+
-+/* #define CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+static void mlx4_en_lro_validate(struct mlx4_en_priv* priv, struct mlx4_en_lro *lro)
-+{
-+	int i;
-+	int size, size2;
-+	struct sk_buff *skb = lro->skb;
-+	skb_frag_t *frags;
-+	int len, len2;
-+	int cur_skb = 0;
-+
-+	/* Sum fragment sizes of first skb */
-+	len = skb->len;
-+	size = skb_headlen(skb);
-+	frags = skb_shinfo(skb)->frags;
-+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+		size += frags[i].size;
-+	}
-+
-+	/* Add in fragments of linked skb's */
-+	skb = skb_shinfo(skb)->frag_list;
-+	while (skb) {
-+		cur_skb++;
-+		len2 = skb->len;
-+		if (skb_headlen(skb)) {
-+			mlx4_err(priv->mdev, "Bad LRO format: non-zero headlen "
-+				  "in fraglist (skb:%d)\n", cur_skb);
-+			return;
-+		}
-+
-+		size2 = 0;
-+		frags = skb_shinfo(skb)->frags;
-+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+			size2 += frags[i].size;
-+		}
-+
-+		if (size2 != len2) {
-+			mlx4_err(priv->mdev, "Bad skb size:%d in LRO fraglist. "
-+			          "Expected:%d (skb:%d)\n", size2, len2, cur_skb);
-+			return;
-+		}
-+		size += size2;
-+		skb = skb->next;
-+	}
-+
-+	if (size != len)
-+		mlx4_err(priv->mdev, "Bad LRO size:%d expected:%d\n", size, len);
-+}
-+#endif /* MLX4_EN_DEBUG_LRO */
-+
-+static void mlx4_en_lro_flush_single(struct mlx4_en_priv* priv,
-+		   struct mlx4_en_rx_ring* ring, struct mlx4_en_lro *lro)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct sk_buff *skb = lro->skb;
-+	struct iphdr *iph = (struct iphdr *) skb->data;
-+	struct tcphdr *th = (struct tcphdr *)(iph + 1);
-+	unsigned int headlen = skb_headlen(skb);
-+	__wsum tcp_hdr_csum;
-+	u32 *ts;
-+
-+	/* Update IP length and checksum */
-+	iph->tot_len = htons(lro->tot_len);
-+	iph->check = 0;
-+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-+
-+	/* Update latest TCP ack, window, psh, and timestamp */
-+	th->ack_seq = lro->ack_seq;
-+	th->window = lro->window;
-+	th->psh = !!lro->psh;
-+	if (lro->has_timestamp) {
-+		ts = (u32 *) (th + 1);
-+		ts[1] = htonl(lro->tsval);
-+		ts[2] = lro->tsecr;
-+	}
-+	th->check = 0;
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, 0);
-+	lro->data_csum = csum_add(lro->data_csum, tcp_hdr_csum);
-+	th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
-+				      lro->tot_len - (iph->ihl << 2),
-+				      IPPROTO_TCP, lro->data_csum);
-+
-+	/* Update skb */
-+	skb->len = lro->tot_len;
-+	skb->data_len = lro->tot_len - headlen;
-+	skb->truesize = skb->len + sizeof(struct sk_buff);
-+	skb_shinfo(skb)->gso_size = lro->mss;
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+	mlx4_en_lro_validate(priv, lro);
-+#endif /* CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+	/* Push it up the stack */
-+	if (priv->vlgrp && lro->has_vlan)
-+		vlan_hwaccel_receive_skb(skb, priv->vlgrp,
-+					be16_to_cpu(lro->vlan_prio));
-+	else
-+		netif_receive_skb(skb);
-+	priv->dev->last_rx = jiffies;
-+
-+	/* Increment stats */
-+	priv->port_stats.lro_flushed++;
-+
-+	/* Move session back to the free list */
-+	hlist_del(&lro->node);
-+	hlist_del(&lro->flush_node);
-+	hlist_add_head(&lro->node, &ring->lro_free);
-+}
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_flush, flush_node) {
-+		if (all || time_after(jiffies, lro->expires))
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+	}
-+}
-+
-+static inline int mlx4_en_lro_append(struct mlx4_en_priv *priv,
-+				   struct mlx4_en_lro *lro,
-+				   struct mlx4_en_rx_desc *rx_desc,
-+				   struct skb_frag_struct *skb_frags,
-+				   struct mlx4_en_rx_alloc *page_alloc,
-+				   unsigned int data_len,
-+				   int hlen)
-+{
-+	struct sk_buff *skb = lro->skb_last;
-+	struct skb_shared_info *info;
-+	struct skb_frag_struct *frags_copy;
-+	int nr_frags;
-+
-+	if (skb_shinfo(skb)->nr_frags + priv->num_frags > MAX_SKB_FRAGS)
-+		return -ENOMEM;
-+
-+	info = skb_shinfo(skb);
-+
-+	/* Copy fragments from descriptor ring to skb */
-+	frags_copy = info->frags + info->nr_frags;
-+	nr_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
-+						frags_copy,
-+						page_alloc,
-+						data_len + hlen);
-+	if (!nr_frags) {
-+		mlx4_dbg(DRV, priv, "Failed completing rx desc during LRO append\n");
-+		return -ENOMEM;
-+	}
-+
-+	/* Skip over headers */
-+	frags_copy[0].page_offset += hlen;
-+
-+	if (nr_frags == 1)
-+		frags_copy[0].size = data_len;
-+	else {
-+		/* Adjust size of last fragment to match packet length.
-+		 * Note: if this fragment is also the first one, the
-+		 *       operation is completed in the next line */
-+		frags_copy[nr_frags - 1].size = hlen + data_len -
-+				priv->frag_info[nr_frags - 1].frag_prefix_size;
-+
-+		/* Adjust size of first fragment */
-+		frags_copy[0].size -= hlen;
-+	}
-+
-+	/* Update skb bookkeeping */
-+	skb->len += data_len;
-+	skb->data_len += data_len;
-+	info->nr_frags += nr_frags;
-+	return 0;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_find_session(struct mlx4_en_dev *mdev,
-+						       struct mlx4_en_rx_ring *ring,
-+						       struct iphdr *iph,
-+						       struct tcphdr *th)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node;
-+	int index = LRO_INDEX(th, mdev->profile.num_lro);
-+	struct hlist_head *list = &ring->lro_hash[index];
-+
-+	hlist_for_each_entry(lro, node, list, node) {
-+		if (lro->sport_dport == *((u32*) &th->source) &&
-+		    lro->saddr == iph->saddr &&
-+		    lro->daddr == iph->daddr)
-+			return lro;
-+	}
-+	return NULL;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_alloc_session(struct mlx4_en_priv *priv,
-+							struct mlx4_en_rx_ring *ring)
-+{
-+	return hlist_empty(&ring->lro_free) ? NULL :
-+		hlist_entry(ring->lro_free.first, struct mlx4_en_lro, node);
-+}
-+
-+static __wsum mlx4_en_lro_tcp_data_csum(struct iphdr *iph,
-+					struct tcphdr *th, int len)
-+{
-+	__wsum tcp_csum;
-+	__wsum tcp_hdr_csum;
-+	__wsum tcp_ps_hdr_csum;
-+
-+	tcp_csum = ~csum_unfold(th->check);
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, tcp_csum);
-+
-+	tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-+					     len + (th->doff << 2),
-+					     IPPROTO_TCP, 0);
-+
-+	return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
-+			tcp_ps_hdr_csum);
-+}
-+
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+					  struct mlx4_en_rx_desc *rx_desc,
-+					  struct skb_frag_struct *skb_frags,
-+					  unsigned int length,
-+					  struct mlx4_cqe *cqe)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct mlx4_en_lro *lro;
-+	struct sk_buff *skb;
-+	struct iphdr *iph;
-+	struct tcphdr *th;
-+	dma_addr_t dma;
-+	int tcp_hlen;
-+	int tcp_data_len;
-+	int hlen;
-+	u16 ip_len;
-+	void *va;
-+	u32 *ts;
-+	u32 seq;
-+	u32 tsval = (u32) ~0UL;
-+	u32 tsecr = 0;
-+	u32 ack_seq;
-+	u16 window;
-+
-+	/* This packet is eligible for LRO if it is:
-+	 * - DIX Ethernet (type interpretation)
-+	 * - TCP/IP (v4)
-+	 * - without IP options
-+	 * - not an IP fragment */
-+	if (!mlx4_en_can_lro(cqe->status))
-+			return -1;
-+
-+	/* Get pointer to TCP header. We already know that the packet is DIX Ethernet/IPv4/TCP
-+	 * with no VLAN (HW stripped it) and no IP options */
-+	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
-+	iph = va + ETH_HLEN;
-+	th = (struct tcphdr *)(iph + 1);
-+
-+	/* Synchronsize headers for processing */
-+	dma = be64_to_cpu(rx_desc->data[0].addr);
-+#define MAX_LRO_HEADER		(ETH_HLEN + \
-+				 sizeof(*iph) + \
-+				 sizeof(*th) + \
-+				 TCPOLEN_TSTAMP_ALIGNED)
-+	dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
-+				      MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+
-+	/* We only handle aligned timestamp options */
-+	tcp_hlen = (th->doff << 2);
-+	if (tcp_hlen == sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) {
-+		ts = (u32*) (th + 1);
-+		if (unlikely(*ts != htonl((TCPOPT_NOP << 24) |
-+					  (TCPOPT_NOP << 16) |
-+					  (TCPOPT_TIMESTAMP << 8) |
-+					  TCPOLEN_TIMESTAMP)))
-+			goto sync_device;
-+		tsval = ntohl(ts[1]);
-+		tsecr = ts[2];
-+	} else if (tcp_hlen != sizeof(*th))
-+		goto sync_device;
-+	
-+
-+	/* At this point we know we have a TCP packet that is likely to be
-+	 * eligible for LRO. Therefore, see now if we have an oustanding
-+	 * session that corresponds to this packet so we could flush it if
-+	 * something still prevents LRO */
-+	lro = mlx4_en_lro_find_session(mdev, ring, iph, th);
-+
-+	/* ensure no bits set besides ack or psh */
-+	if (th->fin || th->syn || th->rst || th->urg || th->ece ||
-+	    th->cwr || !th->ack) {
-+		if (lro) {
-+			/* First flush session to keep packets in-order */
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		}
-+		goto sync_device;
-+	}
-+
-+	/* Get ip length and verify that the frame is big enough */
-+	ip_len = ntohs(iph->tot_len);
-+	if (unlikely(length < ETH_HLEN + ip_len)) {
-+		mlx4_warn(mdev, "Cannot LRO - ip payload exceeds frame!\n");
-+		goto sync_device;
-+	}
-+
-+	/* Get TCP payload length */
-+	tcp_data_len = ip_len - tcp_hlen - sizeof(struct iphdr);
-+	seq = ntohl(th->seq);
-+	if (!tcp_data_len)
-+		goto flush_session;
-+
-+	if (lro) {
-+		/* Check VLAN tag */
-+		if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+			if (cqe->sl_vid != lro->vlan_prio || !lro->has_vlan) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else if (lro->has_vlan) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* Check sequence number */
-+		if (unlikely(seq != lro->next_seq)) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* If the cummulative IP length is over 64K, flush and start
-+		 * a new session */
-+		if (lro->tot_len + tcp_data_len > 0xffff) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto new_session;
-+		}
-+
-+		/* Check timestamps */
-+		if (tcp_hlen != sizeof(*th)) {
-+			if (unlikely(lro->tsval > tsval || !tsecr))
-+				goto sync_device;
-+		}
-+
-+		window = th->window;
-+		ack_seq = th->ack_seq;
-+		if (likely(tcp_data_len)) {
-+			/* Append the data! */
-+			hlen = ETH_HLEN + sizeof(struct iphdr) + tcp_hlen;
-+			if (mlx4_en_lro_append(priv, lro, rx_desc, skb_frags,
-+							ring->page_alloc,
-+							tcp_data_len, hlen)) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else {
-+			/* No data */
-+			dma_sync_single_range_for_device(&mdev->dev->pdev->dev, dma,
-+							 0, MAX_LRO_HEADER,
-+							 DMA_FROM_DEVICE);
-+		}
-+
-+		/* Update session */
-+		lro->psh |= th->psh;
-+		lro->next_seq += tcp_data_len;
-+		lro->data_csum = csum_block_add(lro->data_csum,
-+					mlx4_en_lro_tcp_data_csum(iph, th,
-+								  tcp_data_len),
-+					lro->tot_len);
-+		lro->tot_len += tcp_data_len;
-+		lro->tsval = tsval;
-+		lro->tsecr = tsecr;
-+		lro->ack_seq = ack_seq;
-+		lro->window = window;
-+		if (tcp_data_len > lro->mss)
-+			lro->mss = tcp_data_len;
-+		priv->port_stats.lro_aggregated++;
-+		if (th->psh)
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		return 0;
-+	}
-+
-+new_session:
-+	if (th->psh)
-+		goto sync_device;
-+	lro = mlx4_en_lro_alloc_session(priv, ring);
-+	if (lro) {
-+		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags, ring->page_alloc,
-+							     ETH_HLEN + ip_len);
-+		if (skb) {
-+			int index;
-+
-+			/* Add in the skb */
-+			lro->skb = skb;
-+			lro->skb_last = skb;
-+			skb->protocol = eth_type_trans(skb, priv->dev);
-+			skb->ip_summed = CHECKSUM_UNNECESSARY;
-+
-+			/* Initialize session */
-+			lro->saddr = iph->saddr;
-+			lro->daddr = iph->daddr;
-+			lro->sport_dport = *((u32*) &th->source);
-+
-+			lro->next_seq = seq + tcp_data_len;
-+			lro->tot_len = ip_len;
-+			lro->psh = th->psh;
-+			lro->ack_seq = th->ack_seq;
-+			lro->window = th->window;
-+			lro->mss = tcp_data_len;
-+			lro->data_csum = mlx4_en_lro_tcp_data_csum(iph, th,
-+						tcp_data_len);
-+
-+			/* Handle vlans */
-+			if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+				lro->vlan_prio = cqe->sl_vid;
-+				lro->has_vlan = 1;
-+			} else
-+				lro->has_vlan = 0;
-+
-+			/* Handle timestamps */
-+			if (tcp_hlen != sizeof(*th)) {
-+				lro->tsval = tsval;
-+				lro->tsecr = tsecr;
-+				lro->has_timestamp = 1;
-+			} else {
-+				lro->tsval = (u32) ~0UL;
-+				lro->has_timestamp = 0;
-+			}
-+
-+			/* Activate this session */
-+			lro->expires = jiffies + HZ / 25;
-+			hlist_del(&lro->node);
-+			index = LRO_INDEX(th, mdev->profile.num_lro);
-+
-+			hlist_add_head(&lro->node, &ring->lro_hash[index]);
-+			hlist_add_head(&lro->flush_node, &ring->lro_flush);
-+			priv->port_stats.lro_aggregated++;
-+			return 0;
-+		}
-+	} else {
-+		priv->port_stats.lro_no_desc++;
-+	}
-+
-+flush_session:
-+	if (lro)
-+		mlx4_en_lro_flush_single(priv, ring, lro);
-+sync_device:
-+	dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
-+					 MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+	return -1;
-+}
-+
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_free, node) {
-+		hlist_del(&lro->node);
-+		kfree(lro);
-+	}
-+	kfree(ring->lro_hash);
-+}
-+
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro)
-+{
-+	struct mlx4_en_lro *lro;
-+	int i;
-+
-+	INIT_HLIST_HEAD(&ring->lro_free);
-+	INIT_HLIST_HEAD(&ring->lro_flush);
-+	ring->lro_hash = kmalloc(sizeof(struct hlist_head) * num_lro,
-+				 GFP_KERNEL);
-+	if (!ring->lro_hash)
-+		return -ENOMEM;
-+
-+	for (i = 0; i < num_lro; i++) {
-+		INIT_HLIST_HEAD(&ring->lro_hash[i]);
-+		lro = kzalloc(sizeof(struct mlx4_en_lro), GFP_KERNEL);
-+		if (!lro) {
-+			mlx4_en_lro_destroy(ring);
-+			return -ENOMEM;
-+		}
-+		INIT_HLIST_NODE(&lro->node);
-+		INIT_HLIST_NODE(&lro->flush_node);
-+		hlist_add_head(&lro->node, &ring->lro_free);
-+	}
-+	return 0;
-+}
-+
-+
 diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
-index 6199f4c..92f91d2 100644
+index 2d57ae6..3b394d7 100644
 --- a/drivers/net/mlx4/en_netdev.c
 +++ b/drivers/net/mlx4/en_netdev.c
 @@ -333,7 +333,8 @@ void mlx4_en_netpoll(struct net_device *dev)
@@ -625,10 +57,10 @@
 -		napi_synchronize(&cq->napi);
 +		while (test_bit(__LINK_STATE_RX_SCHED, &cq->poll_dev->state))
 +			msleep(1);
- 		mlx4_en_process_rx_cq(dev, cq, 0);
- 		spin_unlock_irqrestore(&cq->lock, flags);
- 	}
-@@ -757,8 +758,6 @@ void mlx4_en_stop_port(struct net_device *dev)
+ 		if (priv->rx_ring[i].use_frags)
+ 			mlx4_en_process_rx_cq(dev, cq, 0);
+ 		else
+@@ -755,8 +756,6 @@ void mlx4_en_stop_port(struct net_device *dev)
  	/* Free RX Rings */
  	for (i = 0; i < priv->rx_ring_num; i++) {
  		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -637,226 +69,11 @@
  		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
  	}
  }
-diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
-index 6e96d0a..2b0ca84 100644
---- a/drivers/net/mlx4/en_params.c
-+++ b/drivers/net/mlx4/en_params.c
-@@ -142,21 +142,6 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
-  * Ethtool support
-  */
- 
--static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
--{
--	int i;
--
--	priv->port_stats.lro_aggregated = 0;
--	priv->port_stats.lro_flushed = 0;
--	priv->port_stats.lro_no_desc = 0;
--
--	for (i = 0; i < priv->rx_ring_num; i++) {
--		priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
--		priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
--		priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
--	}
--}
--
- static void
- mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
- {
-@@ -266,8 +251,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
- 
- 	spin_lock_bh(&priv->stats_lock);
- 
--	mlx4_en_update_lro_stats(priv);
--
- 	for (i = 0; i < NUM_MAIN_STATS; i++)
- 		data[index++] = ((unsigned long *) &priv->stats)[i];
- 	for (i = 0; i < NUM_PORT_STATS; i++)
-@@ -457,7 +440,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
- 	.get_rx_csum = mlx4_en_get_rx_csum,
- 	.set_rx_csum = mlx4_en_set_rx_csum,
- 	.get_tx_csum = ethtool_op_get_tx_csum,
--	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
-+	.set_tx_csum = ethtool_op_set_tx_csum,
- 	.get_strings = mlx4_en_get_strings,
- 	.get_sset_count = mlx4_en_get_sset_count,
- 	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
 diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
-index 5a95d74..16f07bc 100644
+index 9d144cd..14b5faf 100644
 --- a/drivers/net/mlx4/en_rx.c
 +++ b/drivers/net/mlx4/en_rx.c
-@@ -37,6 +37,7 @@
- #include <linux/if_ether.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/etherdevice.h>
- 
- #include "mlx4_en.h"
- 
-@@ -51,18 +52,6 @@ static void mlx4_en_srq_event(struct mlx
- 	return;
- }
- 
--static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
--				   void **ip_hdr, void **tcpudp_hdr,
--				   u64 *hdr_flags, void *priv)
--{
--	*mac_hdr = page_address(frags->page) + frags->page_offset;
--	*ip_hdr = *mac_hdr + ETH_HLEN;
--	*tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
--	*hdr_flags = LRO_IPV4 | LRO_TCP;
--
--	return 0;
--}
--
- static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
- 			      struct mlx4_en_rx_desc *rx_desc,
- 			      struct skb_frag_struct *skb_frags,
-@@ -372,23 +361,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- 	}
- 	ring->buf = ring->wqres.buf.direct.buf;
- 
--	/* Configure lro mngr */
--	memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
--	ring->lro.dev = priv->dev;
--	ring->lro.features = LRO_F_NAPI;
--	ring->lro.frag_align_pad = NET_IP_ALIGN;
--	ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
--	ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
--	ring->lro.max_desc = mdev->profile.num_lro;
--	ring->lro.max_aggr = MAX_SKB_FRAGS;
--	ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
--				    sizeof(struct net_lro_desc),
--				    GFP_KERNEL);
--	if (!ring->lro.lro_arr) {
--		mlx4_err(mdev, "Failed to allocate lro array\n");
-+	/* Allocate LRO sessions */
-+	if (mlx4_en_lro_init(ring, mdev->profile.num_lro)) {
-+		mlx4_err(mdev, "Failed allocating lro sessions\n");
- 		goto err_map;
- 	}
--	ring->lro.get_frag_header = mlx4_en_get_frag_header;
- 
- 	return 0;
- 
-@@ -501,7 +478,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 
--	kfree(ring->lro.lro_arr);
-+	mlx4_en_lro_destroy(ring);
- 	mlx4_en_unmap_buffer(&ring->wqres.buf);
- 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
- 	vfree(ring->rx_info);
-@@ -522,12 +499,12 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- 
- 
- /* Unmap a completed descriptor and free unused pages */
--static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
--				    struct mlx4_en_rx_desc *rx_desc,
--				    struct skb_frag_struct *skb_frags,
--				    struct skb_frag_struct *skb_frags_rx,
--				    struct mlx4_en_rx_alloc *page_alloc,
--				    int length)
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct mlx4_en_frag_info *frag_info;
-@@ -570,11 +547,11 @@ fail:
- }
- 
- 
--static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
--				      struct mlx4_en_rx_desc *rx_desc,
--				      struct skb_frag_struct *skb_frags,
--				      struct mlx4_en_rx_alloc *page_alloc,
--				      unsigned int length)
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct sk_buff *skb;
-@@ -663,11 +640,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 	struct mlx4_cqe *cqe;
- 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
- 	struct skb_frag_struct *skb_frags;
--	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
- 	struct mlx4_en_rx_desc *rx_desc;
- 	struct sk_buff *skb;
- 	int index;
--	int nr;
- 	unsigned int length;
- 	int polled = 0;
- 	int ip_summed;
-@@ -718,38 +693,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 			if ((cqe->status & MLX4_CQE_STATUS_IPOK) &&
- 			    (cqe->checksum == 0xffff)) {
- 				priv->port_stats.rx_chksum_good++;
--				/* This packet is eligible for LRO if it is:
--				 * - DIX Ethernet (type interpretation)
--				 * - TCP/IP (v4)
--				 * - without IP options
--				 * - not an IP fragment */
--				if (mlx4_en_can_lro(cqe->status) &&
--				    dev->features & NETIF_F_LRO) {
--
--					nr = mlx4_en_complete_rx_desc(
--						priv, rx_desc,
--						skb_frags, lro_frags,
--						ring->page_alloc, length);
--					if (!nr)
--						goto next;
--
--					if (priv->vlgrp && (cqe->vlan_my_qpn &
--						MLX4_CQE_VLAN_PRESENT_MASK)) {
--						lro_vlan_hwaccel_receive_frags(
--						       &ring->lro, lro_frags,
--						       length, length,
--						       priv->vlgrp,
--						       be16_to_cpu(cqe->sl_vid),
--						       NULL, 0);
--					} else
--						lro_receive_frags(&ring->lro,
--								  lro_frags,
--								  length,
--								  length,
--								  NULL, 0);
--
-+				if (mdev->profile.num_lro &&
-+				    !mlx4_en_lro_rx(priv, ring, rx_desc,
-+						    skb_frags, length, cqe))
- 					goto next;
--				}
- 
- 				/* LRO not possible, complete processing here */
- 				ip_summed = CHECKSUM_UNNECESSARY;
-@@ -790,13 +737,15 @@ next:
- 		if (++polled == budget) {
- 			/* We are here because we reached the NAPI budget -
- 			 * flush only pending LRO sessions */
--			lro_flush_all(&ring->lro);
-+			if (mdev->profile.num_lro)
-+				mlx4_en_lro_flush(priv, ring, 0);
- 			goto out;
- 		}
- 	}
- 
- 	/* If CQ is empty flush all LRO sessions unconditionally */
--	lro_flush_all(&ring->lro);
-+	if (mdev->profile.num_lro)
-+		mlx4_en_lro_flush(priv, ring, 1);
- 
- out:
- 	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
-@@ -820,30 +769,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
+@@ -1041,30 +1041,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
  	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
  
  	if (priv->port_up)
@@ -877,8 +94,8 @@
  	int done;
 +	int work = min(*budget, poll_dev->quota);
  
--	done = mlx4_en_process_rx_cq(dev, cq, budget);
-+	done = mlx4_en_process_rx_cq(dev, cq, work);
+-	done = cq->process_cq(dev, cq, budget);
++	done = cq->process_cq(dev, cq, work);
 +	dev->quota -= done;
 +	*budget -= done;
  
@@ -901,76 +118,11 @@
  }
  
  
-diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
-index 2297fec..59f8772 100644
---- a/drivers/net/mlx4/en_tx.c
-+++ b/drivers/net/mlx4/en_tx.c
-@@ -37,6 +37,7 @@
- #include <linux/skbuff.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/tcp.h>
- 
- #include "mlx4_en.h"
- 
 diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
-index 1011803..7c8c489 100644
+index ebaed67..456cd91 100644
 --- a/drivers/net/mlx4/mlx4_en.h
 +++ b/drivers/net/mlx4/mlx4_en.h
-@@ -38,7 +38,7 @@
- #include <linux/list.h>
- #include <linux/mutex.h>
- #include <linux/netdevice.h>
--#include <linux/inet_lro.h>
-+#include <net/checksum.h>
- 
- #include <linux/mlx4/device.h>
- #include <linux/mlx4/qp.h>
-@@ -268,11 +268,42 @@ struct mlx4_en_rx_desc {
- 	struct mlx4_wqe_data_seg data[0];
- };
- 
-+struct mlx4_en_lro {
-+	struct hlist_node node;
-+	struct hlist_node flush_node;
-+
-+	/* Id fields come first: */
-+	u32 saddr;
-+	u32 daddr;
-+	u32 sport_dport;
-+	u32 next_seq;
-+	u16 tot_len;
-+	u8 psh;
-+
-+	u32 tsval;
-+	u32 tsecr;
-+	u32 ack_seq;
-+	u16 window;
-+	__be16 vlan_prio;
-+	u16 has_vlan;
-+	u16 has_timestamp;
-+	u16 mss;
-+	__wsum  data_csum;
-+
-+	unsigned long expires;
-+	struct sk_buff *skb;
-+	struct sk_buff *skb_last;
-+};
-+
-+
- struct mlx4_en_rx_ring {
- 	struct mlx4_srq srq;
- 	struct mlx4_hwq_resources wqres;
- 	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
--	struct net_lro_mgr lro;
-+	struct mlx4_en_lro lro;
-+	struct hlist_head *lro_hash;
-+	struct hlist_head lro_free;
-+	struct hlist_head lro_flush;
- 	u32 size ;	/* number of Rx descs*/
- 	u32 actual_size;
- 	u32 size_mask;
-@@ -311,7 +342,7 @@ struct mlx4_en_cq {
+@@ -328,7 +328,7 @@ struct mlx4_en_cq {
  	int                     ring;
  	spinlock_t              lock;
  	struct net_device      *dev;
@@ -979,43 +131,12 @@
  	/* Per-core Tx cq processing support */
  	struct timer_list timer;
  	int size;
-@@ -541,7 +572,7 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- int mlx4_en_process_rx_cq(struct net_device *dev,
- 			  struct mlx4_en_cq *cq,
- 			  int budget);
+@@ -571,7 +571,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
+ int mlx4_en_process_rx_cq_skb(struct net_device *dev,
+ 			      struct mlx4_en_cq *cq,
+ 			      int budget);
 -int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
 +int mlx4_en_poll_rx_cq(struct net_device *poll_dev, int *budget);;
  void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
  			     int is_tx, int rss, int qpn, int cqn, int srqn,
  			     struct mlx4_qp_context *context);
-@@ -552,6 +583,27 @@ void mlx4_en_calc_rx_buf(struct net_device *dev);
- void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
- 				 struct mlx4_en_rss_map *rss_map,
- 				 int num_entries, int num_rings);
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all);
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+		   struct mlx4_en_rx_desc *rx_desc,
-+		   struct skb_frag_struct *skb_frags,
-+		   unsigned int length, struct mlx4_cqe *cqe);
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring);
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro);
-+
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv* priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length);
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv* priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length);
-+
- void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
- int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
- void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
--- 
-1.5.4
-

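Note on the mlx4_en_0100_to_2.6.24.patch changes above: the RX poll routine is rewritten for the old net_device based polling interface, in which poll() receives the device and an int *budget, must cap its work at min(*budget, dev->quota), decrement both counters by the amount of work done, and return 0 only after calling netif_rx_complete(). A self-contained sketch of that contract, with foo_* placeholders standing in for the driver's real completion-queue processing and interrupt re-arm:

#include <linux/kernel.h>
#include <linux/netdevice.h>

static int foo_process_rx(struct net_device *dev, int budget)
{
        /* placeholder: the real driver drains up to 'budget' completions here */
        return 0;
}

static void foo_enable_rx_irq(struct net_device *dev)
{
        /* placeholder: re-arm the device RX interrupt here */
}

static int foo_poll(struct net_device *poll_dev, int *budget)
{
        int work = min(*budget, poll_dev->quota);
        int done = foo_process_rx(poll_dev, work);      /* packets completed this pass */

        poll_dev->quota -= done;
        *budget -= done;

        if (done < work) {
                /* ring drained: leave the poll list, then re-enable the interrupt */
                netif_rx_complete(poll_dev);
                foo_enable_rx_irq(poll_dev);
                return 0;
        }
        return 1;                       /* budget used up; ask to be polled again */
}

On pre-napi_struct kernels the handler is registered via dev->poll and dev->weight rather than netif_napi_add(), which is also why the patch replaces napi_synchronize() with a loop that waits on __LINK_STATE_RX_SCHED.
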
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/mlx4_en_0110_no_set_flags.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/mlx4_en_0110_no_set_flags.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/mlx4_en_0110_no_set_flags.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -7,9 +7,9 @@
 --- ofed_kernel.orig/drivers/net/mlx4/en_params.c
 +++ ofed_kernel/drivers/net/mlx4/en_params.c
 @@ -593,8 +593,6 @@ const struct ethtool_ops mlx4_en_ethtool
- 	.get_pauseparam = mlx4_en_get_pauseparam,
  	.set_pauseparam = mlx4_en_set_pauseparam,
  	.get_ringparam = mlx4_en_get_ringparam,
+ 	.set_ringparam = mlx4_en_set_ringparam,
 -	.get_flags = ethtool_op_get_flags,
 -	.set_flags = ethtool_op_set_flags,
  };

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/sdp_7277_to_2_6_11.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/sdp_7277_to_2_6_11.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/sdp_7277_to_2_6_11.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -6,7 +6,7 @@
 ===================================================================
 --- ofed_kernel-2.6.9_U4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
 +++ ofed_kernel-2.6.9_U4/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -576,7 +576,6 @@ adjudge_to_death:
+@@ -580,7 +580,6 @@ adjudge_to_death:
  		/* TODO: tcp_fin_time to get timeout */
  		sdp_dbg(sk, "%s: entering time wait refcnt %d\n", __func__,
  			atomic_read(&sk->sk_refcnt));
@@ -14,15 +14,16 @@
  	}
  
  	/* TODO: limit number of orphaned sockets.
-@@ -850,7 +849,6 @@ void sdp_cancel_dreq_wait_timeout(struct
+@@ -860,8 +859,6 @@ void sdp_cancel_dreq_wait_timeout(struct
  		/* The timeout hasn't reached - need to clean ref count */
  		sock_put(&ssk->isk.sk, SOCK_REF_DREQ_TO);
  	}
+-
 -	atomic_dec(ssk->isk.sk.sk_prot->orphan_count);
  }
  
  void sdp_destroy_work(struct work_struct *work)
-@@ -890,9 +888,6 @@ void sdp_dreq_wait_timeout_work(struct w
+@@ -901,9 +898,6 @@ void sdp_dreq_wait_timeout_work(struct w
  
  	sdp_sk(sk)->dreq_wait_timeout = 0;
  
@@ -32,7 +33,7 @@
  	sdp_exch_state(sk, TCPF_LAST_ACK | TCPF_FIN_WAIT1, TCP_TIME_WAIT);
  
  	release_sock(sk);
-@@ -2131,7 +2126,6 @@ void sdp_urg(struct sdp_sock *ssk, struc
+@@ -2163,7 +2157,6 @@ void sdp_urg(struct sdp_sock *ssk, struc
  
  static atomic_t sockets_allocated;
  static atomic_t memory_allocated;
@@ -40,7 +41,7 @@
  static int memory_pressure;
  struct proto sdp_proto = {
          .close       = sdp_close,
-@@ -2152,13 +2146,11 @@ struct proto sdp_proto = {
+@@ -2184,13 +2177,11 @@ struct proto sdp_proto = {
  	.sockets_allocated = &sockets_allocated,
  	.memory_allocated = &memory_allocated,
  	.memory_pressure = &memory_pressure,
@@ -56,7 +57,7 @@
  	.name	     = "SDP",
  };
  
-@@ -2517,9 +2509,6 @@ static void __exit sdp_exit(void)
+@@ -2569,9 +2560,6 @@ static void __exit sdp_exit(void)
  	sock_unregister(PF_INET_SDP);
  	proto_unregister(&sdp_proto);
  

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/t3_hw_to_2_6_5-7_244.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/t3_hw_to_2_6_5-7_244.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U6/t3_hw_to_2_6_5-7_244.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,13 +1,8 @@
----
- drivers/net/cxgb3/adapter.h |    1 +
- drivers/net/cxgb3/t3_hw.c   |    6 ++----
- 2 files changed, 3 insertions(+), 4 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/adapter.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/adapter.h
-+++ ofed_kernel/drivers/net/cxgb3/adapter.h
-@@ -194,6 +194,7 @@ struct adapter {
+diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
+index 21dad82..1c4e828 100644
+--- a/drivers/net/cxgb3/adapter.h
++++ b/drivers/net/cxgb3/adapter.h
+@@ -207,6 +207,7 @@ struct adapter {
  	struct list_head adapter_list;
  	void __iomem *regs;
  	struct pci_dev *pdev;
@@ -15,11 +10,11 @@
  	unsigned long registered_device_map;
  	unsigned long open_device_map;
  	unsigned long flags;
-Index: ofed_kernel/drivers/net/cxgb3/t3_hw.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/t3_hw.c
-+++ ofed_kernel/drivers/net/cxgb3/t3_hw.c
-@@ -3494,7 +3494,7 @@ static int t3_reset_adapter(struct adapt
+diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
+index 533fc74..f6c000e 100644
+--- a/drivers/net/cxgb3/t3_hw.c
++++ b/drivers/net/cxgb3/t3_hw.c
+@@ -3534,7 +3534,7 @@ static int t3_reset_adapter(struct adapter *adapter)
  	uint16_t devid = 0;
  
  	if (save_and_restore_pcie)
@@ -28,7 +23,7 @@
  	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
  
  	/*
-@@ -3512,7 +3512,7 @@ static int t3_reset_adapter(struct adapt
+@@ -3552,7 +3552,7 @@ static int t3_reset_adapter(struct adapter *adapter)
  		return -1;
  
  	if (save_and_restore_pcie)
@@ -37,12 +32,12 @@
  	return 0;
  }
  
-@@ -3640,8 +3640,6 @@ int t3_prep_adapter(struct adapter *adap
+@@ -3688,8 +3688,6 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
  
  		memcpy(adapter->port[i]->dev_addr, hw_addr,
  		       ETH_ALEN);
 -		memcpy(adapter->port[i]->perm_addr, hw_addr,
 -		       ETH_ALEN);
- 		init_link_config(&p->link_config, p->port_type->caps);
+ 		init_link_config(&p->link_config, p->phy.caps);
  		p->phy.ops->power_down(&p->phy, 1);
- 		if (!(p->port_type->caps & SUPPORTED_IRQ))
+ 		if (!(p->phy.caps & SUPPORTED_IRQ))

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/core_2_z010_sysfs_to_2.6.18.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/core_2_z010_sysfs_to_2.6.18.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/core_2_z010_sysfs_to_2.6.18.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,32 @@
+--- a/drivers/infiniband/core/device.c	2009-02-24 14:00:50.387541000 +0200
++++ b/drivers/infiniband/core/device.c	2009-02-24 14:00:54.891362000 +0200
+@@ -199,7 +199,7 @@ void ib_dealloc_device(struct ib_device 
+ 
+ 	BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);
+ 
+-	kobject_put(&device->dev.kobj);
++	ib_device_unregister_sysfs(device);
+ }
+ EXPORT_SYMBOL(ib_dealloc_device);
+ 
+@@ -356,8 +356,6 @@ void ib_unregister_device(struct ib_devi
+ 
+ 	mutex_unlock(&device_mutex);
+ 
+-	ib_device_unregister_sysfs(device);
+-
+ 	spin_lock_irqsave(&device->client_data_lock, flags);
+ 	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ 		kfree(context);
+--- a/drivers/infiniband/core/sysfs.c	2009-02-24 13:58:42.509754000 +0200
++++ b/drivers/infiniband/core/sysfs.c	2009-02-24 14:00:54.894368000 +0200
+@@ -863,9 +863,6 @@ void ib_device_unregister_sysfs(struct i
+ 	struct kobject *p, *t;
+ 	struct ib_port *port;
+ 
+-	/* Hold kobject until ib_dealloc_device() */
+-	kobject_get(&device->dev.kobj);
+-
+ 	list_for_each_entry_safe(p, t, &device->port_list, entry) {
+ 		list_del(&p->entry);
+ 		port = container_of(p, struct ib_port, kobj);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/core_z0010_sysfs_race.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/core_z0010_sysfs_race.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/core_z0010_sysfs_race.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,496 @@
+ib_core: avoid race condition between sysfs access and low-level module unload. (V2)
+
+In newer kernels, although a low-level module will not be unloaded (code)
+while its sysfs interface is being accessed, it is possible for the module to
+free all its resources (data) during such access.  This almost always causes
+a kernel Oops.
+
+To avoid this, we protect the device reg_state with a mutex, and perform
+all sysfs operations (show, store) atomically within this mutex.
+
+V2: fix thinko bug in sysfs_state_show changes (found by Ralph Campbell).
+
+Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
+Signed-off-by: Ralph Campbell <ralph.campbell at qlogic.com>
+
+Index: ofed_kernel/drivers/infiniband/core/device.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/device.c	2009-02-22 15:36:34.531430000 +0200
++++ ofed_kernel/drivers/infiniband/core/device.c	2009-02-22 15:38:28.650322000 +0200
+@@ -178,9 +178,14 @@ static int end_port(struct ib_device *de
+  */
+ struct ib_device *ib_alloc_device(size_t size)
+ {
++	struct ib_device *ibdev;
++
+ 	BUG_ON(size < sizeof (struct ib_device));
+ 
+-	return kzalloc(size, GFP_KERNEL);
++	ibdev = kzalloc(size, GFP_KERNEL);
++	if (ibdev)
++		mutex_init(&ibdev->sysfs_mutex);
++	return ibdev;
+ }
+ EXPORT_SYMBOL(ib_alloc_device);
+ 
+@@ -313,9 +318,10 @@ int ib_register_device(struct ib_device 
+ 		goto out;
+ 	}
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	list_add_tail(&device->core_list, &device_list);
+-
+ 	device->reg_state = IB_DEV_REGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ 
+ 	{
+ 		struct ib_client *client;
+@@ -361,7 +367,9 @@ void ib_unregister_device(struct ib_devi
+ 		kfree(context);
+ 	spin_unlock_irqrestore(&device->client_data_lock, flags);
+ 
++	mutex_lock(&device->sysfs_mutex);
+ 	device->reg_state = IB_DEV_UNREGISTERED;
++	mutex_unlock(&device->sysfs_mutex);
+ }
+ EXPORT_SYMBOL(ib_unregister_device);
+ 
+Index: ofed_kernel/drivers/infiniband/core/sysfs.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/sysfs.c	2009-02-22 15:36:35.914354000 +0200
++++ ofed_kernel/drivers/infiniband/core/sysfs.c	2009-02-22 16:12:37.485629000 +0200
+@@ -94,7 +94,7 @@ static ssize_t state_show(struct ib_port
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+ 	static const char *state_name[] = {
+ 		[IB_PORT_NOP]		= "NOP",
+@@ -105,26 +105,33 @@ static ssize_t state_show(struct ib_port
+ 		[IB_PORT_ACTIVE_DEFER]	= "ACTIVE_DEFER"
+ 	};
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%d: %s\n", attr.state,
+-		       attr.state >= 0 && attr.state < ARRAY_SIZE(state_name) ?
+-		       state_name[attr.state] : "UNKNOWN");
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d: %s\n", attr.state,
++				      attr.state >= 0 &&
++				      attr.state < ARRAY_SIZE(state_name) ?
++				      state_name[attr.state] : "UNKNOWN");
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "0x%x\n", attr.lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t lid_mask_count_show(struct ib_port *p,
+@@ -132,52 +139,64 @@ static ssize_t lid_mask_count_show(struc
+ 				   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.lmc);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.lmc);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused,
+ 			   char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%x\n", attr.sm_lid);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%x\n", attr.sm_lid);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused,
+ 			  char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "%d\n", attr.sm_sl);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%d\n", attr.sm_sl);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused,
+ 			     char *buf)
+ {
+ 	struct ib_port_attr attr;
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret)
++			ret = sprintf(buf, "0x%08x\n", attr.port_cap_flags);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
+@@ -186,24 +205,33 @@ static ssize_t rate_show(struct ib_port 
+ 	struct ib_port_attr attr;
+ 	char *speed = "";
+ 	int rate;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
+-
+-	switch (attr.active_speed) {
+-	case 2: speed = " DDR"; break;
+-	case 4: speed = " QDR"; break;
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.active_speed) {
++			case 2: speed = " DDR"; break;
++			case 4: speed = " QDR"; break;
++			}
++
++			rate = 25 * ib_width_enum_to_int(attr.active_width) *
++				attr.active_speed;
++			if (rate < 0) {
++				ret = -EINVAL;
++				goto out;
++			}
++
++			ret = sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
++				      rate / 10, rate % 10 ? ".5" : "",
++				      ib_width_enum_to_int(attr.active_width),
++				      speed);
++		}
+ 	}
+-
+-	rate = 25 * ib_width_enum_to_int(attr.active_width) * attr.active_speed;
+-	if (rate < 0)
+-		return -EINVAL;
+-
+-	return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
+-		       rate / 10, rate % 10 ? ".5" : "",
+-		       ib_width_enum_to_int(attr.active_width), speed);
++out:
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
+@@ -211,22 +239,42 @@ static ssize_t phys_state_show(struct ib
+ {
+ 	struct ib_port_attr attr;
+ 
+-	ssize_t ret;
+-
+-	ret = ib_query_port(p->ibdev, p->port_num, &attr);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	switch (attr.phys_state) {
+-	case 1:  return sprintf(buf, "1: Sleep\n");
+-	case 2:  return sprintf(buf, "2: Polling\n");
+-	case 3:  return sprintf(buf, "3: Disabled\n");
+-	case 4:  return sprintf(buf, "4: PortConfigurationTraining\n");
+-	case 5:  return sprintf(buf, "5: LinkUp\n");
+-	case 6:  return sprintf(buf, "6: LinkErrorRecovery\n");
+-	case 7:  return sprintf(buf, "7: Phy Test\n");
+-	default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_port(p->ibdev, p->port_num, &attr);
++		if (!ret) {
++			switch (attr.phys_state) {
++			case 1:
++				ret = sprintf(buf, "1: Sleep\n");
++				break;
++			case 2:
++				ret = sprintf(buf, "2: Polling\n");
++				break;
++			case 3:
++				ret = sprintf(buf, "3: Disabled\n");
++				break;
++			case 4:
++				ret = sprintf(buf, "4: PortConfigurationTraining\n");
++				break;
++			case 5:
++				ret = sprintf(buf, "5: LinkUp\n");
++				break;
++			case 6:
++				ret = sprintf(buf, "6: LinkErrorRecovery\n");
++				break;
++			case 7:
++				ret = sprintf(buf, "7: Phy Test\n");
++				break;
++			default:
++				ret = sprintf(buf, "%d: <unknown>\n", attr.phys_state);
++				break;
++			}
++		}
+ 	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static PORT_ATTR_RO(state);
+@@ -256,21 +304,24 @@ static ssize_t show_port_gid(struct ib_p
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	union ib_gid gid;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) gid.raw)[0]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[1]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[2]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[3]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[4]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[5]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[6]),
+-		       be16_to_cpu(((__be16 *) gid.raw)[7]));
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) gid.raw)[0]),
++				      be16_to_cpu(((__be16 *) gid.raw)[1]),
++				      be16_to_cpu(((__be16 *) gid.raw)[2]),
++				      be16_to_cpu(((__be16 *) gid.raw)[3]),
++				      be16_to_cpu(((__be16 *) gid.raw)[4]),
++				      be16_to_cpu(((__be16 *) gid.raw)[5]),
++				      be16_to_cpu(((__be16 *) gid.raw)[6]),
++				      be16_to_cpu(((__be16 *) gid.raw)[7]));
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
+@@ -279,13 +330,16 @@ static ssize_t show_port_pkey(struct ib_
+ 	struct port_table_attribute *tab_attr =
+ 		container_of(attr, struct port_table_attribute, attr);
+ 	u16 pkey;
+-	ssize_t ret;
+-
+-	ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
+-	if (ret)
+-		return ret;
++	ssize_t ret = -ENODEV;
+ 
+-	return sprintf(buf, "0x%04x\n", pkey);
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (ibdev_is_alive(p->ibdev)) {
++		ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
++		if (!ret)
++			ret = sprintf(buf, "0x%04x\n", pkey);
++	}
++	mutex_unlock(&p->ibdev->sysfs_mutex);
++	return ret;
+ }
+ 
+ #define PORT_PMA_ATTR(_name, _counter, _width, _offset)			\
+@@ -308,6 +346,12 @@ static ssize_t show_pma_counter(struct i
+ 	if (!p->ibdev->process_mad)
+ 		return sprintf(buf, "N/A (no PMA)\n");
+ 
++	mutex_lock(&p->ibdev->sysfs_mutex);
++	if (!ibdev_is_alive(p->ibdev)) {
++		ret = -ENODEV;
++		goto out;
++	}
++
+ 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
+ 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ 	if (!in_mad || !out_mad) {
+@@ -354,7 +414,7 @@ static ssize_t show_pma_counter(struct i
+ out:
+ 	kfree(in_mad);
+ 	kfree(out_mad);
+-
++	mutex_unlock(&p->ibdev->sysfs_mutex);
+ 	return ret;
+ }
+ 
+@@ -594,20 +654,20 @@ static ssize_t show_sys_image_guid(struc
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_attr attr;
+-	ssize_t ret;
+-
+-	if (!ibdev_is_alive(dev))
+-		return -ENODEV;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = ib_query_device(dev, &attr);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
+-		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_query_device(dev, &attr);
++		if (!ret)
++			ret = sprintf(buf, "%04x:%04x:%04x:%04x\n",
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
++				      be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static ssize_t show_node_guid(struct device *device,
+@@ -639,17 +699,20 @@ static ssize_t set_node_desc(struct devi
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	struct ib_device_modify desc = {};
+-	int ret;
++	int ret = -ENODEV;
+ 
+ 	if (!dev->modify_device)
+ 		return -EIO;
+ 
+ 	memcpy(desc.node_desc, buf, min_t(int, count, 64));
+-	ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
+-	if (ret)
+-		return ret;
+-
+-	return count;
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
++		if (!ret)
++			ret = count;
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
+@@ -677,14 +740,18 @@ static ssize_t show_protocol_stat(const 
+ {
+ 	struct ib_device *dev = container_of(device, struct ib_device, dev);
+ 	union rdma_protocol_stats stats;
+-	ssize_t ret;
++	ssize_t ret = -ENODEV;
+ 
+-	ret = dev->get_protocol_stats(dev, &stats);
+-	if (ret)
+-		return ret;
+-
+-	return sprintf(buf, "%llu\n",
+-		       (unsigned long long) ((u64 *) &stats)[offset]);
++	mutex_lock(&dev->sysfs_mutex);
++	if (ibdev_is_alive(dev)) {
++		ret = dev->get_protocol_stats(dev, &stats);
++		if (!ret)
++			ret = sprintf(buf, "%llu\n",
++				      (unsigned long long)
++				      ((u64 *) &stats)[offset]);
++	}
++	mutex_unlock(&dev->sysfs_mutex);
++	return ret;
+ }
+ 
+ /* generate a read-only iwarp statistics attribute */
+Index: ofed_kernel/include/rdma/ib_verbs.h
+===================================================================
+--- ofed_kernel.orig/include/rdma/ib_verbs.h	2009-02-22 15:36:40.252210000 +0200
++++ ofed_kernel/include/rdma/ib_verbs.h	2009-02-22 15:38:28.678322000 +0200
+@@ -1205,6 +1205,7 @@ struct ib_device {
+ 		IB_DEV_REGISTERED,
+ 		IB_DEV_UNREGISTERED
+ 	}                            reg_state;
++	struct mutex		     sysfs_mutex;
+ 
+ 	u64			     uverbs_cmd_mask;
+ 	int			     uverbs_abi_ver;

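The core_z0010_sysfs_race.patch added above applies the same shape to every show/store handler: take the new per-device sysfs_mutex, check that the device is still registered, and only then issue the query and format the result. A minimal sketch of that pattern (the function name is hypothetical; it assumes the sysfs_mutex field and the existing ibdev_is_alive() helper visible in the patch):

    static ssize_t example_show(struct ib_port *p, struct port_attribute *unused,
                                char *buf)
    {
            struct ib_port_attr attr;
            ssize_t ret = -ENODEV;              /* returned if the device is already gone */

            mutex_lock(&p->ibdev->sysfs_mutex); /* serializes against ib_unregister_device() */
            if (ibdev_is_alive(p->ibdev)) {
                    ret = ib_query_port(p->ibdev, p->port_num, &attr);
                    if (!ret)
                            ret = sprintf(buf, "%d\n", attr.lmc);
            }
            mutex_unlock(&p->ibdev->sysfs_mutex);
            return ret;
    }
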
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/cxgb3_0002_undo_250.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/cxgb3_0002_undo_250.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/cxgb3_0002_undo_250.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,8 @@
-commit 204e2f98c2d13f869b8541f3c57c7314f75cab11
-Author: Divy Le Ray <divy at chelsio.com>
-Date:   Tue May 6 19:26:01 2008 -0700
-
-    cxgb3 - fix EEH
-    
-    Reset the chip when the PCI link goes down.
-    Preserve the napi structure when a sge qset's resources are freed.
-    Replay only HW initialization when the chip comes out of reset.
-    
-    Signed-off-by: Divy Le ray <divy at chelsio.com>
-    Signed-off-by: Jeff Garzik <jgarzik at redhat.com>
-
----
- drivers/net/cxgb3/common.h     |    1 -
- drivers/net/cxgb3/cxgb3_main.c |   10 ++++------
- drivers/net/cxgb3/regs.h       |    8 --------
- drivers/net/cxgb3/sge.c        |   31 ++-----------------------------
- drivers/net/cxgb3/t3_hw.c      |   28 ----------------------------
- 5 files changed, 6 insertions(+), 72 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/common.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/common.h
-+++ ofed_kernel/drivers/net/cxgb3/common.h
-@@ -700,7 +700,6 @@ void mac_prep(struct cmac *mac, struct a
+diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
+index d5e9bf7..dc2c79d 100644
+--- a/drivers/net/cxgb3/common.h
++++ b/drivers/net/cxgb3/common.h
+@@ -726,7 +726,6 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
  void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
  int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
  		    int reset);
@@ -31,11 +10,11 @@
  void t3_led_ready(struct adapter *adapter);
  void t3_fatal_err(struct adapter *adapter);
  void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -2449,6 +2449,9 @@ static pci_ers_result_t t3_io_error_dete
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 8de820e..48fbda6 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -2485,6 +2485,9 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
  	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
  		offload_close(&adapter->tdev);
  
@@ -45,7 +24,7 @@
  	adapter->flags &= ~FULL_INIT_DONE;
  
  	pci_disable_device(pdev);
-@@ -2473,12 +2476,8 @@ static pci_ers_result_t t3_io_slot_reset
+@@ -2509,12 +2512,8 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
  		goto err;
  	}
  	pci_set_master(pdev);
@@ -59,7 +38,7 @@
  		goto err;
  
  	return PCI_ERS_RESULT_RECOVERED;
-@@ -2630,7 +2629,6 @@ static int __devinit init_one(struct pci
+@@ -2666,7 +2665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
  	}
  
  	pci_set_master(pdev);
@@ -67,10 +46,10 @@
  
  	mmio_start = pci_resource_start(pdev, 0);
  	mmio_len = pci_resource_len(pdev, 0);
-Index: ofed_kernel/drivers/net/cxgb3/regs.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/regs.h
-+++ ofed_kernel/drivers/net/cxgb3/regs.h
+diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
+index a035d5c..086cd02 100644
+--- a/drivers/net/cxgb3/regs.h
++++ b/drivers/net/cxgb3/regs.h
 @@ -444,14 +444,6 @@
  
  #define A_PCIE_CFG 0x88
@@ -86,11 +65,11 @@
  #define S_PCIE_CLIDECEN    16
  #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
  #define F_PCIE_CLIDECEN    V_PCIE_CLIDECEN(1U)
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index 1b0861d..912f816 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -563,33 +563,6 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
  }
  
  /**
@@ -124,7 +103,7 @@
   *	free_qset - free the resources of an SGE queue set
   *	@adapter: the adapter owning the queue set
   *	@q: the queue set
-@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter 
+@@ -645,7 +618,7 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -133,20 +112,11 @@
  }
  
  /**
-@@ -1429,7 +1402,7 @@ static void restart_ctrlq(unsigned long 
-  */
- int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
- {
--	int ret;
-+	int ret;
- 	local_bh_disable();
- 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
- 	local_bh_enable();
-Index: ofed_kernel/drivers/net/cxgb3/t3_hw.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/t3_hw.c
-+++ ofed_kernel/drivers/net/cxgb3/t3_hw.c
-@@ -3265,7 +3265,6 @@ static void config_pcie(struct adapter *
+diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
+index bf5c076..533fc74 100644
+--- a/drivers/net/cxgb3/t3_hw.c
++++ b/drivers/net/cxgb3/t3_hw.c
+@@ -3303,7 +3303,6 @@ static void config_pcie(struct adapter *adap)
  
  	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
  	t3_set_reg_field(adap, A_PCIE_CFG, 0,
@@ -154,15 +124,14 @@
  			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
  }
  
-@@ -3657,30 +3656,3 @@ void t3_led_ready(struct adapter *adapte
- 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+@@ -3706,31 +3705,3 @@ void t3_led_ready(struct adapter *adapter)
  			 F_GPIO0_OUT_VAL);
  }
--
+ 
 -int t3_replay_prep_adapter(struct adapter *adapter)
 -{
 -	const struct adapter_info *ai = adapter->params.info;
--	unsigned int i, j = 0;
+-	unsigned int i, j = -1;
 -	int ret;
 -
 -	early_hw_init(adapter, ai);
@@ -171,15 +140,17 @@
 -		return ret;
 -
 -	for_each_port(adapter, i) {
+-		const struct port_type_info *pti;
 -		struct port_info *p = adap2pinfo(adapter, i);
--		while (!adapter->params.vpd.port_type[j])
--			++j;
 -
--		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
--					ai->mdio_ops);
+-		while (!adapter->params.vpd.port_type[++j])
+-			;
 -
+-		pti = &port_types[adapter->params.vpd.port_type[j]];
+-		ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
+-		if (ret)
+-			return ret;
 -		p->phy.ops->power_down(&p->phy, 1);
--		++j;
 -	}
 -
 -return 0;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/cxgb3_0010_napi.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/cxgb3_0010_napi.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/cxgb3_0010_napi.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,29 +1,21 @@
----
- drivers/net/cxgb3/adapter.h    |   22 +++--
- drivers/net/cxgb3/cxgb3_main.c |   94 ++++++++++++++++------
- drivers/net/cxgb3/sge.c        |  170 ++++++++++++++++++++++++-----------------
- 3 files changed, 185 insertions(+), 101 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/adapter.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/adapter.h
-+++ ofed_kernel/drivers/net/cxgb3/adapter.h
-@@ -47,13 +47,11 @@
- #include <asm/io.h>
+diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
+index 3b33ecb..21dad82 100644
+--- a/drivers/net/cxgb3/adapter.h
++++ b/drivers/net/cxgb3/adapter.h
+@@ -48,12 +48,10 @@
  
  struct vlan_group;
--struct adapter;
+ struct adapter;
 -struct sge_qset;
  
-+struct adapter;
  struct port_info {
  	struct adapter *adapter;
  	struct vlan_group *vlan_grp;
 -	struct sge_qset *qs;
- 	const struct port_type_info *port_type;
  	u8 port_id;
  	u8 rx_csum_offload;
-@@ -184,8 +182,6 @@ enum {				/* per port SGE statistics */
+ 	u8 nqsets;
+@@ -183,8 +181,6 @@ enum {				/* per port SGE statistics */
  #define T3_MAX_LRO_MAX_PKTS 64
  
  struct sge_qset {		/* an SGE queue set */
@@ -32,7 +24,7 @@
  	struct sge_rspq rspq;
  	struct sge_fl fl[SGE_RXQ_PER_SET];
  	struct sge_txq txq[SGE_TXQ_PER_SET];
-@@ -196,7 +192,7 @@ struct sge_qset {		/* an SGE queue set *
+@@ -195,7 +191,7 @@ struct sge_qset {		/* an SGE queue set */
  	int lro_enabled;
  	int lro_frag_len;
  	void *lro_va;
@@ -41,7 +33,7 @@
  	unsigned long txq_stopped;	/* which Tx queues are stopped */
  	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
  	unsigned long port_stats[SGE_PSTAT_MAX];
-@@ -241,6 +237,12 @@ struct adapter {
+@@ -240,6 +236,12 @@ struct adapter {
  	struct delayed_work adap_check_task;
  	struct work_struct ext_intr_handler_task;
  
@@ -54,7 +46,7 @@
  	struct dentry *debugfs_root;
  
  	struct mutex mdio_lock;
-@@ -267,6 +269,12 @@ static inline struct port_info *adap2pin
+@@ -266,6 +268,12 @@ static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
  	return netdev_priv(adap->port[idx]);
  }
  
@@ -67,7 +59,7 @@
  #define OFFLOAD_DEVMAP_BIT 15
  
  #define tdev2adap(d) container_of(d, struct adapter, tdev)
-@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, str
+@@ -292,7 +300,7 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -76,11 +68,11 @@
  int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
  		unsigned char *data);
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
-Index: ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/cxgb3_main.c
-+++ ofed_kernel/drivers/net/cxgb3/cxgb3_main.c
-@@ -410,17 +410,49 @@ static void setup_rss(struct adapter *ad
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 0f4c694..342d441 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -435,17 +435,49 @@ static void setup_rss(struct adapter *adap)
  		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
  }
  
@@ -98,9 +90,7 @@
 -	int i;
 +	int i, j, dummy_idx = 0;
 +	struct net_device *nd;
- 
--	for (i = 0; i < SGE_QSETS; i++) {
--		struct sge_qset *qs = &adap->sge.qs[i];
++
 +	for_each_port(adap, i) {
 +		struct net_device *dev = adap->port[i];
 +		const struct port_info *pi = netdev_priv(dev);
@@ -112,7 +102,9 @@
 +				nd = alloc_netdev(sizeof(*p), "", ether_setup);
 +				if (!nd)
 +					goto free_all;
-+
+ 
+-	for (i = 0; i < SGE_QSETS; i++) {
+-		struct sge_qset *qs = &adap->sge.qs[i];
 +				p = netdev_priv(nd);
 +				p->adapter = adap;
 +				nd->weight = 64;
@@ -137,7 +129,7 @@
  }
  
  /*
-@@ -431,18 +463,20 @@ static void init_napi(struct adapter *ad
+@@ -456,18 +488,20 @@ static void init_napi(struct adapter *adap)
  static void quiesce_rx(struct adapter *adap)
  {
  	int i;
@@ -168,7 +160,7 @@
  }
  
  /**
-@@ -455,7 +489,7 @@ static void enable_all_napi(struct adapt
+@@ -480,7 +514,7 @@ static void enable_all_napi(struct adapter *adap)
   */
  static int setup_sge_qsets(struct adapter *adap)
  {
@@ -177,7 +169,7 @@
  	unsigned int ntxq = SGE_TXQ_PER_SET;
  
  	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
-@@ -463,14 +497,15 @@ static int setup_sge_qsets(struct adapte
+@@ -488,14 +522,15 @@ static int setup_sge_qsets(struct adapter *adap)
  
  	for_each_port(adap, i) {
  		struct net_device *dev = adap->port[i];
@@ -196,7 +188,7 @@
  			if (err) {
  				t3_free_sge_resources(adap);
  				return err;
-@@ -884,6 +919,10 @@ static int cxgb_up(struct adapter *adap)
+@@ -909,6 +944,10 @@ static int cxgb_up(struct adapter *adap)
  				goto out;
  		}
  
@@ -207,7 +199,7 @@
  		err = t3_init_hw(adap, 0);
  		if (err)
  			goto out;
-@@ -896,7 +935,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -921,7 +960,6 @@ static int cxgb_up(struct adapter *adap)
  			goto out;
  
  		setup_rss(adap);
@@ -215,7 +207,7 @@
  		adap->flags |= FULL_INIT_DONE;
  	}
  
-@@ -924,7 +962,6 @@ static int cxgb_up(struct adapter *adap)
+@@ -949,7 +987,6 @@ static int cxgb_up(struct adapter *adap)
  				      adap->name, adap)))
  		goto irq_err;
  
@@ -223,7 +215,7 @@
  	t3_sge_start(adap);
  	t3_intr_enable(adap);
  
-@@ -1061,10 +1098,8 @@ static int cxgb_open(struct net_device *
+@@ -1086,10 +1123,8 @@ static int cxgb_open(struct net_device *dev)
  	int other_ports = adapter->open_device_map & PORT_MASK;
  	int err;
  
@@ -235,7 +227,7 @@
  
  	set_bit(pi->port_id, &adapter->open_device_map);
  	if (is_offload(adapter) && !ofld_disable) {
-@@ -2700,6 +2735,7 @@ static int __devinit init_one(struct pci
+@@ -2736,6 +2771,7 @@ static int __devinit init_one(struct pci_dev *pdev,
  #ifdef CONFIG_NET_POLL_CONTROLLER
  		netdev->poll_controller = cxgb_netpoll;
  #endif
@@ -243,7 +235,7 @@
  
  		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
  	}
-@@ -2800,6 +2836,12 @@ static void __devexit remove_one(struct 
+@@ -2836,6 +2872,12 @@ static void __devexit remove_one(struct pci_dev *pdev)
  		t3_free_sge_resources(adapter);
  		cxgb_disable_msi(adapter);
  
@@ -256,11 +248,11 @@
  		for_each_port(adapter, i)
  			if (adapter->port[i])
  				free_netdev(adapter->port[i]);
-Index: ofed_kernel/drivers/net/cxgb3/sge.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/sge.c
-+++ ofed_kernel/drivers/net/cxgb3/sge.c
-@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter 
+diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
+index f6bc6fe..3bbf626 100644
+--- a/drivers/net/cxgb3/sge.c
++++ b/drivers/net/cxgb3/sge.c
+@@ -618,6 +618,9 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
  				  q->rspq.desc, q->rspq.phys_addr);
  	}
  
@@ -270,7 +262,7 @@
  	memset(q, 0, sizeof(*q));
  }
  
-@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, str
+@@ -1116,7 +1119,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
  	unsigned int ndesc, pidx, credits, gen, compl;
  	const struct port_info *pi = netdev_priv(dev);
  	struct adapter *adap = pi->adapter;
@@ -279,7 +271,7 @@
  	struct sge_txq *q = &qs->txq[TXQ_ETH];
  
  	/*
-@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long 
+@@ -1365,12 +1368,13 @@ static void restart_ctrlq(unsigned long data)
  	struct sk_buff *skb;
  	struct sge_qset *qs = (struct sge_qset *)data;
  	struct sge_txq *q = &qs->txq[TXQ_CTRL];
@@ -295,7 +287,7 @@
  
  		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
  
-@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long 
+@@ -1393,7 +1397,7 @@ static void restart_ctrlq(unsigned long data)
  
  	spin_unlock(&q->lock);
  	wmb();
@@ -304,7 +296,7 @@
  		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
  }
  
-@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struc
+@@ -1683,7 +1687,8 @@ static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
  	else {
  		struct sge_qset *qs = rspq_to_qset(q);
  
@@ -314,7 +306,7 @@
  		q->rx_head = skb;
  	}
  	q->rx_tail = skb;
-@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundl
+@@ -1719,30 +1724,34 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev,
   *	receive handler.  Batches need to be of modest size as we do prefetches
   *	on the packets in each.
   */
@@ -357,7 +349,7 @@
  			prefetch(head->data);
  			skbs[ngathered] = head;
  			head = head->next;
-@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct 
+@@ -1764,8 +1773,10 @@ static int ofld_poll(struct napi_struct *napi, int budget)
  		}
  		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
  	}
@@ -370,7 +362,7 @@
  }
  
  /**
-@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const
+@@ -2325,47 +2336,50 @@ static inline int is_pure_response(const struct rsp_desc *r)
  
  /**
   *	napi_rx_handler - the NAPI handler for Rx processing
@@ -449,7 +441,7 @@
  }
  
  /**
-@@ -2448,7 +2462,8 @@ static inline int handle_responses(struc
+@@ -2448,7 +2462,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
  		return 0;
  	}
@@ -459,7 +451,7 @@
  	return 1;
  }
  
-@@ -2459,7 +2474,8 @@ static inline int handle_responses(struc
+@@ -2459,7 +2474,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
  irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -469,7 +461,7 @@
  	struct sge_rspq *q = &qs->rspq;
  
  	spin_lock(&q->lock);
-@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, vo
+@@ -2478,11 +2494,13 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
  static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
  {
  	struct sge_qset *qs = cookie;
@@ -484,7 +476,7 @@
  		q->unhandled_irqs++;
  	spin_unlock(&q->lock);
  	return IRQ_HANDLED;
-@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, 
+@@ -2525,13 +2543,11 @@ static irqreturn_t t3_intr_msi(int irq, void *cookie)
  	return IRQ_HANDLED;
  }
  
@@ -502,7 +494,7 @@
  		return 1;
  	}
  	return 0;
-@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int 
+@@ -2552,9 +2568,10 @@ static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
  
  	spin_lock(&q->lock);
  
@@ -515,7 +507,7 @@
  	if (!new_packets && t3_slow_intr_handler(adap) == 0)
  		q->unhandled_irqs++;
  
-@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, voi
+@@ -2657,9 +2674,9 @@ static irqreturn_t t3b_intr(int irq, void *cookie)
  static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  {
  	u32 map;
@@ -527,7 +519,7 @@
  
  	t3_write_reg(adap, A_PL_CLI, 0);
  	map = t3_read_reg(adap, A_SG_DATA_INTR);
-@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq
+@@ -2672,11 +2689,18 @@ static irqreturn_t t3b_intr_napi(int irq, void *cookie)
  	if (unlikely(map & F_ERRINTR))
  		t3_slow_intr_handler(adap);
  
@@ -550,7 +542,7 @@
  
  	spin_unlock(&q0->lock);
  	return IRQ_HANDLED;
-@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long d
+@@ -2775,7 +2799,8 @@ static void sge_timer_cb(unsigned long data)
  {
  	spinlock_t *lock;
  	struct sge_qset *qs = (struct sge_qset *)data;
@@ -560,7 +552,7 @@
  
  	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
  		reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
-@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long d
+@@ -2786,9 +2811,9 @@ static void sge_timer_cb(unsigned long data)
  		spin_unlock(&qs->txq[TXQ_OFLD].lock);
  	}
  	lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
@@ -572,7 +564,7 @@
  			u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
  
  			if (qs->fl[0].credits < qs->fl[0].size)
-@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long d
+@@ -2822,9 +2847,12 @@ static void sge_timer_cb(unsigned long data)
   */
  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
  {
@@ -586,7 +578,7 @@
  }
  
  /**
-@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_
+@@ -2844,7 +2872,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
   */
  int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  		      int irq_vec_idx, const struct qset_params *p,
@@ -595,7 +587,7 @@
  {
  	int i, avail, ret = -ENOMEM;
  	struct sge_qset *q = &adapter->sge.qs[id];
-@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *ad
+@@ -2978,11 +3006,17 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
  	}
  
  	spin_unlock_irq(&adapter->sge.reg_lock);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/ipath_0005_pci_dma_mapping_error_to_2_6_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -3,23 +3,32 @@
  drivers/infiniband/hw/ipath/ipath_user_sdma.c |    6 +++---
  2 files changed, 4 insertions(+), 4 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:37:35.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 21:41:22.000000000 -0800
 @@ -698,7 +698,7 @@ retry:
  
  	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
  			      tx->map_len, DMA_TO_DEVICE);
--	if (dma_mapping_error(&dd->pcidev->dev, addr)) {
-+	if (dma_mapping_error(addr)) {
- 		ret = -EIO;
- 		goto unlock;
- 	}
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+-	if (dma_mapping_error(&dd->pcidev->dev, addr))
++	if (dma_mapping_error(addr))
+ 		goto ioerr;
+ 
+ 	dwoffset = tx->map_len >> 2;
+@@ -739,7 +739,7 @@ retry:
+ 		dw = (len + 3) >> 2;
+ 		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
+ 				      DMA_TO_DEVICE);
+-		if (dma_mapping_error(&dd->pcidev->dev, addr))
++		if (dma_mapping_error(addr))
+ 			goto unmap;
+ 		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
+ 		/* SDmaUseLargeBuf has to be set in every descriptor */
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:18.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-01-25 21:37:54.000000000 -0800
 @@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(cons
  
  	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,

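The two ipath hunks above are needed because dma_mapping_error() only gained its struct device * argument in 2.6.27; on the kernels this 2.6.9_U7 backport targets, the call takes the DMA address alone. The patch rewrites the call sites directly; an equivalent compat-macro approach (macro name hypothetical, shown only as a sketch) would be:

    #include <linux/version.h>
    #include <linux/dma-mapping.h>

    /* Hypothetical wrapper: hide the 2.6.27 signature change behind one macro. */
    #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
    #define ipath_dma_mapping_error(dev, addr)  dma_mapping_error(addr)
    #else
    #define ipath_dma_mapping_error(dev, addr)  dma_mapping_error(dev, addr)
    #endif
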
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/ipath_0100_iowrite32_copy.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/ipath_0100_iowrite32_copy.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/ipath_0100_iowrite32_copy.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,88 +1,16 @@
-BACKPORT - backport iowrite32_copy to 2.6.15 and earlier
+BACKPORT - backport iowrite32_copy awareness to 2.6.15 and earlier
 
+Signed-off-by: John Gregor <john.gregor at qlogic.com>
 ---
- drivers/infiniband/hw/ipath/Makefile                |    1 
- drivers/infiniband/hw/ipath/iowrite32_copy_x86_64.S |   57 ++++++++++++++++++++
- drivers/infiniband/hw/ipath/ipath_backport.h        |   48 ++++++++++++++++
- drivers/infiniband/hw/ipath/ipath_kernel.h          |    1 
- drivers/infiniband/hw/ipath/ipath_verbs.h           |    2 
- 5 files changed, 109 insertions(+)
+ drivers/infiniband/hw/ipath/ipath_backport.h |   48 +++++++++++++++++++++++++++
+ drivers/infiniband/hw/ipath/ipath_kernel.h   |    1 
+ drivers/infiniband/hw/ipath/ipath_verbs.h    |    2 +
+ 3 files changed, 51 insertions(+)
 
-Index: ofed_kernel/drivers/infiniband/hw/ipath/Makefile
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_backport.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/Makefile
-+++ ofed_kernel/drivers/infiniband/hw/ipath/Makefile
-@@ -39,3 +39,4 @@ ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba6
- 
- ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
- ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o
-+ib_ipath-$(CONFIG_X86_64) += iowrite32_copy_x86_64.o
-Index: ofed_kernel/drivers/infiniband/hw/ipath/iowrite32_copy_x86_64.S
-===================================================================
 --- /dev/null
-+++ ofed_kernel/drivers/infiniband/hw/ipath/iowrite32_copy_x86_64.S
-@@ -0,0 +1,57 @@
-+/*
-+ * Copyright (c) 2003, 2004, 2005. PathScale, Inc. All rights reserved.
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses.  You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ *     Redistribution and use in source and binary forms, with or
-+ *     without modification, are permitted provided that the following
-+ *     conditions are met:
-+ *
-+ *      - Redistributions of source code must retain the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer.
-+ *
-+ *      - Redistributions in binary form must reproduce the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer in the documentation and/or other materials
-+ *        provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ */
-+
-+/**
-+ * __iowrite32_copy - copy a memory block using dword multiple writes
-+ *
-+ * This is primarily for writing to the InfiniPath PIO buffers, which
-+ * only support dword multiple writes, and thus can not use memcpy().
-+ * For this reason, we use nothing smaller than dword writes.
-+ * It is also used as a fast copy routine in some places that have been
-+ * measured to win over memcpy, and the performance delta matters.
-+ *
-+ * Count is number of dwords; might not be a qword multiple.
-+ */
-+
-+ 	.globl __iowrite32_copy
-+	.p2align 4
-+/* rdi	destination, rsi source, rdx count */
-+__iowrite32_copy:
-+	movl %edx,%ecx
-+	shrl $1,%ecx
-+	andl $1,%edx
-+	rep
-+	movsq
-+	movl %edx,%ecx
-+	rep
-+	movsd
-+	ret
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_backport.h
-===================================================================
---- /dev/null
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_backport.h
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_backport.h
 @@ -0,0 +1,48 @@
 +#ifndef _IPATH_BACKPORT_H
 +#define _IPATH_BACKPORT_H
@@ -132,10 +60,10 @@
 +void __iowrite32_copy(void __iomem * dst, const void *src, size_t count);
 +
 +#endif				/* _IPATH_BACKPORT_H */
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_kernel.h
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_kernel.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_kernel.h
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_kernel.h
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_kernel.h
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_kernel.h
 @@ -47,6 +47,7 @@
  #include <asm/io.h>
  #include <rdma/ib_verbs.h>
@@ -144,10 +72,10 @@
  #include "ipath_common.h"
  #include "ipath_debug.h"
  #include "ipath_registers.h"
-Index: ofed_kernel/drivers/infiniband/hw/ipath/ipath_verbs.h
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_verbs.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/ipath/ipath_verbs.h
-+++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_verbs.h
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_verbs.h
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_verbs.h
 @@ -42,6 +42,8 @@
  #include <rdma/ib_pack.h>
  #include <rdma/ib_user_verbs.h>

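The assembly routine dropped from ipath_0100_iowrite32_copy.patch above implemented __iowrite32_copy(), which, as the removed comment explains, copies a block to MMIO space using nothing smaller than 32-bit writes (count is a number of dwords), since the InfiniPath PIO buffers cannot be filled with memcpy(). For reference, a portable C sketch of the same helper, along the lines of the generic mainline version, looks like:

    /* Sketch of a generic __iowrite32_copy(); count is in 32-bit words. */
    void __iowrite32_copy(void __iomem *to, const void *from, size_t count)
    {
            u32 __iomem *dst = to;
            const u32 *src = from;
            const u32 *end = src + count;

            while (src < end)
                    __raw_writel(*src++, dst++);
    }
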
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/ipoib_0100_to_2.6.21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/ipoib_0100_to_2.6.21.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/ipoib_0100_to_2.6.21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -12,11 +12,11 @@
  drivers/infiniband/ulp/ipoib/ipoib_multicast.c |   10 ++--
  5 files changed, 61 insertions(+), 64 deletions(-)
 
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib.h	2008-11-03 18:56:08.000000000 +0200
-@@ -279,8 +279,6 @@ struct ipoib_dev_priv {
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib.h	2009-04-16 23:46:20.000000000 +0300
+@@ -277,8 +277,6 @@ struct ipoib_dev_priv {
  
  	struct net_device *dev;
  
@@ -25,7 +25,7 @@
  	unsigned long flags;
  
  	struct mutex vlan_mutex;
-@@ -338,6 +336,8 @@ struct ipoib_dev_priv {
+@@ -336,6 +334,8 @@ struct ipoib_dev_priv {
  
  	struct ib_event_handler event_handler;
  
@@ -34,7 +34,7 @@
  	struct net_device *parent;
  	struct list_head child_intfs;
  	struct list_head list;
-@@ -425,7 +425,7 @@ extern struct workqueue_struct *ipoib_wo
+@@ -423,7 +423,7 @@ extern struct workqueue_struct *ipoib_wo
  
  /* functions */
  
@@ -43,10 +43,10 @@
  void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
  void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2009-04-16 23:46:20.000000000 +0300
 @@ -593,7 +593,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		ipoib_dbg(priv, "cm recv error "
  			   "(status=%d, wrid=%d vend_err %x)\n",
@@ -56,7 +56,7 @@
  		if (has_srq)
  			goto repost;
  		else {
-@@ -658,7 +658,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
+@@ -646,7 +646,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
  		 * this packet and reuse the old buffer.
  		 */
  		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
@@ -65,7 +65,7 @@
  		goto repost;
  	}
  
-@@ -676,8 +676,8 @@ copied:
+@@ -664,8 +664,8 @@ copied:
  	skb_pull(skb, IPOIB_ENCAP_LEN);
  
  	dev->last_rx = jiffies;
@@ -76,7 +76,7 @@
  
  	skb->dev = dev;
  	/* XXX get correct PACKET_ type here */
-@@ -726,8 +726,8 @@ void ipoib_cm_send(struct net_device *de
+@@ -714,8 +714,8 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(skb->len > tx->mtu)) {
  		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
  			   skb->len, tx->mtu);
@@ -87,7 +87,7 @@
  		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
  		return;
  	}
-@@ -746,7 +746,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -734,7 +734,7 @@ void ipoib_cm_send(struct net_device *de
  	tx_req->skb = skb;
  	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
  	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
@@ -96,7 +96,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -756,7 +756,7 @@ void ipoib_cm_send(struct net_device *de
+@@ -744,7 +744,7 @@ void ipoib_cm_send(struct net_device *de
  	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
  			       addr, skb->len))) {
  		ipoib_warn(priv, "post_send failed\n");
@@ -105,7 +105,7 @@
  		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
  		dev_kfree_skb_any(skb);
  	} else {
-@@ -793,8 +793,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
+@@ -781,8 +781,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
  	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
  
  	/* FIXME: is this right? Shouldn't we only increment on success? */
@@ -116,10 +116,10 @@
  
  	dev_kfree_skb_any(tx_req->skb);
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:43:16.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:49:04.000000000 +0300
 @@ -261,7 +261,7 @@ static void ipoib_ib_handle_rx_wc(struct
  	 * this packet and reuse the old buffer.
  	 */
@@ -263,60 +263,41 @@
  		--priv->tx_outstanding;
  		ipoib_dma_unmap_tx(priv->ca, tx_req);
  		dev_kfree_skb_any(skb);
-@@ -809,6 +812,7 @@ int ipoib_ib_dev_stop(struct net_device 
+@@ -708,8 +711,7 @@ int ipoib_ib_dev_open(struct net_device 
+ 	priv->poll_timer.function = ipoib_ib_tx_timer_func;
+ 	priv->poll_timer.data = (unsigned long)dev;
+ 
+-	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_enable(&priv->napi);
++	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+ 
+ 	return 0;
+ }
+@@ -828,8 +830,8 @@ int ipoib_ib_dev_stop(struct net_device 
+ 	struct ipoib_tx_buf *tx_req;
  	int i;
  
- 	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+-	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+-		napi_disable(&priv->napi);
++	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 +	netif_poll_disable(dev);
  
  	ipoib_cm_dev_stop(dev);
  
-@@ -893,6 +897,7 @@ timeout:
- 		msleep(1);
- 	}
+@@ -903,6 +905,7 @@ timeout:
  
+ 	ipoib_ah_dev_cleanup(dev);
+ 
 +	netif_poll_enable(dev);
  	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
  
  	return 0;
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:55:35.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-11-03 18:58:18.000000000 +0200
-@@ -106,20 +106,16 @@ int ipoib_open(struct net_device *dev)
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:44:02.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:46:20.000000000 +0300
+@@ -585,7 +585,7 @@ static void neigh_add_path(struct sk_buf
  
- 	ipoib_dbg(priv, "bringing up interface\n");
- 
--	if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
--		napi_enable(&priv->napi);
-+	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- 
- 	if (ipoib_pkey_dev_delay_open(dev))
- 		return 0;
- 
--	if (ipoib_ib_dev_open(dev)) {
--		napi_disable(&priv->napi);
--		return -EINVAL;
--	}
-+	if (ipoib_ib_dev_open(dev))
-+  		return -EINVAL;
- 
- 	if (ipoib_ib_dev_up(dev)) {
- 		ipoib_ib_dev_stop(dev, 1);
--		napi_disable(&priv->napi);
- 		return -EINVAL;
- 	}
- 
-@@ -152,7 +148,6 @@ static int ipoib_stop(struct net_device 
- 	ipoib_dbg(priv, "stopping interface\n");
- 
- 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
--	napi_disable(&priv->napi);
- 
- 	netif_stop_queue(dev);
- 
-@@ -604,7 +599,7 @@ static void neigh_add_path(struct sk_buf
- 
  	neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
  	if (!neigh) {
 -		++dev->stats.tx_dropped;
@@ -324,7 +305,7 @@
  		dev_kfree_skb_any(skb);
  		return;
  	}
-@@ -667,7 +662,7 @@ err_list:
+@@ -646,7 +646,7 @@ err_list:
  err_path:
  	ipoib_neigh_free(dev, neigh);
  err_drop:
@@ -333,7 +314,7 @@
  	dev_kfree_skb_any(skb);
  
  	spin_unlock_irqrestore(&priv->lock, flags);
-@@ -714,7 +709,7 @@ static void unicast_arp_send(struct sk_b
+@@ -697,7 +697,7 @@ static void unicast_arp_send(struct sk_b
  			} else
  				__path_add(dev, path);
  		} else {
@@ -342,7 +323,7 @@
  			dev_kfree_skb_any(skb);
  		}
  
-@@ -733,7 +728,7 @@ static void unicast_arp_send(struct sk_b
+@@ -716,7 +716,7 @@ static void unicast_arp_send(struct sk_b
  		skb_push(skb, sizeof *phdr);
  		__skb_queue_tail(&path->queue, skb);
  	} else {
@@ -351,7 +332,7 @@
  		dev_kfree_skb_any(skb);
  	}
  
-@@ -790,7 +785,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -773,7 +773,7 @@ static int ipoib_start_xmit(struct sk_bu
  			__skb_queue_tail(&neigh->queue, skb);
  			spin_unlock_irqrestore(&priv->lock, flags);
  		} else {
@@ -360,7 +341,7 @@
  			dev_kfree_skb_any(skb);
  		}
  	} else {
-@@ -816,7 +811,7 @@ static int ipoib_start_xmit(struct sk_bu
+@@ -799,7 +799,7 @@ static int ipoib_start_xmit(struct sk_bu
  					   IPOIB_QPN(phdr->hwaddr),
  					   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
  				dev_kfree_skb_any(skb);
@@ -369,7 +350,7 @@
  				return NETDEV_TX_OK;
  			}
  
-@@ -842,7 +837,7 @@ static void ipoib_timeout(struct net_dev
+@@ -825,7 +825,7 @@ static void ipoib_timeout(struct net_dev
  static int ipoib_hard_header(struct sk_buff *skb,
  			     struct net_device *dev,
  			     unsigned short type,
@@ -378,7 +359,7 @@
  {
  	struct ipoib_header *header;
  
-@@ -931,9 +926,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
+@@ -914,9 +914,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(st
  void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
  {
  	struct sk_buff *skb;
@@ -390,7 +371,7 @@
  		dev_kfree_skb_any(skb);
  	}
  	if (ipoib_cm_get(neigh))
-@@ -1008,10 +1004,6 @@ void ipoib_dev_cleanup(struct net_device
+@@ -991,10 +992,6 @@ void ipoib_dev_cleanup(struct net_device
  	priv->tx_ring = NULL;
  }
  
@@ -401,7 +382,7 @@
  static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
  		       void **tcph, u64 *hdr_flags, void *priv)
  {
-@@ -1069,13 +1061,13 @@ static void ipoib_setup(struct net_devic
+@@ -1052,13 +1049,13 @@ static void ipoib_setup(struct net_devic
  	dev->change_mtu		 = ipoib_change_mtu;
  	dev->hard_start_xmit	 = ipoib_start_xmit;
  	dev->tx_timeout		 = ipoib_timeout;
@@ -418,10 +399,10 @@
  
  	dev->watchdog_timeo	 = HZ;
  
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:55:34.000000000 +0200
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-11-03 18:56:08.000000000 +0200
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-04-16 23:46:20.000000000 +0300
 @@ -100,7 +100,7 @@ static void ipoib_mcast_free(struct ipoi
  	}
  
@@ -448,7 +429,7 @@
  			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
  		}
  		netif_tx_unlock_bh(dev);
-@@ -667,7 +668,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -677,7 +678,7 @@ void ipoib_mcast_send(struct net_device 
  	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)		||
  	    !priv->broadcast					||
  	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
@@ -457,7 +438,7 @@
  		dev_kfree_skb_any(skb);
  		goto unlock;
  	}
-@@ -682,7 +683,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -692,7 +693,7 @@ void ipoib_mcast_send(struct net_device 
  		if (!mcast) {
  			ipoib_warn(priv, "unable to allocate memory for "
  				   "multicast structure\n");
@@ -466,7 +447,7 @@
  			dev_kfree_skb_any(skb);
  			goto out;
  		}
-@@ -697,7 +698,7 @@ void ipoib_mcast_send(struct net_device 
+@@ -707,7 +708,7 @@ void ipoib_mcast_send(struct net_device 
  		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
  			skb_queue_tail(&mcast->pkt_queue, skb);
  		else {

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,26 @@
+---
+ drivers/infiniband/ulp/ipoib/ipoib_fs.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+===================================================================
+--- ofa_kernel-1.4.orig/drivers/infiniband/ulp/ipoib/ipoib_fs.c
++++ ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+@@ -134,7 +134,7 @@ static int ipoib_mcg_open(struct inode *
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_mcg_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_mcg_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -240,7 +240,7 @@ static int ipoib_path_open(struct inode 
+ 	struct seq_file *seq;
+ 	int ret;
+ 
+-	ret = seq_open(file, &ipoib_path_seq_ops);
++	ret = seq_open(file, (struct seq_operations *) &ipoib_path_seq_ops);
+ 	if (ret)
+ 		return ret;
+ 

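A note on the two casts added by ipoib_0500_warnings_ipoib_fs_to_2_6_19.patch above: on the older backport targets, seq_open() is presumably still declared without const (int seq_open(struct file *, struct seq_operations *)), so passing the driver's const seq_operations tables directly produces "discards qualifiers" warnings; the cast silences them. A minimal stand-alone sketch of the same pattern — the dummy_* names are invented for illustration and are not code from the patch:

#include <linux/fs.h>
#include <linux/seq_file.h>

/* Degenerate seq_file iterator, just enough to show the call shape. */
static void *dummy_start(struct seq_file *s, loff_t *pos) { return NULL; }
static void *dummy_next(struct seq_file *s, void *v, loff_t *pos) { return NULL; }
static void dummy_stop(struct seq_file *s, void *v) { }
static int dummy_show(struct seq_file *s, void *v) { return 0; }

static const struct seq_operations dummy_seq_ops = {
	.start = dummy_start,
	.next  = dummy_next,
	.stop  = dummy_stop,
	.show  = dummy_show,
};

static int dummy_open(struct inode *inode, struct file *file)
{
	/*
	 * On kernels where seq_open() takes a non-const pointer,
	 * passing &dummy_seq_ops directly warns about dropping the
	 * const qualifier; casting it away is safe here because
	 * seq_open() never writes through the pointer.
	 */
	return seq_open(file, (struct seq_operations *) &dummy_seq_ops);
}
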
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/ipoib_x_001_2_6_9_disable_coal.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/ipoib_x_001_2_6_9_disable_coal.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/ipoib_x_001_2_6_9_disable_coal.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,33 @@
+For RHEL4, coalescing needs to be disabled when LRO is enabled;
+otherwise IPoIB performance degrades (a TCP stack issue).
+
+Fixes Bugzilla 1494.
+
+Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
+
+Index: ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+===================================================================
+--- ofa_kernel-1.4.orig/drivers/infiniband/ulp/ipoib/ipoib_verbs.c	2009-02-23 11:25:20.000000000 +0200
++++ ofa_kernel-1.4/drivers/infiniband/ulp/ipoib/ipoib_verbs.c	2009-02-23 11:34:39.000000000 +0200
+@@ -185,10 +185,17 @@
+ 
+ 	coal = kzalloc(sizeof *coal, GFP_KERNEL);
+ 	if (coal) {
+-		coal->rx_coalesce_usecs = 10;
+-		coal->tx_coalesce_usecs = 10;
+-		coal->rx_max_coalesced_frames = 16;
+-		coal->tx_max_coalesced_frames = 16;
++		if (dev->features & NETIF_F_LRO) {
++			coal->rx_coalesce_usecs = 0;
++			coal->tx_coalesce_usecs = 0;
++			coal->rx_max_coalesced_frames = 0;
++			coal->tx_max_coalesced_frames = 0;
++		} else {
++			coal->rx_coalesce_usecs = 10;
++			coal->tx_coalesce_usecs = 10;
++			coal->rx_max_coalesced_frames = 16;
++			coal->tx_max_coalesced_frames = 16;
++		}
+ 		dev->ethtool_ops->set_coalesce(dev, coal);
+ 		kfree(coal);
+ 	}

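The hunk above leaves all four coalescing parameters at zero when NETIF_F_LRO is set, so interrupt coalescing is effectively switched off whenever LRO is active. A rough stand-alone sketch of that policy follows; the helper name is invented, and it assumes a 2.6.9-era net_device whose ethtool_ops provides a working set_coalesce hook:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/slab.h>

static void ipoib_set_default_coalesce(struct net_device *dev)
{
	struct ethtool_coalesce *coal;

	coal = kzalloc(sizeof(*coal), GFP_KERNEL);
	if (!coal)
		return;

	if (!(dev->features & NETIF_F_LRO)) {
		/* No LRO: keep the usual 10 us / 16 frame defaults. */
		coal->rx_coalesce_usecs = 10;
		coal->tx_coalesce_usecs = 10;
		coal->rx_max_coalesced_frames = 16;
		coal->tx_max_coalesced_frames = 16;
	}
	/* With LRO the structure stays zero-filled, i.e. coalescing off. */

	if (dev->ethtool_ops && dev->ethtool_ops->set_coalesce)
		dev->ethtool_ops->set_coalesce(dev, coal);
	kfree(coal);
}

Relying on kzalloc() zero-filling the structure is what makes the LRO branch in the patch equivalent to simply skipping the assignments.
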
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/iw_nes_050_to_2_6_24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/iw_nes_050_to_2_6_24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/iw_nes_050_to_2_6_24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2008-11-05 22:23:23.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_24_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -1118,19 +1118,19 @@ static ssize_t nes_show_wqm_quanta(struc
  static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
  					const char *buf, size_t count)

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/iw_nes_100_to_2_6_23.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/iw_nes_100_to_2_6_23.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/iw_nes_100_to_2_6_23.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/Kconfig nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig
---- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/Kconfig	2009-05-07 08:38:45.000000000 -0500
 @@ -2,7 +2,6 @@ config INFINIBAND_NES
  	tristate "NetEffect RNIC Driver"
  	depends on PCI && INET && INFINIBAND
@@ -10,8 +10,8 @@
  	  This is a low-level driver for NetEffect RDMA enabled
  	  Network Interface Cards (RNIC).
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -360,10 +360,11 @@ struct ib_qp *nes_get_qp(struct ib_devic
   */
  static void nes_print_macaddr(struct net_device *netdev)
@@ -29,9 +29,9 @@
  
  /**
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_cm.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2008-11-07 15:26:56.000000000 -0600
-@@ -998,7 +998,6 @@ static int nes_addr_resolve_neigh(struct
+--- linux-2.6/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_cm.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1086,7 +1086,6 @@ static int nes_addr_resolve_neigh(struct
  	struct flowi fl;
  	struct neighbour *neigh;
  	int rc = -1;
@@ -39,7 +39,7 @@
  
  	memset(&fl, 0, sizeof fl);
  	fl.nl_u.ip4_u.daddr = htonl(dst_ip);
-@@ -1012,8 +1011,11 @@ static int nes_addr_resolve_neigh(struct
+@@ -1100,8 +1099,11 @@ static int nes_addr_resolve_neigh(struct
  	if (neigh) {
  		if (neigh->nud_state & NUD_VALID) {
  			nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -53,7 +53,7 @@
  			nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
  					     dst_ip, NES_ARP_ADD);
  			rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
-@@ -1042,7 +1044,6 @@ static struct nes_cm_node *make_cm_node(
+@@ -1130,7 +1132,6 @@ static struct nes_cm_node *make_cm_node(
  	int arpindex = 0;
  	struct nes_device *nesdev;
  	struct nes_adapter *nesadapter;
@@ -61,7 +61,7 @@
  
  	/* create an hte and cm_node for this instance */
  	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
-@@ -1111,8 +1112,11 @@ static struct nes_cm_node *make_cm_node(
+@@ -1197,8 +1198,11 @@ static struct nes_cm_node *make_cm_node(
  
  	/* copy the mac addr to node context */
  	memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
@@ -76,8 +76,8 @@
  	add_hte_node(cm_core, cm_node);
  	atomic_inc(&cm_nodes_created);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2008-11-07 15:38:18.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.c	2009-05-07 08:38:45.000000000 -0500
 @@ -38,14 +38,9 @@
  #include <linux/ip.h>
  #include <linux/tcp.h>
@@ -90,10 +90,10 @@
 -module_param(nes_lro_max_aggr, uint, 0444);
 -MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
 -
- static u32 crit_err_count;
- u32 int_mod_timer_init;
- u32 int_mod_cq_depth_256;
-@@ -1540,25 +1535,6 @@ static void nes_rq_wqes_timeout(unsigned
+ static int wide_ppm_offset;
+ module_param(wide_ppm_offset, int, 0644);
+ MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm");
+@@ -1531,25 +1526,6 @@ static void nes_rq_wqes_timeout(unsigned
  }
  
  
@@ -119,7 +119,7 @@
  /**
   * nes_init_nic_qp
   */
-@@ -1783,14 +1759,6 @@ int nes_init_nic_qp(struct nes_device *n
+@@ -1773,14 +1749,6 @@ int nes_init_nic_qp(struct nes_device *n
  			jumbomode = 1;
  		nes_nic_init_timer_defaults(nesdev, jumbomode);
  	}
@@ -134,7 +134,7 @@
  	return 0;
  }
  
-@@ -2541,7 +2509,7 @@ static void nes_nic_napi_ce_handler(stru
+@@ -2556,7 +2524,7 @@ static void nes_nic_napi_ce_handler(stru
  {
  	struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
  
@@ -143,7 +143,7 @@
  }
  
  
-@@ -2576,13 +2544,10 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2591,13 +2559,10 @@ void nes_nic_ce_handler(struct nes_devic
  	u16 pkt_type;
  	u16 rqes_processed = 0;
  	u8 sq_cqes = 0;
@@ -157,7 +157,7 @@
  	do {
  		if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
  				NES_NIC_CQE_VALID) {
-@@ -2714,17 +2679,9 @@ void nes_nic_ce_handler(struct nes_devic
+@@ -2729,17 +2694,9 @@ void nes_nic_ce_handler(struct nes_devic
  							>> 16);
  					nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
  							nesvnic->netdev->name, vlan_tag);
@@ -178,7 +178,7 @@
  
  skip_rx_indicate0:
  				nesvnic->netdev->last_rx = jiffies;
-@@ -2755,8 +2712,6 @@ skip_rx_indicate0:
+@@ -2770,8 +2727,6 @@ skip_rx_indicate0:
  
  	} while (1);
  
@@ -188,18 +188,18 @@
  		barrier();
  		/* restart the queue if it had been stopped */
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_hw.h nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h
---- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_hw.h	2009-05-07 08:38:45.000000000 -0500
 @@ -33,8 +33,6 @@
  #ifndef __NES_HW_H
  #define __NES_HW_H
  
 -#include <linux/inet_lro.h>
 -
+ #define NES_PHY_TYPE_CX4       1
  #define NES_PHY_TYPE_1G        2
  #define NES_PHY_TYPE_IRIS      3
- #define NES_PHY_TYPE_ARGUS     4
-@@ -991,8 +989,6 @@ struct nes_hw_tune_timer {
+@@ -1005,8 +1003,6 @@ struct nes_hw_tune_timer {
  #define NES_TIMER_ENABLE_LIMIT      4
  #define NES_MAX_LINK_INTERRUPTS     128
  #define NES_MAX_LINK_CHECK          200
@@ -208,7 +208,7 @@
  
  struct nes_adapter {
  	u64              fw_ver;
-@@ -1168,7 +1164,6 @@ struct nes_vnic {
+@@ -1194,7 +1190,6 @@ struct nes_vnic {
  	u32               msg_enable;
  	/* u32 tx_avail; */
  	__be32            local_ipaddr;
@@ -216,7 +216,7 @@
  	spinlock_t           tx_lock;	/* could use netdev tx lock? */
  	struct timer_list    rq_wqes_timer;
  	u32                  nic_mem_size;
-@@ -1196,9 +1191,6 @@ struct nes_vnic {
+@@ -1222,9 +1217,6 @@ struct nes_vnic {
  	u8  of_device_registered;
  	u8  rdma_enabled;
  	u8  rx_checksum_disabled;
@@ -227,8 +227,8 @@
  
  struct nes_ib_device {
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:26:56.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_23_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
 @@ -96,35 +96,38 @@ static int nics_per_function = 1;
  /**
   * nes_netdev_poll
@@ -292,7 +292,7 @@
  	netif_stop_queue(netdev);
  	list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
  		first_nesvnic = container_of(list_pos, struct nes_vnic, list);
-@@ -797,14 +798,16 @@ static int nes_netdev_set_mac_address(st
+@@ -775,14 +776,16 @@ static int nes_netdev_set_mac_address(st
  	int i;
  	u32 macaddr_low;
  	u16 macaddr_high;
@@ -312,7 +312,7 @@
  	macaddr_high  = ((u16)netdev->dev_addr[0]) << 8;
  	macaddr_high += (u16)netdev->dev_addr[1];
  	macaddr_low   = ((u32)netdev->dev_addr[2]) << 24;
-@@ -909,11 +912,11 @@ static void nes_netdev_set_multicast_lis
+@@ -887,11 +890,11 @@ static void nes_netdev_set_multicast_lis
  			if (mc_index >= max_pft_entries_avaiable)
  				break;
  			if (multicast_addr) {
@@ -329,7 +329,7 @@
  				macaddr_high  = ((u16)multicast_addr->dmi_addr[0]) << 8;
  				macaddr_high += (u16)multicast_addr->dmi_addr[1];
  				macaddr_low   = ((u32)multicast_addr->dmi_addr[2]) << 24;
-@@ -1063,9 +1066,6 @@ static const char nes_ethtool_stringset[
+@@ -1040,9 +1043,6 @@ static const char nes_ethtool_stringset[
  	"CQ Depth 32",
  	"CQ Depth 128",
  	"CQ Depth 256",
@@ -339,17 +339,17 @@
  };
  
  #define NES_ETHTOOL_STAT_COUNT  ARRAY_SIZE(nes_ethtool_stringset)
-@@ -1263,9 +1263,6 @@ static void nes_netdev_get_ethtool_stats
- 	target_stat_values[52] = int_mod_cq_depth_32;
- 	target_stat_values[53] = int_mod_cq_depth_128;
- 	target_stat_values[54] = int_mod_cq_depth_256;
--	target_stat_values[55] = nesvnic->lro_mgr.stats.aggregated;
--	target_stat_values[56] = nesvnic->lro_mgr.stats.flushed;
--	target_stat_values[57] = nesvnic->lro_mgr.stats.no_desc;
+@@ -1240,9 +1240,6 @@ static void nes_netdev_get_ethtool_stats
+ 	target_stat_values[++index] = int_mod_cq_depth_32;
+ 	target_stat_values[++index] = int_mod_cq_depth_128;
+ 	target_stat_values[++index] = int_mod_cq_depth_256;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
+-	target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
  
  }
  
-@@ -1542,8 +1539,6 @@ static struct ethtool_ops nes_ethtool_op
+@@ -1525,8 +1522,6 @@ static struct ethtool_ops nes_ethtool_op
  	.set_sg = ethtool_op_set_sg,
  	.get_tso = ethtool_op_get_tso,
  	.set_tso = ethtool_op_set_tso,
@@ -358,7 +358,7 @@
  };
  
  
-@@ -1615,7 +1610,8 @@ struct net_device *nes_netdev_init(struc
+@@ -1598,7 +1593,8 @@ struct net_device *nes_netdev_init(struc
  	netdev->type = ARPHRD_ETHER;
  	netdev->features = NETIF_F_HIGHDMA;
  	netdev->ethtool_ops = &nes_ethtool_ops;
@@ -368,7 +368,7 @@
  	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
  	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
  	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
-@@ -1648,7 +1644,6 @@ struct net_device *nes_netdev_init(struc
+@@ -1630,7 +1626,6 @@ struct net_device *nes_netdev_init(struc
  
  	if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
  		netdev->features |= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/iw_nes_200_to_2_6_22.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/iw_nes_200_to_2_6_22.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/iw_nes_200_to_2_6_22.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,6 +1,6 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.c nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c
---- linux-2.6/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2008-11-07 15:52:27.000000000 -0600
+--- linux-2.6/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.c	2009-05-07 08:38:45.000000000 -0500
 @@ -593,16 +593,22 @@ static int __devinit nes_probe(struct pc
  						nesdev->nesadapter->port_count;
  	}
@@ -35,9 +35,9 @@
  
  	tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes.h nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h
---- linux-2.6/drivers/infiniband/hw/nes/nes.h	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2008-11-07 15:52:27.000000000 -0600
-@@ -276,14 +276,7 @@ static inline __le32 get_crc_value(struc
+--- linux-2.6/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_22_patch/drivers/infiniband/hw/nes/nes.h	2009-05-07 08:38:45.000000000 -0500
+@@ -274,14 +274,7 @@ static inline __le32 get_crc_value(struc
  	u32 crc_value;
  	crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad));
  

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/iw_nes_210_to_2_6_21.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/iw_nes_210_to_2_6_21.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/iw_nes_210_to_2_6_21.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,23 @@
+diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_21_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1549,6 +1549,11 @@ static void nes_netdev_vlan_rx_register(
+ 	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ }
+ 
++static void nes_netdev_vlan_rx_kill_vid(struct net_device *netdev, 
++					unsigned short vid)
++{
++}
++
+ 
+ /**
+  * nes_netdev_init - initialize network device
+@@ -1598,6 +1603,7 @@ struct net_device *nes_netdev_init(struc
+ 	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
+ 	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ 	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
++	netdev->vlan_rx_kill_vid = nes_netdev_vlan_rx_kill_vid;
+ 
+ 	/* Fill in the port structure */
+ 	nesvnic->netdev = netdev;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/iw_nes_300_to_2_6_13.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/iw_nes_300_to_2_6_13.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/iw_nes_300_to_2_6_13.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,7 +1,7 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_nic.c nes.2_6_13_patch/drivers/infiniband/hw/nes/nes_nic.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:53:56.000000000 -0600
-+++ nes.2_6_13_patch/drivers/infiniband/hw/nes/nes_nic.c	2008-11-07 15:53:09.000000000 -0600
-@@ -1640,7 +1640,6 @@ struct net_device *nes_netdev_init(struc
+--- linux-2.6/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:39:21.000000000 -0500
++++ nes.2_6_13_patch/drivers/infiniband/hw/nes/nes_nic.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1628,7 +1628,6 @@ struct net_device *nes_netdev_init(struc
  	netdev->dev_addr[3] = (u8)(u64temp>>16);
  	netdev->dev_addr[4] = (u8)(u64temp>>8);
  	netdev->dev_addr[5] = (u8)u64temp;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/iw_nes_400_to_2_6_9.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/iw_nes_400_to_2_6_9.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/iw_nes_400_to_2_6_9.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,7 +1,7 @@
 diff -Nurp linux-2.6/drivers/infiniband/hw/nes/nes_verbs.c nes.2_6_9_patch/drivers/infiniband/hw/nes/nes_verbs.c
---- linux-2.6/drivers/infiniband/hw/nes/nes_verbs.c	2008-11-05 22:22:33.000000000 -0600
-+++ nes.2_6_9_patch/drivers/infiniband/hw/nes/nes_verbs.c	2008-11-07 15:53:36.000000000 -0600
-@@ -1120,6 +1120,8 @@ static int nes_setup_mmap_qp(struct nes_
+--- linux-2.6/drivers/infiniband/hw/nes/nes_verbs.c	2009-05-07 08:37:16.000000000 -0500
++++ nes.2_6_9_patch/drivers/infiniband/hw/nes/nes_verbs.c	2009-05-07 08:38:45.000000000 -0500
+@@ -1017,6 +1017,8 @@ static int nes_setup_mmap_qp(struct nes_
  {
  	void *mem;
  	struct nes_device *nesdev = nesvnic->nesdev;
@@ -10,7 +10,7 @@
  
  	nesqp->qp_mem_size = (sizeof(struct nes_hw_qp_wqe) * sq_size) +
  			(sizeof(struct nes_hw_qp_wqe) * rq_size) +
-@@ -1137,6 +1139,17 @@ static int nes_setup_mmap_qp(struct nes_
+@@ -1034,6 +1036,17 @@ static int nes_setup_mmap_qp(struct nes_
  			"host descriptor rings located @ %p (pa = 0x%08lX.) size = %u.\n",
  			mem, (unsigned long)nesqp->hwqp.sq_pbase, nesqp->qp_mem_size);
  
@@ -28,7 +28,7 @@
  	memset(mem, 0, nesqp->qp_mem_size);
  
  	nesqp->hwqp.sq_vbase = mem;
-@@ -1509,6 +1522,8 @@ static int nes_destroy_qp(struct ib_qp *
+@@ -1409,6 +1422,8 @@ static int nes_destroy_qp(struct ib_qp *
  	/* struct nes_vnic *nesvnic = to_nesvnic(ibqp->device); */
  	struct nes_ucontext *nes_ucontext;
  	struct ib_qp_attr attr;
@@ -37,7 +37,7 @@
  	struct iw_cm_id *cm_id;
  	struct iw_cm_event cm_event;
  	int ret;
-@@ -1552,6 +1567,17 @@ static int nes_destroy_qp(struct ib_qp *
+@@ -1452,6 +1467,17 @@ static int nes_destroy_qp(struct ib_qp *
  			if (nes_ucontext->first_free_wq > nesqp->mmap_sq_db_index) {
  				nes_ucontext->first_free_wq = nesqp->mmap_sq_db_index;
  			}

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/mlx4_0060_sysfs.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/mlx4_0060_sysfs.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/mlx4_0060_sysfs.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,18 +1,18 @@
-From 70e57d4e8e8df5b452a13f9b6a3c07f2df09e8a4 Mon Sep 17 00:00:00 2001
+From 20f52545712c4d0b91fb96df72ea5b1818685bc5 Mon Sep 17 00:00:00 2001
 From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
-Date: Thu, 4 Dec 2008 13:40:39 +0200
+Date: Thu, 22 Jan 2009 09:41:18 +0200
 Subject: [PATCH] mlx4: Sysfs backport for RHAS4
 
 Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
 ---
- drivers/net/mlx4/main.c |   90 ++++++++++++++++++++++++++++++++--------------
- 1 files changed, 62 insertions(+), 28 deletions(-)
+ drivers/net/mlx4/main.c |   68 +++++++++++++++++++++++++++++++++++-----------
+ 1 files changed, 51 insertions(+), 17 deletions(-)
 
 diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
-index 7df678d..1c01c38 100644
+index f87ebbc..fce2589 100644
 --- a/drivers/net/mlx4/main.c
 +++ b/drivers/net/mlx4/main.c
-@@ -399,18 +399,13 @@ out:
+@@ -397,18 +397,13 @@ out:
  	return err;
  }
  
@@ -34,7 +34,7 @@
  		sprintf(buf, "auto (%s)\n", type);
  	else
  		sprintf(buf, "%s\n", type);
-@@ -418,14 +413,33 @@ static ssize_t show_port_type(struct device *dev,
+@@ -416,14 +411,33 @@ static ssize_t show_port_type(struct device *dev,
  	return strlen(buf);
  }
  
@@ -42,11 +42,7 @@
 -			     struct device_attribute *attr,
 -			     const char *buf, size_t count)
 +static ssize_t show_port_type1(struct device *dev, char *buf)
- {
--	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
--						   port_attr);
--	struct mlx4_dev *mdev = info->dev;
--	struct mlx4_priv *priv = mlx4_priv(mdev);
++{
 +	struct pci_dev *pdev = to_pci_dev(dev);
 +	struct mlx4_dev *mdev = pci_get_drvdata(pdev);
 +
@@ -67,63 +63,18 @@
 +		return -ENODEV;
 +}
 +
-+static ssize_t store_port(struct mlx4_dev *dev, int port,
++static ssize_t store_port(struct mlx4_dev *mdev, int port,
 +			  const char *buf, size_t count)
-+{
-+	struct mlx4_priv *priv = mlx4_priv(dev);
+ {
+-	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
+-						   port_attr);
+-	struct mlx4_dev *mdev = info->dev;
+ 	struct mlx4_priv *priv = mlx4_priv(mdev);
 +	struct mlx4_port_info *info = &priv->port[port];
  	enum mlx4_port_type types[MLX4_MAX_PORTS];
- 	enum mlx4_port_type tmp_type;
+ 	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
  	int i;
-@@ -438,50 +452,70 @@ static ssize_t set_port_type(struct device *dev,
- 	else if (!strcmp(buf, "auto\n"))
- 		info->tmp_type = MLX4_PORT_TYPE_AUTO;
- 	else {
--		mlx4_err(mdev, "%s is not supported port type\n", buf);
-+		mlx4_err(dev, "%s is not supported port type\n", buf);
- 		return -EINVAL;
- 	}
- 
- 	mutex_lock(&priv->port_mutex);
--	mdev->caps.possible_type[info->port] = info->tmp_type;
-+	dev->caps.possible_type[info->port] = info->tmp_type; 
- 	if (info->tmp_type == MLX4_PORT_TYPE_AUTO) {
--		err = mlx4_SENSE_PORT(mdev, info->port, &tmp_type);
-+		err = mlx4_SENSE_PORT(dev, info->port, &tmp_type);
- 		if (!err && (tmp_type == MLX4_PORT_TYPE_ETH ||
- 			     tmp_type == MLX4_PORT_TYPE_IB)) {
- 			info->tmp_type = tmp_type;
- 		}
- 	}
- 
--	for (i = 0; i < mdev->caps.num_ports; i++)
-+	for (i = 0; i < dev->caps.num_ports; i++)
- 		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
--					mdev->caps.possible_type[i+1];
-+					dev->caps.possible_type[i+1];
- 
- 	if (priv->trig) {
--		if (++priv->changed_ports < mdev->caps.num_ports)
-+		if (++priv->changed_ports < dev->caps.num_ports)
- 			goto out;
- 		else
- 			priv->trig = priv->changed_ports = 0;
- 	}
--	err = mlx4_check_port_params(mdev, types);
-+	err = mlx4_check_port_params(dev, types);
- 	if (err)
- 		goto out;
- 
--	for (i = 0; i < mdev->caps.num_ports; i++) {
-+	for (i = 0; i < dev->caps.num_ports; i++) {
- 		priv->port[i + 1].tmp_type = 0;
- 	}
- 
--	err = mlx4_change_port_types(mdev, types);
-+	err = mlx4_change_port_types(dev, types);
- 
- out:
- 	mutex_unlock(&priv->port_mutex);
+@@ -482,6 +496,24 @@ out:
  	return err ? err : count;
  }
  
@@ -145,9 +96,11 @@
 +	return store_port(mdev, 2, buf, count);
 +}
 +
-+
- static ssize_t trigger_port(struct device *dev,
--			    struct device_attribute *attr,
+ /*
+  * This function is invoked if user wants to modify all port types
+  * at once. We will wait for all the ports to be assigned new values,
+@@ -491,7 +523,9 @@ static ssize_t trigger_port(struct device *dev,
+ 			    struct device_attribute *attr,
  			    const char *buf, size_t count)
  {
 -	struct mlx4_priv *priv = container_of(attr, struct mlx4_priv, trigger_attr);
@@ -157,7 +110,7 @@
  	if (!priv)
  		return -ENODEV;
  
-@@ -1121,8 +1155,8 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
+@@ -1131,8 +1165,8 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
  
  	sprintf(info->dev_name, "mlx4_port%d", port);
  	memcpy(&info->port_attr.attr, &attr, sizeof(attr));

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/mlx4_en_0099_no_multiqueue.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/mlx4_en_0099_no_multiqueue.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/mlx4_en_0099_no_multiqueue.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,73 @@
+diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
+index bce700a..a9ee0d7 100644
+--- a/drivers/net/mlx4/en_netdev.c
++++ b/drivers/net/mlx4/en_netdev.c
+@@ -963,7 +963,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	int i;
+ 	int err;
+ 
+-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
++	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+ 	if (dev == NULL) {
+ 		mlx4_err(mdev, "Net device allocation failed\n");
+ 		return -ENOMEM;
+@@ -1036,7 +1036,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	dev->open = &mlx4_en_open;
+ 	dev->stop = &mlx4_en_close;
+ 	dev->hard_start_xmit = &mlx4_en_xmit;
+-	dev->select_queue = &mlx4_en_select_queue;
+ 	dev->get_stats = &mlx4_en_get_stats;
+ 	dev->set_multicast_list = &mlx4_en_set_multicast;
+ 	dev->set_mac_address = &mlx4_en_set_mac;
+diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
+index 3d8246f..72e166f 100644
+--- a/drivers/net/mlx4/en_tx.c
++++ b/drivers/net/mlx4/en_tx.c
+@@ -392,7 +392,7 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+ 			 *   transmission on that ring would stop the queue.
+ 			 */
+ 			ring->blocked = 0;
+-			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
++			netif_wake_queue(dev);
+ 			priv->port_stats.wake_queue++;
+ 		}
+ 	}
+@@ -612,7 +612,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
+ 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ }
+ 
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
++static int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	u16 vlan_tag = 0;
+@@ -703,7 +703,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+-	tx_ind = skb->queue_mapping;
++	tx_ind = mlx4_en_select_queue(dev, skb);
+ 	ring = &priv->tx_ring[tx_ind];
+ 	if (priv->vlgrp && vlan_tx_tag_present(skb))
+ 		vlan_tag = vlan_tx_tag_get(skb);
+@@ -713,7 +713,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ 		/* every full Tx ring stops queue.
+ 		 * TODO: implement multi-queue support (per-queue stop) */
+-		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
++		netif_stop_queue(dev);
+ 		ring->blocked = 1;
+ 		priv->port_stats.queue_stopped++;
+ 
+diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
+index adef17c..995e318 100644
+--- a/drivers/net/mlx4/mlx4_en.h
++++ b/drivers/net/mlx4/mlx4_en.h
+@@ -521,7 +521,6 @@ void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq);
+ void mlx4_en_poll_tx_cq(unsigned long data);
+ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+-int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+ 
+ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+ 			   u32 size, u16 stride);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/mlx4_en_0100_to_2.6.24.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/mlx4_en_0100_to_2.6.24.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/mlx4_en_0100_to_2.6.24.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,33 +1,5 @@
-From 6d0a6cc6981404e95297924c9a8f169b3447a98e Mon Sep 17 00:00:00 2001
-From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
-Date: Thu, 2 Oct 2008 11:26:30 +0300
-Subject: [PATCH] mlx4_en: LRO backport patch
-
-Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
----
- drivers/net/mlx4/Makefile    |    2 +-
- drivers/net/mlx4/en_cq.c     |   23 ++-
- drivers/net/mlx4/en_lro.c    |  529 ++++++++++++++++++++++++++++++++++++++++++
- drivers/net/mlx4/en_netdev.c |    5 +-
- drivers/net/mlx4/en_params.c |   19 +--
- drivers/net/mlx4/en_rx.c     |  121 +++-------
- drivers/net/mlx4/en_tx.c     |    1 +
- drivers/net/mlx4/mlx4_en.h   |   60 +++++-
- 8 files changed, 646 insertions(+), 114 deletions(-)
- create mode 100644 drivers/net/mlx4/en_lro.c
-
-diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
-index acb1b91..e6bfdec 100644
---- a/drivers/net/mlx4/Makefile
-+++ b/drivers/net/mlx4/Makefile
-@@ -5,4 +5,4 @@ mlx4_core-y :=  alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-
- obj-$(CONFIG_MLX4_EN)                  += mlx4_en.o
-
--mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o
-+mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o en_lro.o
 diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
-index cf9c5df..2348bfc 100644
+index a4d6e46..440633a 100644
 --- a/drivers/net/mlx4/en_cq.c
 +++ b/drivers/net/mlx4/en_cq.c
 @@ -34,6 +34,7 @@
@@ -38,7 +10,7 @@
  
  #include "mlx4_en.h"
  
-@@ -100,8 +101,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -106,8 +107,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  		cq->timer.function = mlx4_en_poll_tx_cq;
  		cq->timer.data = (unsigned long) cq;
  	} else {
@@ -58,7 +30,7 @@
  	}
  
  	return 0;
-@@ -123,8 +133,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+@@ -129,8 +139,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
  
  	if (cq->is_tx)
  		del_timer(&cq->timer);
@@ -74,548 +46,8 @@
  
  	mlx4_cq_free(mdev->dev, &cq->mcq);
  }
-diff --git a/drivers/net/mlx4/en_lro.c b/drivers/net/mlx4/en_lro.c
-new file mode 100644
-index 0000000..9d17376
---- /dev/null
-+++ b/drivers/net/mlx4/en_lro.c
-@@ -0,0 +1,534 @@
-+/*
-+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses.  You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ *     Redistribution and use in source and binary forms, with or
-+ *     without modification, are permitted provided that the following
-+ *     conditions are met:
-+ *
-+ *      - Redistributions of source code must retain the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer.
-+ *
-+ *      - Redistributions in binary form must reproduce the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer in the documentation and/or other materials
-+ *        provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ */
-+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <net/tcp.h>
-+#include <linux/if_vlan.h>
-+#include <linux/delay.h>
-+
-+#include "mlx4_en.h"
-+
-+/* LRO hash function - using sum of source and destination port LSBs is
-+ * good enough */
-+#define LRO_INDEX(th, size) \
-+	((*((u8*) &th->source + 1) + *((u8*) &th->dest + 1)) & (size - 1))
-+
-+/* #define CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+static void mlx4_en_lro_validate(struct mlx4_en_priv* priv, struct mlx4_en_lro *lro)
-+{
-+	int i;
-+	int size, size2;
-+	struct sk_buff *skb = lro->skb;
-+	skb_frag_t *frags;
-+	int len, len2;
-+	int cur_skb = 0;
-+
-+	/* Sum fragment sizes of first skb */
-+	len = skb->len;
-+	size = skb_headlen(skb);
-+	frags = skb_shinfo(skb)->frags;
-+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+		size += frags[i].size;
-+	}
-+
-+	/* Add in fragments of linked skb's */
-+	skb = skb_shinfo(skb)->frag_list;
-+	while (skb) {
-+		cur_skb++;
-+		len2 = skb->len;
-+		if (skb_headlen(skb)) {
-+			mlx4_err(priv->mdev, "Bad LRO format: non-zero headlen "
-+				  "in fraglist (skb:%d)\n", cur_skb);
-+			return;
-+		}
-+
-+		size2 = 0;
-+		frags = skb_shinfo(skb)->frags;
-+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+			size2 += frags[i].size;
-+		}
-+
-+		if (size2 != len2) {
-+			mlx4_err(priv->mdev, "Bad skb size:%d in LRO fraglist. "
-+			          "Expected:%d (skb:%d)\n", size2, len2, cur_skb);
-+			return;
-+		}
-+		size += size2;
-+		skb = skb->next;
-+	}
-+
-+	if (size != len)
-+		mlx4_err(priv->mdev, "Bad LRO size:%d expected:%d\n", size, len);
-+}
-+#endif /* MLX4_EN_DEBUG_LRO */
-+
-+static void mlx4_en_lro_flush_single(struct mlx4_en_priv* priv,
-+		   struct mlx4_en_rx_ring* ring, struct mlx4_en_lro *lro)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct sk_buff *skb = lro->skb;
-+	struct iphdr *iph = (struct iphdr *) skb->data;
-+	struct tcphdr *th = (struct tcphdr *)(iph + 1);
-+	unsigned int headlen = skb_headlen(skb);
-+	__wsum tcp_hdr_csum;
-+	u32 *ts;
-+
-+	/* Update IP length and checksum */
-+	iph->tot_len = htons(lro->tot_len);
-+	iph->check = 0;
-+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-+
-+	/* Update latest TCP ack, window, psh, and timestamp */
-+	th->ack_seq = lro->ack_seq;
-+	th->window = lro->window;
-+	th->psh = !!lro->psh;
-+	if (lro->has_timestamp) {
-+		ts = (u32 *) (th + 1);
-+		ts[1] = htonl(lro->tsval);
-+		ts[2] = lro->tsecr;
-+	}
-+	th->check = 0;
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, 0);
-+	lro->data_csum = csum_add(lro->data_csum, tcp_hdr_csum);
-+	th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
-+				      lro->tot_len - (iph->ihl << 2),
-+				      IPPROTO_TCP, lro->data_csum);
-+
-+	/* Update skb */
-+	skb->len = lro->tot_len;
-+	skb->data_len = lro->tot_len - headlen;
-+	skb->truesize = skb->len + sizeof(struct sk_buff);
-+	skb_shinfo(skb)->gso_size = lro->mss;
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+	mlx4_en_lro_validate(priv, lro);
-+#endif /* CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+	/* Push it up the stack */
-+	if (priv->vlgrp && lro->has_vlan)
-+		vlan_hwaccel_receive_skb(skb, priv->vlgrp,
-+					be16_to_cpu(lro->vlan_prio));
-+	else
-+		netif_receive_skb(skb);
-+	priv->dev->last_rx = jiffies;
-+
-+	/* Increment stats */
-+	priv->port_stats.lro_flushed++;
-+
-+	/* Move session back to the free list */
-+	hlist_del(&lro->node);
-+	hlist_del(&lro->flush_node);
-+	hlist_add_head(&lro->node, &ring->lro_free);
-+}
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_flush, flush_node) {
-+		if (all || time_after(jiffies, lro->expires))
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+	}
-+}
-+
-+static inline int mlx4_en_lro_append(struct mlx4_en_priv *priv,
-+				   struct mlx4_en_lro *lro,
-+				   struct mlx4_en_rx_desc *rx_desc,
-+				   struct skb_frag_struct *skb_frags,
-+				   struct mlx4_en_rx_alloc *page_alloc,
-+				   unsigned int data_len,
-+				   int hlen)
-+{
-+	struct sk_buff *skb = lro->skb_last;
-+	struct skb_shared_info *info;
-+	struct skb_frag_struct *frags_copy;
-+	int nr_frags;
-+
-+	if (skb_shinfo(skb)->nr_frags + priv->num_frags > MAX_SKB_FRAGS)
-+		return -ENOMEM;
-+
-+	info = skb_shinfo(skb);
-+
-+	/* Copy fragments from descriptor ring to skb */
-+	frags_copy = info->frags + info->nr_frags;
-+	nr_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
-+						frags_copy,
-+						page_alloc,
-+						data_len + hlen);
-+	if (!nr_frags) {
-+		mlx4_dbg(DRV, priv, "Failed completing rx desc during LRO append\n");
-+		return -ENOMEM;
-+	}
-+
-+	/* Skip over headers */
-+	frags_copy[0].page_offset += hlen;
-+
-+	if (nr_frags == 1)
-+		frags_copy[0].size = data_len;
-+	else {
-+		/* Adjust size of last fragment to match packet length.
-+		 * Note: if this fragment is also the first one, the
-+		 *       operation is completed in the next line */
-+		frags_copy[nr_frags - 1].size = hlen + data_len -
-+				priv->frag_info[nr_frags - 1].frag_prefix_size;
-+
-+		/* Adjust size of first fragment */
-+		frags_copy[0].size -= hlen;
-+	}
-+
-+	/* Update skb bookkeeping */
-+	skb->len += data_len;
-+	skb->data_len += data_len;
-+	info->nr_frags += nr_frags;
-+	return 0;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_find_session(struct mlx4_en_dev *mdev,
-+						       struct mlx4_en_rx_ring *ring,
-+						       struct iphdr *iph,
-+						       struct tcphdr *th)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node;
-+	int index = LRO_INDEX(th, mdev->profile.num_lro);
-+	struct hlist_head *list = &ring->lro_hash[index];
-+
-+	hlist_for_each_entry(lro, node, list, node) {
-+		if (lro->sport_dport == *((u32*) &th->source) &&
-+		    lro->saddr == iph->saddr &&
-+		    lro->daddr == iph->daddr)
-+			return lro;
-+	}
-+	return NULL;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_alloc_session(struct mlx4_en_priv *priv,
-+							struct mlx4_en_rx_ring *ring)
-+{
-+	return hlist_empty(&ring->lro_free) ? NULL :
-+		hlist_entry(ring->lro_free.first, struct mlx4_en_lro, node);
-+}
-+
-+static __wsum mlx4_en_lro_tcp_data_csum(struct iphdr *iph,
-+					struct tcphdr *th, int len)
-+{
-+	__wsum tcp_csum;
-+	__wsum tcp_hdr_csum;
-+	__wsum tcp_ps_hdr_csum;
-+
-+	tcp_csum = ~csum_unfold(th->check);
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, tcp_csum);
-+
-+	tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-+					     len + (th->doff << 2),
-+					     IPPROTO_TCP, 0);
-+
-+	return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
-+			tcp_ps_hdr_csum);
-+}
-+
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+					  struct mlx4_en_rx_desc *rx_desc,
-+					  struct skb_frag_struct *skb_frags,
-+					  unsigned int length,
-+					  struct mlx4_cqe *cqe)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct mlx4_en_lro *lro;
-+	struct sk_buff *skb;
-+	struct iphdr *iph;
-+	struct tcphdr *th;
-+	dma_addr_t dma;
-+	int tcp_hlen;
-+	int tcp_data_len;
-+	int hlen;
-+	u16 ip_len;
-+	void *va;
-+	u32 *ts;
-+	u32 seq;
-+	u32 tsval = (u32) ~0UL;
-+	u32 tsecr = 0;
-+	u32 ack_seq;
-+	u16 window;
-+
-+	/* This packet is eligible for LRO if it is:
-+	 * - DIX Ethernet (type interpretation)
-+	 * - TCP/IP (v4)
-+	 * - without IP options
-+	 * - not an IP fragment */
-+	if (!mlx4_en_can_lro(cqe->status))
-+			return -1;
-+
-+	/* Get pointer to TCP header. We already know that the packet is DIX Ethernet/IPv4/TCP
-+	 * with no VLAN (HW stripped it) and no IP options */
-+	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
-+	iph = va + ETH_HLEN;
-+	th = (struct tcphdr *)(iph + 1);
-+
-+	/* Synchronsize headers for processing */
-+	dma = be64_to_cpu(rx_desc->data[0].addr);
-+#define MAX_LRO_HEADER		(ETH_HLEN + \
-+				 sizeof(*iph) + \
-+				 sizeof(*th) + \
-+				 TCPOLEN_TSTAMP_ALIGNED)
-+	dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
-+				      MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+
-+	/* We only handle aligned timestamp options */
-+	tcp_hlen = (th->doff << 2);
-+	if (tcp_hlen == sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) {
-+		ts = (u32*) (th + 1);
-+		if (unlikely(*ts != htonl((TCPOPT_NOP << 24) |
-+					  (TCPOPT_NOP << 16) |
-+					  (TCPOPT_TIMESTAMP << 8) |
-+					  TCPOLEN_TIMESTAMP)))
-+			goto sync_device;
-+		tsval = ntohl(ts[1]);
-+		tsecr = ts[2];
-+	} else if (tcp_hlen != sizeof(*th))
-+		goto sync_device;
-+	
-+
-+	/* At this point we know we have a TCP packet that is likely to be
-+	 * eligible for LRO. Therefore, see now if we have an oustanding
-+	 * session that corresponds to this packet so we could flush it if
-+	 * something still prevents LRO */
-+	lro = mlx4_en_lro_find_session(mdev, ring, iph, th);
-+
-+	/* ensure no bits set besides ack or psh */
-+	if (th->fin || th->syn || th->rst || th->urg || th->ece ||
-+	    th->cwr || !th->ack) {
-+		if (lro) {
-+			/* First flush session to keep packets in-order */
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		}
-+		goto sync_device;
-+	}
-+
-+	/* Get ip length and verify that the frame is big enough */
-+	ip_len = ntohs(iph->tot_len);
-+	if (unlikely(length < ETH_HLEN + ip_len)) {
-+		mlx4_warn(mdev, "Cannot LRO - ip payload exceeds frame!\n");
-+		goto sync_device;
-+	}
-+
-+	/* Get TCP payload length */
-+	tcp_data_len = ip_len - tcp_hlen - sizeof(struct iphdr);
-+	seq = ntohl(th->seq);
-+	if (!tcp_data_len)
-+		goto flush_session;
-+
-+	if (lro) {
-+		/* Check VLAN tag */
-+		if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+			if (cqe->sl_vid != lro->vlan_prio || !lro->has_vlan) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else if (lro->has_vlan) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* Check sequence number */
-+		if (unlikely(seq != lro->next_seq)) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* If the cummulative IP length is over 64K, flush and start
-+		 * a new session */
-+		if (lro->tot_len + tcp_data_len > 0xffff) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto new_session;
-+		}
-+
-+		/* Check timestamps */
-+		if (tcp_hlen != sizeof(*th)) {
-+			if (unlikely(lro->tsval > tsval || !tsecr))
-+				goto sync_device;
-+		}
-+
-+		window = th->window;
-+		ack_seq = th->ack_seq;
-+		if (likely(tcp_data_len)) {
-+			/* Append the data! */
-+			hlen = ETH_HLEN + sizeof(struct iphdr) + tcp_hlen;
-+			if (mlx4_en_lro_append(priv, lro, rx_desc, skb_frags,
-+							ring->page_alloc,
-+							tcp_data_len, hlen)) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else {
-+			/* No data */
-+			dma_sync_single_range_for_device(&mdev->dev->pdev->dev, dma,
-+							 0, MAX_LRO_HEADER,
-+							 DMA_FROM_DEVICE);
-+		}
-+
-+		/* Update session */
-+		lro->psh |= th->psh;
-+		lro->next_seq += tcp_data_len;
-+		lro->data_csum = csum_block_add(lro->data_csum,
-+					mlx4_en_lro_tcp_data_csum(iph, th,
-+								  tcp_data_len),
-+					lro->tot_len);
-+		lro->tot_len += tcp_data_len;
-+		lro->tsval = tsval;
-+		lro->tsecr = tsecr;
-+		lro->ack_seq = ack_seq;
-+		lro->window = window;
-+		if (tcp_data_len > lro->mss)
-+			lro->mss = tcp_data_len;
-+		priv->port_stats.lro_aggregated++;
-+		if (th->psh)
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		return 0;
-+	}
-+
-+new_session:
-+	if (th->psh)
-+		goto sync_device;
-+	lro = mlx4_en_lro_alloc_session(priv, ring);
-+	if (lro) {
-+		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags, ring->page_alloc,
-+							     ETH_HLEN + ip_len);
-+		if (skb) {
-+			int index;
-+
-+			/* Add in the skb */
-+			lro->skb = skb;
-+			lro->skb_last = skb;
-+			skb->protocol = eth_type_trans(skb, priv->dev);
-+			skb->ip_summed = CHECKSUM_UNNECESSARY;
-+
-+			/* Initialize session */
-+			lro->saddr = iph->saddr;
-+			lro->daddr = iph->daddr;
-+			lro->sport_dport = *((u32*) &th->source);
-+
-+			lro->next_seq = seq + tcp_data_len;
-+			lro->tot_len = ip_len;
-+			lro->psh = th->psh;
-+			lro->ack_seq = th->ack_seq;
-+			lro->window = th->window;
-+			lro->mss = tcp_data_len;
-+			lro->data_csum = mlx4_en_lro_tcp_data_csum(iph, th,
-+						tcp_data_len);
-+
-+			/* Handle vlans */
-+			if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+				lro->vlan_prio = cqe->sl_vid;
-+				lro->has_vlan = 1;
-+			} else
-+				lro->has_vlan = 0;
-+
-+			/* Handle timestamps */
-+			if (tcp_hlen != sizeof(*th)) {
-+				lro->tsval = tsval;
-+				lro->tsecr = tsecr;
-+				lro->has_timestamp = 1;
-+			} else {
-+				lro->tsval = (u32) ~0UL;
-+				lro->has_timestamp = 0;
-+			}
-+
-+			/* Activate this session */
-+			lro->expires = jiffies + HZ / 25;
-+			hlist_del(&lro->node);
-+			index = LRO_INDEX(th, mdev->profile.num_lro);
-+
-+			hlist_add_head(&lro->node, &ring->lro_hash[index]);
-+			hlist_add_head(&lro->flush_node, &ring->lro_flush);
-+			priv->port_stats.lro_aggregated++;
-+			return 0;
-+		}
-+	} else {
-+		priv->port_stats.lro_no_desc++;
-+	}
-+
-+flush_session:
-+	if (lro)
-+		mlx4_en_lro_flush_single(priv, ring, lro);
-+sync_device:
-+	dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
-+					 MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+	return -1;
-+}
-+
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_free, node) {
-+		hlist_del(&lro->node);
-+		kfree(lro);
-+	}
-+	kfree(ring->lro_hash);
-+}
-+
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro)
-+{
-+	struct mlx4_en_lro *lro;
-+	int i;
-+
-+	INIT_HLIST_HEAD(&ring->lro_free);
-+	INIT_HLIST_HEAD(&ring->lro_flush);
-+	ring->lro_hash = kmalloc(sizeof(struct hlist_head) * num_lro,
-+				 GFP_KERNEL);
-+	if (!ring->lro_hash)
-+		return -ENOMEM;
-+
-+	for (i = 0; i < num_lro; i++) {
-+		INIT_HLIST_HEAD(&ring->lro_hash[i]);
-+		lro = kzalloc(sizeof(struct mlx4_en_lro), GFP_KERNEL);
-+		if (!lro) {
-+			mlx4_en_lro_destroy(ring);
-+			return -ENOMEM;
-+		}
-+		INIT_HLIST_NODE(&lro->node);
-+		INIT_HLIST_NODE(&lro->flush_node);
-+		hlist_add_head(&lro->node, &ring->lro_free);
-+	}
-+	return 0;
-+}
-+
-+
 diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
-index 6199f4c..92f91d2 100644
+index 2d57ae6..3b394d7 100644
 --- a/drivers/net/mlx4/en_netdev.c
 +++ b/drivers/net/mlx4/en_netdev.c
 @@ -333,7 +333,8 @@ void mlx4_en_netpoll(struct net_device *dev)
@@ -625,10 +57,10 @@
 -		napi_synchronize(&cq->napi);
 +		while (test_bit(__LINK_STATE_RX_SCHED, &cq->poll_dev->state))
 +			msleep(1);
- 		mlx4_en_process_rx_cq(dev, cq, 0);
- 		spin_unlock_irqrestore(&cq->lock, flags);
- 	}
-@@ -757,8 +758,6 @@ void mlx4_en_stop_port(struct net_device *dev)
+ 		if (priv->rx_ring[i].use_frags)
+ 			mlx4_en_process_rx_cq(dev, cq, 0);
+ 		else
+@@ -755,8 +756,6 @@ void mlx4_en_stop_port(struct net_device *dev)
  	/* Free RX Rings */
  	for (i = 0; i < priv->rx_ring_num; i++) {
  		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -637,226 +69,11 @@
  		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
  	}
  }
-diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
-index 6e96d0a..2b0ca84 100644
---- a/drivers/net/mlx4/en_params.c
-+++ b/drivers/net/mlx4/en_params.c
-@@ -142,21 +142,6 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
-  * Ethtool support
-  */
- 
--static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
--{
--	int i;
--
--	priv->port_stats.lro_aggregated = 0;
--	priv->port_stats.lro_flushed = 0;
--	priv->port_stats.lro_no_desc = 0;
--
--	for (i = 0; i < priv->rx_ring_num; i++) {
--		priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
--		priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
--		priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
--	}
--}
--
- static void
- mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
- {
-@@ -266,8 +251,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
- 
- 	spin_lock_bh(&priv->stats_lock);
- 
--	mlx4_en_update_lro_stats(priv);
--
- 	for (i = 0; i < NUM_MAIN_STATS; i++)
- 		data[index++] = ((unsigned long *) &priv->stats)[i];
- 	for (i = 0; i < NUM_PORT_STATS; i++)
-@@ -457,7 +440,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
- 	.get_rx_csum = mlx4_en_get_rx_csum,
- 	.set_rx_csum = mlx4_en_set_rx_csum,
- 	.get_tx_csum = ethtool_op_get_tx_csum,
--	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
-+	.set_tx_csum = ethtool_op_set_tx_csum,
- 	.get_strings = mlx4_en_get_strings,
- 	.get_sset_count = mlx4_en_get_sset_count,
- 	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
 diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
-index 5a95d74..16f07bc 100644
+index 9d144cd..14b5faf 100644
 --- a/drivers/net/mlx4/en_rx.c
 +++ b/drivers/net/mlx4/en_rx.c
-@@ -37,6 +37,7 @@
- #include <linux/if_ether.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/etherdevice.h>
- 
- #include "mlx4_en.h"
- 
-@@ -51,18 +52,6 @@ static void mlx4_en_srq_event(struct mlx
- 	return;
- }
- 
--static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
--				   void **ip_hdr, void **tcpudp_hdr,
--				   u64 *hdr_flags, void *priv)
--{
--	*mac_hdr = page_address(frags->page) + frags->page_offset;
--	*ip_hdr = *mac_hdr + ETH_HLEN;
--	*tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
--	*hdr_flags = LRO_IPV4 | LRO_TCP;
--
--	return 0;
--}
--
- static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
- 			      struct mlx4_en_rx_desc *rx_desc,
- 			      struct skb_frag_struct *skb_frags,
-@@ -372,23 +361,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- 	}
- 	ring->buf = ring->wqres.buf.direct.buf;
- 
--	/* Configure lro mngr */
--	memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
--	ring->lro.dev = priv->dev;
--	ring->lro.features = LRO_F_NAPI;
--	ring->lro.frag_align_pad = NET_IP_ALIGN;
--	ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
--	ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
--	ring->lro.max_desc = mdev->profile.num_lro;
--	ring->lro.max_aggr = MAX_SKB_FRAGS;
--	ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
--				    sizeof(struct net_lro_desc),
--				    GFP_KERNEL);
--	if (!ring->lro.lro_arr) {
--		mlx4_err(mdev, "Failed to allocate lro array\n");
-+	/* Allocate LRO sessions */
-+	if (mlx4_en_lro_init(ring, mdev->profile.num_lro)) {
-+		mlx4_err(mdev, "Failed allocating lro sessions\n");
- 		goto err_map;
- 	}
--	ring->lro.get_frag_header = mlx4_en_get_frag_header;
- 
- 	return 0;
- 
-@@ -501,7 +478,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 
--	kfree(ring->lro.lro_arr);
-+	mlx4_en_lro_destroy(ring);
- 	mlx4_en_unmap_buffer(&ring->wqres.buf);
- 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
- 	vfree(ring->rx_info);
-@@ -522,12 +499,12 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- 
- 
- /* Unmap a completed descriptor and free unused pages */
--static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
--				    struct mlx4_en_rx_desc *rx_desc,
--				    struct skb_frag_struct *skb_frags,
--				    struct skb_frag_struct *skb_frags_rx,
--				    struct mlx4_en_rx_alloc *page_alloc,
--				    int length)
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct mlx4_en_frag_info *frag_info;
-@@ -570,11 +547,11 @@ fail:
- }
- 
- 
--static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
--				      struct mlx4_en_rx_desc *rx_desc,
--				      struct skb_frag_struct *skb_frags,
--				      struct mlx4_en_rx_alloc *page_alloc,
--				      unsigned int length)
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct sk_buff *skb;
-@@ -663,11 +640,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 	struct mlx4_cqe *cqe;
- 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
- 	struct skb_frag_struct *skb_frags;
--	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
- 	struct mlx4_en_rx_desc *rx_desc;
- 	struct sk_buff *skb;
- 	int index;
--	int nr;
- 	unsigned int length;
- 	int polled = 0;
- 	int ip_summed;
-@@ -718,38 +693,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 			if ((cqe->status & MLX4_CQE_STATUS_IPOK) &&
- 			    (cqe->checksum == 0xffff)) {
- 				priv->port_stats.rx_chksum_good++;
--				/* This packet is eligible for LRO if it is:
--				 * - DIX Ethernet (type interpretation)
--				 * - TCP/IP (v4)
--				 * - without IP options
--				 * - not an IP fragment */
--				if (mlx4_en_can_lro(cqe->status) &&
--				    dev->features & NETIF_F_LRO) {
--
--					nr = mlx4_en_complete_rx_desc(
--						priv, rx_desc,
--						skb_frags, lro_frags,
--						ring->page_alloc, length);
--					if (!nr)
--						goto next;
--
--					if (priv->vlgrp && (cqe->vlan_my_qpn &
--						MLX4_CQE_VLAN_PRESENT_MASK)) {
--						lro_vlan_hwaccel_receive_frags(
--						       &ring->lro, lro_frags,
--						       length, length,
--						       priv->vlgrp,
--						       be16_to_cpu(cqe->sl_vid),
--						       NULL, 0);
--					} else
--						lro_receive_frags(&ring->lro,
--								  lro_frags,
--								  length,
--								  length,
--								  NULL, 0);
--
-+				if (mdev->profile.num_lro &&
-+				    !mlx4_en_lro_rx(priv, ring, rx_desc,
-+						    skb_frags, length, cqe))
- 					goto next;
--				}
- 
- 				/* LRO not possible, complete processing here */
- 				ip_summed = CHECKSUM_UNNECESSARY;
-@@ -790,13 +737,15 @@ next:
- 		if (++polled == budget) {
- 			/* We are here because we reached the NAPI budget -
- 			 * flush only pending LRO sessions */
--			lro_flush_all(&ring->lro);
-+			if (mdev->profile.num_lro)
-+				mlx4_en_lro_flush(priv, ring, 0);
- 			goto out;
- 		}
- 	}
- 
- 	/* If CQ is empty flush all LRO sessions unconditionally */
--	lro_flush_all(&ring->lro);
-+	if (mdev->profile.num_lro)
-+		mlx4_en_lro_flush(priv, ring, 1);
- 
- out:
- 	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
-@@ -820,30 +769,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
+@@ -1041,30 +1041,34 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
  	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
  
  	if (priv->port_up)
@@ -877,8 +94,8 @@
  	int done;
 +	int work = min(*budget, poll_dev->quota);
  
--	done = mlx4_en_process_rx_cq(dev, cq, budget);
-+	done = mlx4_en_process_rx_cq(dev, cq, work);
+-	done = cq->process_cq(dev, cq, budget);
++	done = cq->process_cq(dev, cq, work);
 +	dev->quota -= done;
 +	*budget -= done;
  
@@ -901,76 +118,11 @@
  }
  
  
-diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
-index 2297fec..59f8772 100644
---- a/drivers/net/mlx4/en_tx.c
-+++ b/drivers/net/mlx4/en_tx.c
-@@ -37,6 +37,7 @@
- #include <linux/skbuff.h>
- #include <linux/if_vlan.h>
- #include <linux/vmalloc.h>
-+#include <linux/tcp.h>
- 
- #include "mlx4_en.h"
- 
 diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
-index 1011803..7c8c489 100644
+index ebaed67..456cd91 100644
 --- a/drivers/net/mlx4/mlx4_en.h
 +++ b/drivers/net/mlx4/mlx4_en.h
-@@ -38,7 +38,7 @@
- #include <linux/list.h>
- #include <linux/mutex.h>
- #include <linux/netdevice.h>
--#include <linux/inet_lro.h>
-+#include <net/checksum.h>
- 
- #include <linux/mlx4/device.h>
- #include <linux/mlx4/qp.h>
-@@ -268,11 +268,42 @@ struct mlx4_en_rx_desc {
- 	struct mlx4_wqe_data_seg data[0];
- };
- 
-+struct mlx4_en_lro {
-+	struct hlist_node node;
-+	struct hlist_node flush_node;
-+
-+	/* Id fields come first: */
-+	u32 saddr;
-+	u32 daddr;
-+	u32 sport_dport;
-+	u32 next_seq;
-+	u16 tot_len;
-+	u8 psh;
-+
-+	u32 tsval;
-+	u32 tsecr;
-+	u32 ack_seq;
-+	u16 window;
-+	__be16 vlan_prio;
-+	u16 has_vlan;
-+	u16 has_timestamp;
-+	u16 mss;
-+	__wsum  data_csum;
-+
-+	unsigned long expires;
-+	struct sk_buff *skb;
-+	struct sk_buff *skb_last;
-+};
-+
-+
- struct mlx4_en_rx_ring {
- 	struct mlx4_srq srq;
- 	struct mlx4_hwq_resources wqres;
- 	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
--	struct net_lro_mgr lro;
-+	struct mlx4_en_lro lro;
-+	struct hlist_head *lro_hash;
-+	struct hlist_head lro_free;
-+	struct hlist_head lro_flush;
- 	u32 size ;	/* number of Rx descs*/
- 	u32 actual_size;
- 	u32 size_mask;
-@@ -311,7 +342,7 @@ struct mlx4_en_cq {
+@@ -328,7 +328,7 @@ struct mlx4_en_cq {
  	int                     ring;
  	spinlock_t              lock;
  	struct net_device      *dev;
@@ -979,43 +131,12 @@
  	/* Per-core Tx cq processing support */
  	struct timer_list timer;
  	int size;
-@@ -541,7 +572,7 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- int mlx4_en_process_rx_cq(struct net_device *dev,
- 			  struct mlx4_en_cq *cq,
- 			  int budget);
+@@ -571,7 +571,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
+ int mlx4_en_process_rx_cq_skb(struct net_device *dev,
+ 			      struct mlx4_en_cq *cq,
+ 			      int budget);
 -int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
 +int mlx4_en_poll_rx_cq(struct net_device *poll_dev, int *budget);
  void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
  			     int is_tx, int rss, int qpn, int cqn, int srqn,
  			     struct mlx4_qp_context *context);
-@@ -552,6 +583,27 @@ void mlx4_en_calc_rx_buf(struct net_device *dev);
- void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
- 				 struct mlx4_en_rss_map *rss_map,
- 				 int num_entries, int num_rings);
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all);
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+		   struct mlx4_en_rx_desc *rx_desc,
-+		   struct skb_frag_struct *skb_frags,
-+		   unsigned int length, struct mlx4_cqe *cqe);
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring);
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro);
-+
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv* priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length);
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv* priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length);
-+
- void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
- int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
- void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
--- 
-1.5.4
-

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/mlx4_en_0110_no_set_flags.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/mlx4_en_0110_no_set_flags.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/mlx4_en_0110_no_set_flags.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -7,9 +7,9 @@
 --- ofed_kernel.orig/drivers/net/mlx4/en_params.c
 +++ ofed_kernel/drivers/net/mlx4/en_params.c
 @@ -593,8 +593,6 @@ const struct ethtool_ops mlx4_en_ethtool
- 	.get_pauseparam = mlx4_en_get_pauseparam,
  	.set_pauseparam = mlx4_en_set_pauseparam,
  	.get_ringparam = mlx4_en_get_ringparam,
+ 	.set_ringparam = mlx4_en_set_ringparam,
 -	.get_flags = ethtool_op_get_flags,
 -	.set_flags = ethtool_op_set_flags,
  };

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/sdp_7277_to_2_6_11.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/sdp_7277_to_2_6_11.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/sdp_7277_to_2_6_11.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -6,7 +6,7 @@
 ===================================================================
 --- ofed_kernel-2.6.9_U4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
 +++ ofed_kernel-2.6.9_U4/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -576,7 +576,6 @@ adjudge_to_death:
+@@ -580,7 +580,6 @@ adjudge_to_death:
  		/* TODO: tcp_fin_time to get timeout */
  		sdp_dbg(sk, "%s: entering time wait refcnt %d\n", __func__,
  			atomic_read(&sk->sk_refcnt));
@@ -14,15 +14,16 @@
  	}
  
  	/* TODO: limit number of orphaned sockets.
-@@ -850,7 +849,6 @@ void sdp_cancel_dreq_wait_timeout(struct
+@@ -860,8 +859,6 @@ void sdp_cancel_dreq_wait_timeout(struct
  		/* The timeout hasn't reached - need to clean ref count */
  		sock_put(&ssk->isk.sk, SOCK_REF_DREQ_TO);
  	}
+-
 -	atomic_dec(ssk->isk.sk.sk_prot->orphan_count);
  }
  
  void sdp_destroy_work(struct work_struct *work)
-@@ -890,9 +888,6 @@ void sdp_dreq_wait_timeout_work(struct w
+@@ -901,9 +898,6 @@ void sdp_dreq_wait_timeout_work(struct w
  
  	sdp_sk(sk)->dreq_wait_timeout = 0;
  
@@ -32,7 +33,7 @@
  	sdp_exch_state(sk, TCPF_LAST_ACK | TCPF_FIN_WAIT1, TCP_TIME_WAIT);
  
  	release_sock(sk);
-@@ -2131,7 +2126,6 @@ void sdp_urg(struct sdp_sock *ssk, struc
+@@ -2163,7 +2157,6 @@ void sdp_urg(struct sdp_sock *ssk, struc
  
  static atomic_t sockets_allocated;
  static atomic_t memory_allocated;
@@ -40,7 +41,7 @@
  static int memory_pressure;
  struct proto sdp_proto = {
          .close       = sdp_close,
-@@ -2152,13 +2146,11 @@ struct proto sdp_proto = {
+@@ -2184,13 +2177,11 @@ struct proto sdp_proto = {
  	.sockets_allocated = &sockets_allocated,
  	.memory_allocated = &memory_allocated,
  	.memory_pressure = &memory_pressure,
@@ -56,7 +57,7 @@
  	.name	     = "SDP",
  };
  
-@@ -2517,9 +2509,6 @@ static void __exit sdp_exit(void)
+@@ -2569,9 +2560,6 @@ static void __exit sdp_exit(void)
  	sock_unregister(PF_INET_SDP);
  	proto_unregister(&sdp_proto);
  

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/t3_hw_to_2_6_5-7_244.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/t3_hw_to_2_6_5-7_244.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/backport/2.6.9_U7/t3_hw_to_2_6_5-7_244.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,13 +1,8 @@
----
- drivers/net/cxgb3/adapter.h |    1 +
- drivers/net/cxgb3/t3_hw.c   |    6 ++----
- 2 files changed, 3 insertions(+), 4 deletions(-)
-
-Index: ofed_kernel/drivers/net/cxgb3/adapter.h
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/adapter.h
-+++ ofed_kernel/drivers/net/cxgb3/adapter.h
-@@ -194,6 +194,7 @@ struct adapter {
+diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
+index 21dad82..1c4e828 100644
+--- a/drivers/net/cxgb3/adapter.h
++++ b/drivers/net/cxgb3/adapter.h
+@@ -207,6 +207,7 @@ struct adapter {
  	struct list_head adapter_list;
  	void __iomem *regs;
  	struct pci_dev *pdev;
@@ -15,11 +10,11 @@
  	unsigned long registered_device_map;
  	unsigned long open_device_map;
  	unsigned long flags;
-Index: ofed_kernel/drivers/net/cxgb3/t3_hw.c
-===================================================================
---- ofed_kernel.orig/drivers/net/cxgb3/t3_hw.c
-+++ ofed_kernel/drivers/net/cxgb3/t3_hw.c
-@@ -3494,7 +3494,7 @@ static int t3_reset_adapter(struct adapt
+diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
+index 533fc74..f6c000e 100644
+--- a/drivers/net/cxgb3/t3_hw.c
++++ b/drivers/net/cxgb3/t3_hw.c
+@@ -3534,7 +3534,7 @@ static int t3_reset_adapter(struct adapter *adapter)
  	uint16_t devid = 0;
  
  	if (save_and_restore_pcie)
@@ -28,7 +23,7 @@
  	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
  
  	/*
-@@ -3512,7 +3512,7 @@ static int t3_reset_adapter(struct adapt
+@@ -3552,7 +3552,7 @@ static int t3_reset_adapter(struct adapter *adapter)
  		return -1;
  
  	if (save_and_restore_pcie)
@@ -37,12 +32,12 @@
  	return 0;
  }
  
-@@ -3640,8 +3640,6 @@ int t3_prep_adapter(struct adapter *adap
+@@ -3688,8 +3688,6 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
  
  		memcpy(adapter->port[i]->dev_addr, hw_addr,
  		       ETH_ALEN);
 -		memcpy(adapter->port[i]->perm_addr, hw_addr,
 -		       ETH_ALEN);
- 		init_link_config(&p->link_config, p->port_type->caps);
+ 		init_link_config(&p->link_config, p->phy.caps);
  		p->phy.ops->power_down(&p->phy, 1);
- 		if (!(p->port_type->caps & SUPPORTED_IRQ))
+ 		if (!(p->phy.caps & SUPPORTED_IRQ))

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/cma_0100_unified_tcp_ports.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/cma_0100_unified_tcp_ports.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/cma_0100_unified_tcp_ports.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -13,13 +13,13 @@
 Signed-off-by: Chien Tung <chien.tin.tung at intel.com>
 ---
 
- drivers/infiniband/core/cma.c |   43 ++++++++++++++++++++++++++++++++++++++++-
- 1 files changed, 42 insertions(+), 1 deletions(-)
+ drivers/infiniband/core/cma.c |   43 +++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 42 insertions(+), 1 deletion(-)
 
-Index: ofed_kernel/drivers/infiniband/core/cma.c
+Index: ofa_kernel-1.4/drivers/infiniband/core/cma.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/core/cma.c
-+++ ofed_kernel/drivers/infiniband/core/cma.c
+--- ofa_kernel-1.4.orig/drivers/infiniband/core/cma.c
++++ ofa_kernel-1.4/drivers/infiniband/core/cma.c
 @@ -58,6 +58,11 @@ static int tavor_quirk = 0;
  module_param_named(tavor_quirk, tavor_quirk, int, 0644);
  MODULE_PARM_DESC(tavor_quirk, "Tavor performance quirk: limit MTU to 1K if > 0");
@@ -40,7 +40,7 @@
  	struct hlist_node	node;
  	struct list_head	list; /* listen_any_list or cma_device.list */
  	struct list_head	listen_list; /* per device listens */
-@@ -815,6 +821,8 @@ static void cma_release_port(struct rdma_id_private *id_priv)
+@@ -815,6 +821,8 @@ static void cma_release_port(struct rdma
  		kfree(bind_list);
  	}
  	mutex_unlock(&lock);
@@ -49,7 +49,7 @@
  }
  
  static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
-@@ -2049,6 +2057,34 @@ static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
+@@ -2049,6 +2057,34 @@ static int cma_use_port(struct idr *ps, 
  	return 0;
  }
  
@@ -69,7 +69,7 @@
 +		sock_release(sock);
 +		return ret;
 +	}
-+	size = ip_addr_size(&id_priv->id.route.addr.src_addr);
++	size = ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr);
 +	ret = sock->ops->getname(sock,
 +			(struct sockaddr *) &id_priv->id.route.addr.src_addr,
 +			&size, 0);
@@ -84,7 +84,7 @@
  static int cma_get_port(struct rdma_id_private *id_priv)
  {
  	struct idr *ps;
-@@ -2060,6 +2096,11 @@ static int cma_get_port(struct rdma_id_private *id_priv)
+@@ -2060,6 +2096,11 @@ static int cma_get_port(struct rdma_id_p
  		break;
  	case RDMA_PS_TCP:
  		ps = &tcp_ps;
@@ -96,7 +96,7 @@
  		break;
  	case RDMA_PS_UDP:
  		ps = &udp_ps;
-@@ -2077,7 +2118,7 @@ static int cma_get_port(struct rdma_id_private *id_priv)
+@@ -2077,7 +2118,7 @@ static int cma_get_port(struct rdma_id_p
  	else
  		ret = cma_use_port(ps, id_priv);
  	mutex_unlock(&lock);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/cma_0200_create_cm_id_even_when_port_is_down.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/cma_0200_create_cm_id_even_when_port_is_down.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/cma_0200_create_cm_id_even_when_port_is_down.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,113 @@
+From fdf4fe134b91a0079c4eb3a17dcaa2d5e2cf8db9 Mon Sep 17 00:00:00 2001
+From: Yossi Etigin <yosefe at Voltaire.COM>
+Date: Mon, 30 Mar 2009 20:06:12 +0300
+Subject: [PATCH] rdma_cm: create cm id even when port is down
+
+  When rdma_resolve_addr() is called while the relevant port is down, the call
+fails and the rdma_cm id is not bound to the device. The application therefore
+has no device handle and cannot wait for the port to become active. The call
+fails because IPoIB has not joined the multicast group, so the SA has no
+multicast record from which to take a qkey.
+  The proposed patch makes qkey resolution lazy: cma_set_qkey() sets
+id_priv->qkey only if it is not already set, and is called just before the
+qkey is actually required.
+
+Signed-off-by: Yossi Etigin <yosefe at voltaire.com>
+---
+ drivers/infiniband/core/cma.c |   41 +++++++++++++++++++++++++++--------------
+ 1 files changed, 27 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index d951896..b92120c 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -296,21 +296,25 @@ static void cma_detach_from_dev(struct rdma_id_private *id_priv)
+ 	id_priv->cma_dev = NULL;
+ }
+ 
+-static int cma_set_qkey(struct ib_device *device, u8 port_num,
+-			enum rdma_port_space ps,
+-			struct rdma_dev_addr *dev_addr, u32 *qkey)
++static int cma_set_qkey(struct rdma_id_private *id_priv)
+ {
+ 	struct ib_sa_mcmember_rec rec;
+ 	int ret = 0;
+ 
+-	switch (ps) {
++	if (id_priv->qkey)
++		return 0;
++
++	switch (id_priv->id.ps) {
+ 	case RDMA_PS_UDP:
+-		*qkey = RDMA_UDP_QKEY;
++		id_priv->qkey = RDMA_UDP_QKEY;
+ 		break;
+ 	case RDMA_PS_IPOIB:
+-		ib_addr_get_mgid(dev_addr, &rec.mgid);
+-		ret = ib_sa_get_mcmember_rec(device, port_num, &rec.mgid, &rec);
+-		*qkey = be32_to_cpu(rec.qkey);
++		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
++		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
++					     id_priv->id.port_num, &rec.mgid,
++					     &rec);
++		if (!ret)
++			id_priv->qkey = be32_to_cpu(rec.qkey);
+ 		break;
+ 	default:
+ 		break;
+@@ -340,12 +344,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
+ 		ret = ib_find_cached_gid(cma_dev->device, &gid,
+ 					 &id_priv->id.port_num, NULL);
+ 		if (!ret) {
+-			ret = cma_set_qkey(cma_dev->device,
+-					   id_priv->id.port_num,
+-					   id_priv->id.ps, dev_addr,
+-					   &id_priv->qkey);
+-			if (!ret)
+-				cma_attach_to_dev(id_priv, cma_dev);
++			cma_attach_to_dev(id_priv, cma_dev);
+ 			break;
+ 		}
+ 	}
+@@ -577,6 +576,10 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
+ 	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
+ 
+ 	if (cma_is_ud_ps(id_priv->id.ps)) {
++		ret = cma_set_qkey(id_priv);
++		if (ret)
++			return ret;
++
+ 		qp_attr->qkey = id_priv->qkey;
+ 		*qp_attr_mask |= IB_QP_QKEY;
+ 	} else {
+@@ -2167,6 +2170,12 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
+ 			event.status = ib_event->param.sidr_rep_rcvd.status;
+ 			break;
+ 		}
++		ret = cma_set_qkey(id_priv);
++		if (ret) {
++			event.event = RDMA_CM_EVENT_ADDR_ERROR;
++			event.status = -EINVAL;
++			break;
++		}
+ 		if (id_priv->qkey != rep->qkey) {
+ 			event.event = RDMA_CM_EVENT_UNREACHABLE;
+ 			event.status = -EINVAL;
+@@ -2446,10 +2455,14 @@ static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
+ 			     const void *private_data, int private_data_len)
+ {
+ 	struct ib_cm_sidr_rep_param rep;
++	int ret;
+ 
+ 	memset(&rep, 0, sizeof rep);
+ 	rep.status = status;
+ 	if (status == IB_SIDR_SUCCESS) {
++		ret = cma_set_qkey(id_priv);
++		if (ret)
++			return ret;
+ 		rep.qp_num = id_priv->qp_num;
+ 		rep.qkey = id_priv->qkey;
+ 	}
+-- 
+1.5.4.3
+
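
The lazy-resolution shape the description above refers to, reduced to a
stand-alone sketch (plain userspace C; the struct, the stub lookup and all
names are illustrative, not the kernel's cma code):

#include <stdio.h>

struct id_ctx {
    unsigned int qkey;              /* 0 means "not resolved yet" */
};

/* Stand-in for the SA multicast-record query, which can only succeed
 * once the port is active and IPoIB has joined the group. */
static int query_qkey(unsigned int *qkey)
{
    *qkey = 0x1b1b;                 /* made-up value */
    return 0;
}

/* Resolve on first use: callers invoke this right before the qkey is
 * actually needed, so binding the id no longer depends on port state. */
static int ctx_set_qkey(struct id_ctx *ctx)
{
    if (ctx->qkey)                  /* already resolved earlier */
        return 0;
    return query_qkey(&ctx->qkey);
}

int main(void)
{
    struct id_ctx ctx = { 0 };

    if (ctx_set_qkey(&ctx))         /* first use does the lookup */
        return 1;
    printf("qkey %#x\n", ctx.qkey);
    return 0;
}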

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/cma_0300_use_rate_from_ipoib_bcast_when_join_mcast.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/cma_0300_use_rate_from_ipoib_bcast_when_join_mcast.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/cma_0300_use_rate_from_ipoib_bcast_when_join_mcast.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,26 @@
+rdma_cm: Use rate from ipoib broadcast when joining ipoib multicast
+
+   When joining an IPoIB multicast group, use the same rate as the broadcast
+group. Otherwise, if rdma_cm creates the group before IPoIB does, it may get a
+different rate, and IPoIB will later fail to join the same group because it
+uses strict rate selection.
+
+Signed-off-by: Yossi Etigin <yosefe at voltaire.com>
+
+---
+
+Index: b/drivers/infiniband/core/cma.c
+===================================================================
+--- a/drivers/infiniband/core/cma.c	2009-03-30 19:53:19.000000000 +0300
++++ b/drivers/infiniband/core/cma.c	2009-03-30 21:38:02.000000000 +0300
+@@ -2726,6 +2726,10 @@ static int cma_join_ib_multicast(struct
+  		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
+  		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
+
++	if (id_priv->id.ps == RDMA_PS_IPOIB)
++		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
++			     IB_SA_MCMEMBER_REC_RATE_SELECTOR;
++
+  	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
+  						id_priv->id.port_num, &rec,
+  						comp_mask, GFP_KERNEL,

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0220_sysfs_lifetime.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0220_sysfs_lifetime.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0220_sysfs_lifetime.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,46 @@
+ib_core: fix class-device lifetime to avoid race leading to kernel Oops.
+
+Unregister sysfs files in ib_unregister_device(). It will not return
+until all sysfs files are gone (and no open file handles remain). This
+way, the low-level driver will not do its cleanup while there are still
+outstanding accesses to sysfs.
+
+Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
+Signed-off-by: Roland Dreier <rolandd at cisco.com>
+
+Index: ofed_kernel/drivers/infiniband/core/device.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/device.c	2009-02-24 12:19:25.765990000 +0200
++++ ofed_kernel/drivers/infiniband/core/device.c	2009-02-24 12:19:46.598236000 +0200
+@@ -199,7 +199,7 @@ void ib_dealloc_device(struct ib_device 
+ 
+ 	BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);
+ 
+-	ib_device_unregister_sysfs(device);
++	kobject_put(&device->dev.kobj);
+ }
+ EXPORT_SYMBOL(ib_dealloc_device);
+ 
+@@ -356,6 +356,8 @@ void ib_unregister_device(struct ib_devi
+ 
+ 	mutex_unlock(&device_mutex);
+ 
++	ib_device_unregister_sysfs(device);
++
+ 	spin_lock_irqsave(&device->client_data_lock, flags);
+ 	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ 		kfree(context);
+Index: ofed_kernel/drivers/infiniband/core/sysfs.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/sysfs.c	2009-02-24 12:19:25.767996000 +0200
++++ ofed_kernel/drivers/infiniband/core/sysfs.c	2009-02-24 12:45:06.245588000 +0200
+@@ -863,6 +863,9 @@ void ib_device_unregister_sysfs(struct i
+ 	struct kobject *p, *t;
+ 	struct ib_port *port;
+ 
++	/* Hold kobject until ib_dealloc_device() */
++	kobject_get(&device->dev.kobj);
++
+ 	list_for_each_entry_safe(p, t, &device->port_list, entry) {
+ 		list_del(&p->entry);
+ 		port = container_of(p, struct ib_port, kobj);
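
The lifetime rule the patch enforces, shown as a minimal refcount sketch
(plain userspace C with made-up names; the real code pins the device's
kobject for this rather than an open-coded counter):

#include <stdio.h>
#include <stdlib.h>

struct obj {
    int refs;
};

static void obj_put(struct obj *o)
{
    if (--o->refs == 0) {           /* last reference really frees it */
        printf("freeing\n");
        free(o);
    }
}

/* Teardown of the externally visible parts: pin the object first so it
 * cannot be freed underneath us; the pin is dropped in obj_dealloc(). */
static void obj_unregister(struct obj *o)
{
    o->refs++;
    printf("removing visible files\n");
}

static void obj_dealloc(struct obj *o)
{
    obj_put(o);                     /* drop the pin from obj_unregister() */
}

int main(void)
{
    struct obj *o = calloc(1, sizeof(*o));

    if (!o)
        return 1;
    o->refs = 1;                    /* creator's reference */
    obj_unregister(o);
    obj_put(o);                     /* creator drops its own reference */
    obj_dealloc(o);                 /* only now is the memory released */
    return 0;
}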

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0230_Fix_RMPP_header_RRespTime_manipulation.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0230_Fix_RMPP_header_RRespTime_manipulation.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0230_Fix_RMPP_header_RRespTime_manipulation.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,31 @@
+IB_core: Fix RMPP header RRespTime manipulation.
+
+Fix ib_set_rmpp_flags() to use the correct bit mask for RRespTime.
+In the 8-bit field of the RMPP header, the first 5 bits
+are RRespTime and next 3 bits are RMPPFlags. Hence to retain
+the first 5 bits, the mask should be 0xF8 instead of 0xF1.
+
+Signed-off-by: Ramachandra K <ramachandra.kuchimanchi at qlogic.com>
+---
+
+diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
+index 5f6c40f..1a0f409 100644
+--- a/include/rdma/ib_mad.h
++++ b/include/rdma/ib_mad.h
+@@ -290,7 +290,7 @@ static inline void ib_set_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr, u8 rtime)
+  */
+ static inline void ib_set_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr, u8 flags)
+ {
+-	rmpp_hdr->rmpp_rtime_flags = (rmpp_hdr->rmpp_rtime_flags & 0xF1) |
++	rmpp_hdr->rmpp_rtime_flags = (rmpp_hdr->rmpp_rtime_flags & 0xF8) |
+ 				     (flags & 0x7);
+ }
+ 
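
The bit arithmetic behind the mask change, as a small stand-alone check
(the field value and flags below are made up for illustration):

#include <stdio.h>

/* rmpp_rtime_flags layout: bits 7..3 = RRespTime, bits 2..0 = RMPPFlags */
int main(void)
{
    unsigned char field = 0xAB;     /* RRespTime = 0x15, old flags = 0x3 */
    unsigned char new_flags = 0x4;

    /* correct: keep the upper 5 bits, replace only the lower 3 */
    unsigned char good = (field & 0xF8) | (new_flags & 0x7);
    /* old mask: clears the low RRespTime bit and keeps a stale flag bit */
    unsigned char bad  = (field & 0xF1) | (new_flags & 0x7);

    printf("good=%#x bad=%#x\n", good, bad);    /* prints good=0xac bad=0xa5 */
    return 0;
}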

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0240_fix_null_pointer_dereference_in_local_completions.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0240_fix_null_pointer_dereference_in_local_completions.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0240_fix_null_pointer_dereference_in_local_completions.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,68 @@
+IB_core: fix null pointer dereference in local_completions()
+
+handle_outgoing_dr_smp() can queue a struct ib_mad_local_private *local
+on the mad_agent_priv->local_work work queue with
+local->mad_priv == NULL if device->process_mad() returns
+IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY and
+(!ib_response_mad(&mad_priv->mad.mad) ||
+ !mad_agent_priv->agent.recv_handler).
+
+In this case, local_completions() will be called with
+local->mad_priv == NULL. The code does check for this case and skips
+calling recv_mad_agent->agent.recv_handler(), but recv is still 0, so
+kmem_cache_free() is called with a NULL pointer.
+
+Also, since recv isn't reinitialized each time through the loop,
+it can cause a memory leak if recv should have been zero.
+
+Signed-off-by: Ralph Campbell <ralph.campbell at qlogic.com>
+
+diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
+index 5c54fc2..735ad4e 100644
+--- a/drivers/infiniband/core/mad.c
++++ b/drivers/infiniband/core/mad.c
+@@ -2356,7 +2356,7 @@ static void local_completions(struct work_struct *work)
+ 	struct ib_mad_local_private *local;
+ 	struct ib_mad_agent_private *recv_mad_agent;
+ 	unsigned long flags;
+-	int recv = 0;
++	int free_mad;
+ 	struct ib_wc wc;
+ 	struct ib_mad_send_wc mad_send_wc;
+ 
+@@ -2370,14 +2370,15 @@ static void local_completions(struct work_struct *work)
+ 				   completion_list);
+ 		list_del(&local->completion_list);
+ 		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
++		free_mad = 0;
+ 		if (local->mad_priv) {
+ 			recv_mad_agent = local->recv_mad_agent;
+ 			if (!recv_mad_agent) {
+ 				printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
++				free_mad = 1;
+ 				goto local_send_completion;
+ 			}
+ 
+-			recv = 1;
+ 			/*
+ 			 * Defined behavior is to complete response
+ 			 * before request
+@@ -2422,7 +2423,7 @@ local_send_completion:
+ 
+ 		spin_lock_irqsave(&mad_agent_priv->lock, flags);
+ 		atomic_dec(&mad_agent_priv->refcount);
+-		if (!recv)
++		if (free_mad)
+ 			kmem_cache_free(ib_mad_cache, local->mad_priv);
+ 		kfree(local);
+ 	}

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0250_initialize_mad_agent_priv_before_putting_on_lists.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0250_initialize_mad_agent_priv_before_putting_on_lists.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0250_initialize_mad_agent_priv_before_putting_on_lists.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,58 @@
+IB_core: initialize mad_agent_priv before putting on lists.
+
+There is a potential race in ib_register_mad_agent() where the struct
+ib_mad_agent_private is not fully initialized before it is added
+to the list of agents per IB port. This means the ib_mad_agent_private
+could be seen before the refcount, spin locks, and linked lists
+are initialized.  The fix is to initialize the structure earlier.
+
+Signed-off-by: Ralph Campbell <ralph.campbell at qlogic.com>
+
+diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
+index 735ad4e..dbcd285 100644
+--- a/drivers/infiniband/core/mad.c
++++ b/drivers/infiniband/core/mad.c
+@@ -301,6 +301,16 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
+ 	mad_agent_priv->agent.context = context;
+ 	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
+ 	mad_agent_priv->agent.port_num = port_num;
++	spin_lock_init(&mad_agent_priv->lock);
++	INIT_LIST_HEAD(&mad_agent_priv->send_list);
++	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
++	INIT_LIST_HEAD(&mad_agent_priv->done_list);
++	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
++	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
++	INIT_LIST_HEAD(&mad_agent_priv->local_list);
++	INIT_WORK(&mad_agent_priv->local_work, local_completions);
++	atomic_set(&mad_agent_priv->refcount, 1);
++	init_completion(&mad_agent_priv->comp);
+ 
+ 	spin_lock_irqsave(&port_priv->reg_lock, flags);
+ 	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
+@@ -350,17 +360,6 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
+ 	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
+ 	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
+ 
+-	spin_lock_init(&mad_agent_priv->lock);
+-	INIT_LIST_HEAD(&mad_agent_priv->send_list);
+-	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
+-	INIT_LIST_HEAD(&mad_agent_priv->done_list);
+-	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
+-	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
+-	INIT_LIST_HEAD(&mad_agent_priv->local_list);
+-	INIT_WORK(&mad_agent_priv->local_work, local_completions);
+-	atomic_set(&mad_agent_priv->refcount, 1);
+-	init_completion(&mad_agent_priv->comp);
+-
+ 	return &mad_agent_priv->agent;
+ 
+ error4:
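
The ordering rule being restored here, as a minimal sketch (plain pthreads C
with made-up names; the kernel code publishes the agent onto a per-port
agent list under a spinlock instead):

#include <pthread.h>
#include <stdio.h>

struct agent {
    pthread_mutex_t lock;
    int refcount;
    struct agent *next;             /* registry linkage */
};

static struct agent *registry;
static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

static void agent_register(struct agent *a)
{
    /* 1. initialise everything another thread may touch ... */
    pthread_mutex_init(&a->lock, NULL);
    a->refcount = 1;

    /* 2. ... and only then publish it where other threads can find it */
    pthread_mutex_lock(&registry_lock);
    a->next = registry;
    registry = a;
    pthread_mutex_unlock(&registry_lock);
}

int main(void)
{
    struct agent a;

    agent_register(&a);
    printf("registered, refcount=%d\n", a.refcount);
    return 0;
}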

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0260_ib_post_send_mad_returns_zero_no_completion.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0260_ib_post_send_mad_returns_zero_no_completion.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0260_ib_post_send_mad_returns_zero_no_completion.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,58 @@
+IB_core: ib_post_send_mad() returns zero but doesn't generate send completion.
+
+If ib_post_send_mad() returns zero, it guarantees that there will be
+a callback to the send_buf->mad_agent->send_handler() so that the
+sender can call ib_free_send_mad(). Otherwise, the ib_mad_send_buf
+will be leaked, the mad_agent reference count will never reach zero,
+and the IB device module cannot be unloaded.
+The above can happen without this patch if process_mad() returns
+(IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED).
+
+If process_mad() returns IB_MAD_RESULT_SUCCESS and there is no agent
+registered to receive the mad being sent, handle_outgoing_dr_smp()
+returns zero, which causes a MAD packet that is at the end of the
+directed route to be incorrectly sent on the wire; this doesn't cause
+a hang since the HCA generates a send completion.
+
+Signed-off-by: Ralph Campbell <ralph.campbell at qlogic.com>
+
+diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
+index dbcd285..62a99dc 100644
+--- a/drivers/infiniband/core/mad.c
++++ b/drivers/infiniband/core/mad.c
+@@ -742,9 +742,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
+ 		break;
+ 	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
+ 		kmem_cache_free(ib_mad_cache, mad_priv);
+-		kfree(local);
+-		ret = 1;
+-		goto out;
++		break;
+ 	case IB_MAD_RESULT_SUCCESS:
+ 		/* Treat like an incoming receive MAD */
+ 		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
+@@ -755,10 +753,12 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
+ 						        &mad_priv->mad.mad);
+ 		}
+ 		if (!port_priv || !recv_mad_agent) {
++			/*
++			 * No receiving agent so drop packet and
++			 * generate send completion.
++			 */
+ 			kmem_cache_free(ib_mad_cache, mad_priv);
+-			kfree(local);
+-			ret = 0;
+-			goto out;
++			break;
+ 		}
+ 		local->mad_priv = mad_priv;
+ 		local->recv_mad_agent = recv_mad_agent;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0270_sa_query_update_sm_ah_race.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0270_sa_query_update_sm_ah_race.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0270_sa_query_update_sm_ah_race.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,35 @@
+IB/sa_query: fix update_sm_ah() race condition
+
+Our testing uncovered a race condition in ib_sa_event():
+    spin_lock_irqsave(&port->ah_lock, flags);
+    if (port->sm_ah)
+        kref_put(&port->sm_ah->ref, free_sm_ah);
+    port->sm_ah = NULL;
+    spin_unlock_irqrestore(&port->ah_lock, flags);
+
+    schedule_work(&sa_dev->port[event->element.port_num -
+                  sa_dev->start_port].update_task);
+
+If two events occur back-to-back (e.g., client-reregister and lid change),
+both may pass the spinlock-protected code above before the first scheduled
+update_sm_ah() run updates the port->sm_ah handle.
+The second update_sm_ah() run will then find a non-NULL port->sm_ah and
+simply overwrite it -- resulting in an ah leak.
+
+Fixes bugzilla 1521.
+
+Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
+
+Index: ofed_kernel/drivers/infiniband/core/sa_query.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/core/sa_query.c	2009-03-02 18:30:05.000000000 +0200
++++ ofed_kernel/drivers/infiniband/core/sa_query.c	2009-03-02 18:30:55.133143000 +0200
+@@ -559,6 +559,8 @@ static void update_sm_ah(struct work_str
+ 	}
+ 
+ 	spin_lock_irq(&port->ah_lock);
++	if (port->sm_ah)
++		kref_put(&port->sm_ah->ref, free_sm_ah);
+ 	port->sm_ah = new_ah;
+ 	spin_unlock_irq(&port->ah_lock);
+ 
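
The leak scenario from the description, in miniature (plain userspace C with
made-up names; the real code uses a kref on the SM address handle):

#include <stdio.h>
#include <stdlib.h>

struct ah {
    int refs;
};

static struct ah *cached;           /* plays the role of port->sm_ah */

static void ah_put(struct ah *a)
{
    if (a && --a->refs == 0)
        free(a);
}

/* Fixed update: drop the old handle (if any) before overwriting it.
 * Without the ah_put() here, two back-to-back updates leak the first one. */
static void update_cached_ah(struct ah *fresh)
{
    ah_put(cached);
    cached = fresh;
}

int main(void)
{
    struct ah *a = calloc(1, sizeof(*a));
    struct ah *b = calloc(1, sizeof(*b));

    if (!a || !b)
        return 1;
    a->refs = b->refs = 1;
    update_cached_ah(a);
    update_cached_ah(b);            /* with the fix, 'a' is released here */
    ah_put(cached);                 /* final cleanup releases 'b' */
    return 0;
}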

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0280_warnings_fs_nfs_nfsroot.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0280_warnings_fs_nfs_nfsroot.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/core_0280_warnings_fs_nfs_nfsroot.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,24 @@
+---
+ fs/nfs/nfsroot.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+Index: ofa_kernel-1.4/fs/nfs/nfsroot.c
+===================================================================
+--- ofa_kernel-1.4.orig/fs/nfs/nfsroot.c
++++ ofa_kernel-1.4/fs/nfs/nfsroot.c
+@@ -376,6 +376,7 @@ static int __init root_nfs_init(void)
+ }
+ 
+ 
++#ifndef MODULE
+ /*
+  *  Parse NFS server and directory information passed on the kernel
+  *  command line.
+@@ -396,6 +397,7 @@ static int __init nfs_root_setup(char *l
+ }
+ 
+ __setup("nfsroot=", nfs_root_setup);
++#endif
+ 
+ /***************************************************************************
+ 

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/cxgb3_00600_sfpplus.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/cxgb3_00600_sfpplus.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/cxgb3_00600_sfpplus.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,1898 @@
+diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
+index 2711404..69b5340 100644
+--- a/drivers/net/cxgb3/adapter.h
++++ b/drivers/net/cxgb3/adapter.h
+@@ -54,7 +54,6 @@ struct port_info {
+ 	struct adapter *adapter;
+ 	struct vlan_group *vlan_grp;
+ 	struct sge_qset *qs;
+-	const struct port_type_info *port_type;
+ 	u8 port_id;
+ 	u8 rx_csum_offload;
+ 	u8 nqsets;
+@@ -282,6 +281,7 @@ int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb);
+ void t3_os_ext_intr_handler(struct adapter *adapter);
+ void t3_os_link_changed(struct adapter *adapter, int port_id, int link_status,
+ 			int speed, int duplex, int fc);
++void t3_os_phymod_changed(struct adapter *adap, int port_id);
+ 
+ void t3_sge_start(struct adapter *adap);
+ void t3_sge_stop(struct adapter *adap);
+diff --git a/drivers/net/cxgb3/ael1002.c b/drivers/net/cxgb3/ael1002.c
+index ee140e6..744fac0 100644
+--- a/drivers/net/cxgb3/ael1002.c
++++ b/drivers/net/cxgb3/ael1002.c
+@@ -33,17 +33,57 @@
+ #include "regs.h"
+ 
+ enum {
++	PMD_RSD     = 10,   /* PMA/PMD receive signal detect register */
++	PCS_STAT1_X = 24,   /* 10GBASE-X PCS status 1 register */
++	PCS_STAT1_R = 32,   /* 10GBASE-R PCS status 1 register */
++	XS_LN_STAT  = 24    /* XS lane status register */
++};
++
++enum {
+ 	AEL100X_TX_DISABLE = 9,
+ 	AEL100X_TX_CONFIG1 = 0xc002,
+ 	AEL1002_PWR_DOWN_HI = 0xc011,
+ 	AEL1002_PWR_DOWN_LO = 0xc012,
+ 	AEL1002_XFI_EQL = 0xc015,
+ 	AEL1002_LB_EN = 0xc017,
++	AEL_OPT_SETTINGS = 0xc017,
++	AEL_I2C_CTRL = 0xc30a,
++	AEL_I2C_DATA = 0xc30b,
++	AEL_I2C_STAT = 0xc30c,
++	AEL2005_GPIO_CTRL = 0xc214,
++	AEL2005_GPIO_STAT = 0xc215,
++};
++
++enum { edc_none, edc_sr, edc_twinax };
+ 
+-	LASI_CTRL = 0x9002,
+-	LASI_STAT = 0x9005
++/* PHY module I2C device address */
++#define MODULE_DEV_ADDR 0xa0
++
++#define AEL2005_MODDET_IRQ 4
++
++struct reg_val {
++	unsigned short mmd_addr;
++	unsigned short reg_addr;
++	unsigned short clear_bits;
++	unsigned short set_bits;
+ };
+ 
++static int set_phy_regs(struct cphy *phy, const struct reg_val *rv)
++{
++	int err;
++
++	for (err = 0; rv->mmd_addr && !err; rv++) {
++		if (rv->clear_bits == 0xffff)
++			err = mdio_write(phy, rv->mmd_addr, rv->reg_addr,
++					 rv->set_bits);
++		else
++			err = t3_mdio_change_bits(phy, rv->mmd_addr,
++						  rv->reg_addr, rv->clear_bits,
++						  rv->set_bits);
++	}
++	return err;
++}
++
+ static void ael100x_txon(struct cphy *phy)
+ {
+ 	int tx_on_gpio = phy->addr == 0 ? F_GPIO7_OUT_VAL : F_GPIO2_OUT_VAL;
+@@ -84,23 +124,23 @@ static int ael1002_intr_noop(struct cphy *phy)
+ 	return 0;
+ }
+ 
+-static int ael100x_get_link_status(struct cphy *phy, int *link_ok,
+-				   int *speed, int *duplex, int *fc)
++/*
++ * Get link status for a 10GBASE-R device.
++ */
++static int get_link_status_r(struct cphy *phy, int *link_ok, int *speed,
++			     int *duplex, int *fc)
+ {
+ 	if (link_ok) {
+-		unsigned int status;
+-		int err = mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR, &status);
+-
+-		/*
+-		 * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
+-		 * once more to get the current link state.
+-		 */
+-		if (!err && !(status & BMSR_LSTATUS))
+-			err = mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR,
+-					&status);
++		unsigned int stat0, stat1, stat2;
++		int err = mdio_read(phy, MDIO_DEV_PMA_PMD, PMD_RSD, &stat0);
++
++		if (!err)
++			err = mdio_read(phy, MDIO_DEV_PCS, PCS_STAT1_R, &stat1);
++		if (!err)
++			err = mdio_read(phy, MDIO_DEV_XGXS, XS_LN_STAT, &stat2);
+ 		if (err)
+ 			return err;
+-		*link_ok = !!(status & BMSR_LSTATUS);
++		*link_ok = (stat0 & stat1 & (stat2 >> 12)) & 1;
+ 	}
+ 	if (speed)
+ 		*speed = SPEED_10000;
+@@ -115,15 +155,18 @@ static struct cphy_ops ael1002_ops = {
+ 	.intr_disable = ael1002_intr_noop,
+ 	.intr_clear = ael1002_intr_noop,
+ 	.intr_handler = ael1002_intr_noop,
+-	.get_link_status = ael100x_get_link_status,
++	.get_link_status = get_link_status_r,
+ 	.power_down = ael1002_power_down,
+ };
+ 
+-void t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
+-			 int phy_addr, const struct mdio_ops *mdio_ops)
++int t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
++			int phy_addr, const struct mdio_ops *mdio_ops)
+ {
+-	cphy_init(phy, adapter, phy_addr, &ael1002_ops, mdio_ops);
++	cphy_init(phy, adapter, phy_addr, &ael1002_ops, mdio_ops,
++		  SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE,
++		   "10GBASE-R");
+ 	ael100x_txon(phy);
++	return 0;
+ }
+ 
+ static int ael1006_reset(struct cphy *phy, int wait)
+@@ -131,72 +174,985 @@ static int ael1006_reset(struct cphy *phy, int wait)
+ 	return t3_phy_reset(phy, MDIO_DEV_PMA_PMD, wait);
+ }
+ 
+-static int ael1006_intr_enable(struct cphy *phy)
++static int ael1006_power_down(struct cphy *phy, int enable)
+ {
+-	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
++	return t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR,
++				   BMCR_PDOWN, enable ? BMCR_PDOWN : 0);
+ }
+ 
+-static int ael1006_intr_disable(struct cphy *phy)
++static struct cphy_ops ael1006_ops = {
++	.reset = ael1006_reset,
++	.intr_enable = t3_phy_lasi_intr_enable,
++	.intr_disable = t3_phy_lasi_intr_disable,
++	.intr_clear = t3_phy_lasi_intr_clear,
++	.intr_handler = t3_phy_lasi_intr_handler,
++	.get_link_status = get_link_status_r,
++	.power_down = ael1006_power_down,
++};
++
++int t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
++			     int phy_addr, const struct mdio_ops *mdio_ops)
+ {
+-	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
++	cphy_init(phy, adapter, phy_addr, &ael1006_ops, mdio_ops,
++		  SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE,
++		   "10GBASE-SR");
++	ael100x_txon(phy);
++	return 0;
+ }
+ 
+-static int ael1006_intr_clear(struct cphy *phy)
++static int ael2005_setup_sr_edc(struct cphy *phy)
+ {
+-	u32 val;
++	static struct reg_val regs[] = {
++		{ MDIO_DEV_PMA_PMD, 0xc003, 0xffff, 0x181 },
++		{ MDIO_DEV_PMA_PMD, 0xc010, 0xffff, 0x448a },
++		{ MDIO_DEV_PMA_PMD, 0xc04a, 0xffff, 0x5200 },
++		{ 0, 0, 0, 0 }
++	};
++	static u16 sr_edc[] = {
++		0xcc00, 0x2ff4,
++		0xcc01, 0x3cd4,
++		0xcc02, 0x2015,
++		0xcc03, 0x3105,
++		0xcc04, 0x6524,
++		0xcc05, 0x27ff,
++		0xcc06, 0x300f,
++		0xcc07, 0x2c8b,
++		0xcc08, 0x300b,
++		0xcc09, 0x4009,
++		0xcc0a, 0x400e,
++		0xcc0b, 0x2f72,
++		0xcc0c, 0x3002,
++		0xcc0d, 0x1002,
++		0xcc0e, 0x2172,
++		0xcc0f, 0x3012,
++		0xcc10, 0x1002,
++		0xcc11, 0x25d2,
++		0xcc12, 0x3012,
++		0xcc13, 0x1002,
++		0xcc14, 0xd01e,
++		0xcc15, 0x27d2,
++		0xcc16, 0x3012,
++		0xcc17, 0x1002,
++		0xcc18, 0x2004,
++		0xcc19, 0x3c84,
++		0xcc1a, 0x6436,
++		0xcc1b, 0x2007,
++		0xcc1c, 0x3f87,
++		0xcc1d, 0x8676,
++		0xcc1e, 0x40b7,
++		0xcc1f, 0xa746,
++		0xcc20, 0x4047,
++		0xcc21, 0x5673,
++		0xcc22, 0x2982,
++		0xcc23, 0x3002,
++		0xcc24, 0x13d2,
++		0xcc25, 0x8bbd,
++		0xcc26, 0x2862,
++		0xcc27, 0x3012,
++		0xcc28, 0x1002,
++		0xcc29, 0x2092,
++		0xcc2a, 0x3012,
++		0xcc2b, 0x1002,
++		0xcc2c, 0x5cc3,
++		0xcc2d, 0x314,
++		0xcc2e, 0x2942,
++		0xcc2f, 0x3002,
++		0xcc30, 0x1002,
++		0xcc31, 0xd019,
++		0xcc32, 0x2032,
++		0xcc33, 0x3012,
++		0xcc34, 0x1002,
++		0xcc35, 0x2a04,
++		0xcc36, 0x3c74,
++		0xcc37, 0x6435,
++		0xcc38, 0x2fa4,
++		0xcc39, 0x3cd4,
++		0xcc3a, 0x6624,
++		0xcc3b, 0x5563,
++		0xcc3c, 0x2d42,
++		0xcc3d, 0x3002,
++		0xcc3e, 0x13d2,
++		0xcc3f, 0x464d,
++		0xcc40, 0x2862,
++		0xcc41, 0x3012,
++		0xcc42, 0x1002,
++		0xcc43, 0x2032,
++		0xcc44, 0x3012,
++		0xcc45, 0x1002,
++		0xcc46, 0x2fb4,
++		0xcc47, 0x3cd4,
++		0xcc48, 0x6624,
++		0xcc49, 0x5563,
++		0xcc4a, 0x2d42,
++		0xcc4b, 0x3002,
++		0xcc4c, 0x13d2,
++		0xcc4d, 0x2ed2,
++		0xcc4e, 0x3002,
++		0xcc4f, 0x1002,
++		0xcc50, 0x2fd2,
++		0xcc51, 0x3002,
++		0xcc52, 0x1002,
++		0xcc53, 0x004,
++		0xcc54, 0x2942,
++		0xcc55, 0x3002,
++		0xcc56, 0x1002,
++		0xcc57, 0x2092,
++		0xcc58, 0x3012,
++		0xcc59, 0x1002,
++		0xcc5a, 0x5cc3,
++		0xcc5b, 0x317,
++		0xcc5c, 0x2f72,
++		0xcc5d, 0x3002,
++		0xcc5e, 0x1002,
++		0xcc5f, 0x2942,
++		0xcc60, 0x3002,
++		0xcc61, 0x1002,
++		0xcc62, 0x22cd,
++		0xcc63, 0x301d,
++		0xcc64, 0x2862,
++		0xcc65, 0x3012,
++		0xcc66, 0x1002,
++		0xcc67, 0x2ed2,
++		0xcc68, 0x3002,
++		0xcc69, 0x1002,
++		0xcc6a, 0x2d72,
++		0xcc6b, 0x3002,
++		0xcc6c, 0x1002,
++		0xcc6d, 0x628f,
++		0xcc6e, 0x2112,
++		0xcc6f, 0x3012,
++		0xcc70, 0x1002,
++		0xcc71, 0x5aa3,
++		0xcc72, 0x2dc2,
++		0xcc73, 0x3002,
++		0xcc74, 0x1312,
++		0xcc75, 0x6f72,
++		0xcc76, 0x1002,
++		0xcc77, 0x2807,
++		0xcc78, 0x31a7,
++		0xcc79, 0x20c4,
++		0xcc7a, 0x3c24,
++		0xcc7b, 0x6724,
++		0xcc7c, 0x1002,
++		0xcc7d, 0x2807,
++		0xcc7e, 0x3187,
++		0xcc7f, 0x20c4,
++		0xcc80, 0x3c24,
++		0xcc81, 0x6724,
++		0xcc82, 0x1002,
++		0xcc83, 0x2514,
++		0xcc84, 0x3c64,
++		0xcc85, 0x6436,
++		0xcc86, 0xdff4,
++		0xcc87, 0x6436,
++		0xcc88, 0x1002,
++		0xcc89, 0x40a4,
++		0xcc8a, 0x643c,
++		0xcc8b, 0x4016,
++		0xcc8c, 0x8c6c,
++		0xcc8d, 0x2b24,
++		0xcc8e, 0x3c24,
++		0xcc8f, 0x6435,
++		0xcc90, 0x1002,
++		0xcc91, 0x2b24,
++		0xcc92, 0x3c24,
++		0xcc93, 0x643a,
++		0xcc94, 0x4025,
++		0xcc95, 0x8a5a,
++		0xcc96, 0x1002,
++		0xcc97, 0x2731,
++		0xcc98, 0x3011,
++		0xcc99, 0x1001,
++		0xcc9a, 0xc7a0,
++		0xcc9b, 0x100,
++		0xcc9c, 0xc502,
++		0xcc9d, 0x53ac,
++		0xcc9e, 0xc503,
++		0xcc9f, 0xd5d5,
++		0xcca0, 0xc600,
++		0xcca1, 0x2a6d,
++		0xcca2, 0xc601,
++		0xcca3, 0x2a4c,
++		0xcca4, 0xc602,
++		0xcca5, 0x111,
++		0xcca6, 0xc60c,
++		0xcca7, 0x5900,
++		0xcca8, 0xc710,
++		0xcca9, 0x700,
++		0xccaa, 0xc718,
++		0xccab, 0x700,
++		0xccac, 0xc720,
++		0xccad, 0x4700,
++		0xccae, 0xc801,
++		0xccaf, 0x7f50,
++		0xccb0, 0xc802,
++		0xccb1, 0x7760,
++		0xccb2, 0xc803,
++		0xccb3, 0x7fce,
++		0xccb4, 0xc804,
++		0xccb5, 0x5700,
++		0xccb6, 0xc805,
++		0xccb7, 0x5f11,
++		0xccb8, 0xc806,
++		0xccb9, 0x4751,
++		0xccba, 0xc807,
++		0xccbb, 0x57e1,
++		0xccbc, 0xc808,
++		0xccbd, 0x2700,
++		0xccbe, 0xc809,
++		0xccbf, 0x000,
++		0xccc0, 0xc821,
++		0xccc1, 0x002,
++		0xccc2, 0xc822,
++		0xccc3, 0x014,
++		0xccc4, 0xc832,
++		0xccc5, 0x1186,
++		0xccc6, 0xc847,
++		0xccc7, 0x1e02,
++		0xccc8, 0xc013,
++		0xccc9, 0xf341,
++		0xccca, 0xc01a,
++		0xcccb, 0x446,
++		0xcccc, 0xc024,
++		0xcccd, 0x1000,
++		0xccce, 0xc025,
++		0xcccf, 0xa00,
++		0xccd0, 0xc026,
++		0xccd1, 0xc0c,
++		0xccd2, 0xc027,
++		0xccd3, 0xc0c,
++		0xccd4, 0xc029,
++		0xccd5, 0x0a0,
++		0xccd6, 0xc030,
++		0xccd7, 0xa00,
++		0xccd8, 0xc03c,
++		0xccd9, 0x01c,
++		0xccda, 0xc005,
++		0xccdb, 0x7a06,
++		0xccdc, 0x000,
++		0xccdd, 0x2731,
++		0xccde, 0x3011,
++		0xccdf, 0x1001,
++		0xcce0, 0xc620,
++		0xcce1, 0x000,
++		0xcce2, 0xc621,
++		0xcce3, 0x03f,
++		0xcce4, 0xc622,
++		0xcce5, 0x000,
++		0xcce6, 0xc623,
++		0xcce7, 0x000,
++		0xcce8, 0xc624,
++		0xcce9, 0x000,
++		0xccea, 0xc625,
++		0xcceb, 0x000,
++		0xccec, 0xc627,
++		0xcced, 0x000,
++		0xccee, 0xc628,
++		0xccef, 0x000,
++		0xccf0, 0xc62c,
++		0xccf1, 0x000,
++		0xccf2, 0x000,
++		0xccf3, 0x2806,
++		0xccf4, 0x3cb6,
++		0xccf5, 0xc161,
++		0xccf6, 0x6134,
++		0xccf7, 0x6135,
++		0xccf8, 0x5443,
++		0xccf9, 0x303,
++		0xccfa, 0x6524,
++		0xccfb, 0x00b,
++		0xccfc, 0x1002,
++		0xccfd, 0x2104,
++		0xccfe, 0x3c24,
++		0xccff, 0x2105,
++		0xcd00, 0x3805,
++		0xcd01, 0x6524,
++		0xcd02, 0xdff4,
++		0xcd03, 0x4005,
++		0xcd04, 0x6524,
++		0xcd05, 0x1002,
++		0xcd06, 0x5dd3,
++		0xcd07, 0x306,
++		0xcd08, 0x2ff7,
++		0xcd09, 0x38f7,
++		0xcd0a, 0x60b7,
++		0xcd0b, 0xdffd,
++		0xcd0c, 0x00a,
++		0xcd0d, 0x1002,
++		0xcd0e, 0
++	};
++	int i, err;
++
++	err = set_phy_regs(phy, regs);
++	if (err)
++		return err;
+ 
+-	return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
++	msleep(50);
++
++	for (i = 0; i < ARRAY_SIZE(sr_edc) && !err; i += 2)
++		err = mdio_write(phy, MDIO_DEV_PMA_PMD, sr_edc[i],
++				 sr_edc[i + 1]);
++	if (!err)
++		phy->priv = edc_sr;
++	return err;
+ }
+ 
+-static int ael1006_intr_handler(struct cphy *phy)
++static int ael2005_setup_twinax_edc(struct cphy *phy, int modtype)
+ {
+-	unsigned int status;
+-	int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
++	static struct reg_val regs[] = {
++		{ MDIO_DEV_PMA_PMD, 0xc04a, 0xffff, 0x5a00 },
++		{ 0, 0, 0, 0 }
++	};
++	static struct reg_val preemphasis[] = {
++		{ MDIO_DEV_PMA_PMD, 0xc014, 0xffff, 0xfe16 },
++		{ MDIO_DEV_PMA_PMD, 0xc015, 0xffff, 0xa000 },
++		{ 0, 0, 0, 0 }
++	};
++	static u16 twinax_edc[] = {
++		0xcc00, 0x4009,
++		0xcc01, 0x27ff,
++		0xcc02, 0x300f,
++		0xcc03, 0x40aa,
++		0xcc04, 0x401c,
++		0xcc05, 0x401e,
++		0xcc06, 0x2ff4,
++		0xcc07, 0x3cd4,
++		0xcc08, 0x2035,
++		0xcc09, 0x3145,
++		0xcc0a, 0x6524,
++		0xcc0b, 0x26a2,
++		0xcc0c, 0x3012,
++		0xcc0d, 0x1002,
++		0xcc0e, 0x29c2,
++		0xcc0f, 0x3002,
++		0xcc10, 0x1002,
++		0xcc11, 0x2072,
++		0xcc12, 0x3012,
++		0xcc13, 0x1002,
++		0xcc14, 0x22cd,
++		0xcc15, 0x301d,
++		0xcc16, 0x2e52,
++		0xcc17, 0x3012,
++		0xcc18, 0x1002,
++		0xcc19, 0x28e2,
++		0xcc1a, 0x3002,
++		0xcc1b, 0x1002,
++		0xcc1c, 0x628f,
++		0xcc1d, 0x2ac2,
++		0xcc1e, 0x3012,
++		0xcc1f, 0x1002,
++		0xcc20, 0x5553,
++		0xcc21, 0x2ae2,
++		0xcc22, 0x3002,
++		0xcc23, 0x1302,
++		0xcc24, 0x401e,
++		0xcc25, 0x2be2,
++		0xcc26, 0x3012,
++		0xcc27, 0x1002,
++		0xcc28, 0x2da2,
++		0xcc29, 0x3012,
++		0xcc2a, 0x1002,
++		0xcc2b, 0x2ba2,
++		0xcc2c, 0x3002,
++		0xcc2d, 0x1002,
++		0xcc2e, 0x5ee3,
++		0xcc2f, 0x305,
++		0xcc30, 0x400e,
++		0xcc31, 0x2bc2,
++		0xcc32, 0x3002,
++		0xcc33, 0x1002,
++		0xcc34, 0x2b82,
++		0xcc35, 0x3012,
++		0xcc36, 0x1002,
++		0xcc37, 0x5663,
++		0xcc38, 0x302,
++		0xcc39, 0x401e,
++		0xcc3a, 0x6f72,
++		0xcc3b, 0x1002,
++		0xcc3c, 0x628f,
++		0xcc3d, 0x2be2,
++		0xcc3e, 0x3012,
++		0xcc3f, 0x1002,
++		0xcc40, 0x22cd,
++		0xcc41, 0x301d,
++		0xcc42, 0x2e52,
++		0xcc43, 0x3012,
++		0xcc44, 0x1002,
++		0xcc45, 0x2522,
++		0xcc46, 0x3012,
++		0xcc47, 0x1002,
++		0xcc48, 0x2da2,
++		0xcc49, 0x3012,
++		0xcc4a, 0x1002,
++		0xcc4b, 0x2ca2,
++		0xcc4c, 0x3012,
++		0xcc4d, 0x1002,
++		0xcc4e, 0x2fa4,
++		0xcc4f, 0x3cd4,
++		0xcc50, 0x6624,
++		0xcc51, 0x410b,
++		0xcc52, 0x56b3,
++		0xcc53, 0x3c4,
++		0xcc54, 0x2fb2,
++		0xcc55, 0x3002,
++		0xcc56, 0x1002,
++		0xcc57, 0x220b,
++		0xcc58, 0x303b,
++		0xcc59, 0x56b3,
++		0xcc5a, 0x3c3,
++		0xcc5b, 0x866b,
++		0xcc5c, 0x400c,
++		0xcc5d, 0x23a2,
++		0xcc5e, 0x3012,
++		0xcc5f, 0x1002,
++		0xcc60, 0x2da2,
++		0xcc61, 0x3012,
++		0xcc62, 0x1002,
++		0xcc63, 0x2ca2,
++		0xcc64, 0x3012,
++		0xcc65, 0x1002,
++		0xcc66, 0x2fb4,
++		0xcc67, 0x3cd4,
++		0xcc68, 0x6624,
++		0xcc69, 0x56b3,
++		0xcc6a, 0x3c3,
++		0xcc6b, 0x866b,
++		0xcc6c, 0x401c,
++		0xcc6d, 0x2205,
++		0xcc6e, 0x3035,
++		0xcc6f, 0x5b53,
++		0xcc70, 0x2c52,
++		0xcc71, 0x3002,
++		0xcc72, 0x13c2,
++		0xcc73, 0x5cc3,
++		0xcc74, 0x317,
++		0xcc75, 0x2522,
++		0xcc76, 0x3012,
++		0xcc77, 0x1002,
++		0xcc78, 0x2da2,
++		0xcc79, 0x3012,
++		0xcc7a, 0x1002,
++		0xcc7b, 0x2b82,
++		0xcc7c, 0x3012,
++		0xcc7d, 0x1002,
++		0xcc7e, 0x5663,
++		0xcc7f, 0x303,
++		0xcc80, 0x401e,
++		0xcc81, 0x004,
++		0xcc82, 0x2c42,
++		0xcc83, 0x3012,
++		0xcc84, 0x1002,
++		0xcc85, 0x6f72,
++		0xcc86, 0x1002,
++		0xcc87, 0x628f,
++		0xcc88, 0x2304,
++		0xcc89, 0x3c84,
++		0xcc8a, 0x6436,
++		0xcc8b, 0xdff4,
++		0xcc8c, 0x6436,
++		0xcc8d, 0x2ff5,
++		0xcc8e, 0x3005,
++		0xcc8f, 0x8656,
++		0xcc90, 0xdfba,
++		0xcc91, 0x56a3,
++		0xcc92, 0xd05a,
++		0xcc93, 0x21c2,
++		0xcc94, 0x3012,
++		0xcc95, 0x1392,
++		0xcc96, 0xd05a,
++		0xcc97, 0x56a3,
++		0xcc98, 0xdfba,
++		0xcc99, 0x383,
++		0xcc9a, 0x6f72,
++		0xcc9b, 0x1002,
++		0xcc9c, 0x28c5,
++		0xcc9d, 0x3005,
++		0xcc9e, 0x4178,
++		0xcc9f, 0x5653,
++		0xcca0, 0x384,
++		0xcca1, 0x22b2,
++		0xcca2, 0x3012,
++		0xcca3, 0x1002,
++		0xcca4, 0x2be5,
++		0xcca5, 0x3005,
++		0xcca6, 0x41e8,
++		0xcca7, 0x5653,
++		0xcca8, 0x382,
++		0xcca9, 0x002,
++		0xccaa, 0x4258,
++		0xccab, 0x2474,
++		0xccac, 0x3c84,
++		0xccad, 0x6437,
++		0xccae, 0xdff4,
++		0xccaf, 0x6437,
++		0xccb0, 0x2ff5,
++		0xccb1, 0x3c05,
++		0xccb2, 0x8757,
++		0xccb3, 0xb888,
++		0xccb4, 0x9787,
++		0xccb5, 0xdff4,
++		0xccb6, 0x6724,
++		0xccb7, 0x866a,
++		0xccb8, 0x6f72,
++		0xccb9, 0x1002,
++		0xccba, 0x2d01,
++		0xccbb, 0x3011,
++		0xccbc, 0x1001,
++		0xccbd, 0xc620,
++		0xccbe, 0x14e5,
++		0xccbf, 0xc621,
++		0xccc0, 0xc53d,
++		0xccc1, 0xc622,
++		0xccc2, 0x3cbe,
++		0xccc3, 0xc623,
++		0xccc4, 0x4452,
++		0xccc5, 0xc624,
++		0xccc6, 0xc5c5,
++		0xccc7, 0xc625,
++		0xccc8, 0xe01e,
++		0xccc9, 0xc627,
++		0xccca, 0x000,
++		0xcccb, 0xc628,
++		0xcccc, 0x000,
++		0xcccd, 0xc62b,
++		0xccce, 0x000,
++		0xcccf, 0xc62c,
++		0xccd0, 0x000,
++		0xccd1, 0x000,
++		0xccd2, 0x2d01,
++		0xccd3, 0x3011,
++		0xccd4, 0x1001,
++		0xccd5, 0xc620,
++		0xccd6, 0x000,
++		0xccd7, 0xc621,
++		0xccd8, 0x000,
++		0xccd9, 0xc622,
++		0xccda, 0x0ce,
++		0xccdb, 0xc623,
++		0xccdc, 0x07f,
++		0xccdd, 0xc624,
++		0xccde, 0x032,
++		0xccdf, 0xc625,
++		0xcce0, 0x000,
++		0xcce1, 0xc627,
++		0xcce2, 0x000,
++		0xcce3, 0xc628,
++		0xcce4, 0x000,
++		0xcce5, 0xc62b,
++		0xcce6, 0x000,
++		0xcce7, 0xc62c,
++		0xcce8, 0x000,
++		0xcce9, 0x000,
++		0xccea, 0x2d01,
++		0xcceb, 0x3011,
++		0xccec, 0x1001,
++		0xcced, 0xc502,
++		0xccee, 0x609f,
++		0xccef, 0xc600,
++		0xccf0, 0x2a6e,
++		0xccf1, 0xc601,
++		0xccf2, 0x2a2c,
++		0xccf3, 0xc60c,
++		0xccf4, 0x5400,
++		0xccf5, 0xc710,
++		0xccf6, 0x700,
++		0xccf7, 0xc718,
++		0xccf8, 0x700,
++		0xccf9, 0xc720,
++		0xccfa, 0x4700,
++		0xccfb, 0xc728,
++		0xccfc, 0x700,
++		0xccfd, 0xc729,
++		0xccfe, 0x1207,
++		0xccff, 0xc801,
++		0xcd00, 0x7f50,
++		0xcd01, 0xc802,
++		0xcd02, 0x7760,
++		0xcd03, 0xc803,
++		0xcd04, 0x7fce,
++		0xcd05, 0xc804,
++		0xcd06, 0x520e,
++		0xcd07, 0xc805,
++		0xcd08, 0x5c11,
++		0xcd09, 0xc806,
++		0xcd0a, 0x3c51,
++		0xcd0b, 0xc807,
++		0xcd0c, 0x4061,
++		0xcd0d, 0xc808,
++		0xcd0e, 0x49c1,
++		0xcd0f, 0xc809,
++		0xcd10, 0x3840,
++		0xcd11, 0xc80a,
++		0xcd12, 0x000,
++		0xcd13, 0xc821,
++		0xcd14, 0x002,
++		0xcd15, 0xc822,
++		0xcd16, 0x046,
++		0xcd17, 0xc844,
++		0xcd18, 0x182f,
++		0xcd19, 0xc013,
++		0xcd1a, 0xf341,
++		0xcd1b, 0xc01a,
++		0xcd1c, 0x446,
++		0xcd1d, 0xc024,
++		0xcd1e, 0x1000,
++		0xcd1f, 0xc025,
++		0xcd20, 0xa00,
++		0xcd21, 0xc026,
++		0xcd22, 0xc0c,
++		0xcd23, 0xc027,
++		0xcd24, 0xc0c,
++		0xcd25, 0xc029,
++		0xcd26, 0x0a0,
++		0xcd27, 0xc030,
++		0xcd28, 0xa00,
++		0xcd29, 0xc03c,
++		0xcd2a, 0x01c,
++		0xcd2b, 0x000,
++		0xcd2c, 0x2b84,
++		0xcd2d, 0x3c74,
++		0xcd2e, 0x6435,
++		0xcd2f, 0xdff4,
++		0xcd30, 0x6435,
++		0xcd31, 0x2806,
++		0xcd32, 0x3006,
++		0xcd33, 0x8565,
++		0xcd34, 0x2b24,
++		0xcd35, 0x3c24,
++		0xcd36, 0x6436,
++		0xcd37, 0x1002,
++		0xcd38, 0x2b24,
++		0xcd39, 0x3c24,
++		0xcd3a, 0x6436,
++		0xcd3b, 0x4045,
++		0xcd3c, 0x8656,
++		0xcd3d, 0x1002,
++		0xcd3e, 0x2807,
++		0xcd3f, 0x31a7,
++		0xcd40, 0x20c4,
++		0xcd41, 0x3c24,
++		0xcd42, 0x6724,
++		0xcd43, 0x1002,
++		0xcd44, 0x2807,
++		0xcd45, 0x3187,
++		0xcd46, 0x20c4,
++		0xcd47, 0x3c24,
++		0xcd48, 0x6724,
++		0xcd49, 0x1002,
++		0xcd4a, 0x2514,
++		0xcd4b, 0x3c64,
++		0xcd4c, 0x6436,
++		0xcd4d, 0xdff4,
++		0xcd4e, 0x6436,
++		0xcd4f, 0x1002,
++		0xcd50, 0x2806,
++		0xcd51, 0x3cb6,
++		0xcd52, 0xc161,
++		0xcd53, 0x6134,
++		0xcd54, 0x6135,
++		0xcd55, 0x5443,
++		0xcd56, 0x303,
++		0xcd57, 0x6524,
++		0xcd58, 0x00b,
++		0xcd59, 0x1002,
++		0xcd5a, 0xd019,
++		0xcd5b, 0x2104,
++		0xcd5c, 0x3c24,
++		0xcd5d, 0x2105,
++		0xcd5e, 0x3805,
++		0xcd5f, 0x6524,
++		0xcd60, 0xdff4,
++		0xcd61, 0x4005,
++		0xcd62, 0x6524,
++		0xcd63, 0x2e8d,
++		0xcd64, 0x303d,
++		0xcd65, 0x5dd3,
++		0xcd66, 0x306,
++		0xcd67, 0x2ff7,
++		0xcd68, 0x38f7,
++		0xcd69, 0x60b7,
++		0xcd6a, 0xdffd,
++		0xcd6b, 0x00a,
++		0xcd6c, 0x1002,
++		0xcd6d, 0
++	};
++	int i, err;
+ 
++	err = set_phy_regs(phy, regs);
++	if (!err && modtype == phy_modtype_twinax_long)
++		err = set_phy_regs(phy, preemphasis);
+ 	if (err)
+ 		return err;
+-	return (status & 1) ? cphy_cause_link_change : 0;
++
++	msleep(50);
++
++	for (i = 0; i < ARRAY_SIZE(twinax_edc) && !err; i += 2)
++		err = mdio_write(phy, MDIO_DEV_PMA_PMD, twinax_edc[i],
++				 twinax_edc[i + 1]);
++	if (!err)
++		phy->priv = edc_twinax;
++	return err;
+ }
+ 
+-static int ael1006_power_down(struct cphy *phy, int enable)
++static int ael2005_i2c_rd(struct cphy *phy, int dev_addr, int word_addr)
+ {
+-	return t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR,
+-				   BMCR_PDOWN, enable ? BMCR_PDOWN : 0);
++	int i, err;
++	unsigned int stat, data;
++
++	err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL_I2C_CTRL,
++			 (dev_addr << 8) | (1 << 8) | word_addr);
++	if (err)
++		return err;
++
++	for (i = 0; i < 5; i++) {
++		msleep(1);
++		err = mdio_read(phy, MDIO_DEV_PMA_PMD, AEL_I2C_STAT, &stat);
++		if (err)
++			return err;
++		if ((stat & 3) == 1) {
++			err = mdio_read(phy, MDIO_DEV_PMA_PMD, AEL_I2C_DATA,
++					&data);
++			if (err)
++				return err;
++			return data >> 8;
++		}
++	}
++	CH_WARN(phy->adapter, "PHY %u I2C read of addr %u timed out\n",
++		phy->addr, word_addr);
++	return -ETIMEDOUT;
+ }
+ 
+-static struct cphy_ops ael1006_ops = {
+-	.reset = ael1006_reset,
+-	.intr_enable = ael1006_intr_enable,
+-	.intr_disable = ael1006_intr_disable,
+-	.intr_clear = ael1006_intr_clear,
+-	.intr_handler = ael1006_intr_handler,
+-	.get_link_status = ael100x_get_link_status,
+-	.power_down = ael1006_power_down,
++static int get_module_type(struct cphy *phy, int delay_ms)
++{
++	int v;
++	unsigned int stat;
++
++	v = mdio_read(phy, MDIO_DEV_PMA_PMD, AEL2005_GPIO_CTRL, &stat);
++	if (v)
++		return v;
++
++	if (stat & (1 << 8))			/* module absent */
++		return phy_modtype_none;
++
++	if (delay_ms)
++		msleep(delay_ms);
++
++	/* see SFF-8472 for below */
++	v = ael2005_i2c_rd(phy, MODULE_DEV_ADDR, 3);
++	if (v < 0)
++		return v;
++
++	if (v == 0x10)
++		return phy_modtype_sr;
++	if (v == 0x20)
++		return phy_modtype_lr;
++	if (v == 0x40)
++		return phy_modtype_lrm;
++
++	v = ael2005_i2c_rd(phy, MODULE_DEV_ADDR, 6);
++	if (v < 0)
++		return v;
++	if (v != 4)
++		goto unknown;
++
++	v = ael2005_i2c_rd(phy, MODULE_DEV_ADDR, 10);
++	if (v < 0)
++		return v;
++
++	if (v & 0x80) {
++		v = ael2005_i2c_rd(phy, MODULE_DEV_ADDR, 0x12);
++		if (v < 0)
++			return v;
++		return v > 10 ? phy_modtype_twinax_long : phy_modtype_twinax;
++	}
++unknown:
++	return phy_modtype_unknown;
++}
++
++static int ael2005_intr_enable(struct cphy *phy)
++{
++	int err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL2005_GPIO_CTRL, 0x200);
++	return err ? err : t3_phy_lasi_intr_enable(phy);
++}
++
++static int ael2005_intr_disable(struct cphy *phy)
++{
++	int err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL2005_GPIO_CTRL, 0x100);
++	return err ? err : t3_phy_lasi_intr_disable(phy);
++}
++
++static int ael2005_intr_clear(struct cphy *phy)
++{
++	int err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL2005_GPIO_CTRL, 0xd00);
++	return err ? err : t3_phy_lasi_intr_clear(phy);
++}
++
++static int ael2005_reset(struct cphy *phy, int wait)
++{
++	static struct reg_val regs0[] = {
++		{ MDIO_DEV_PMA_PMD, 0xc001, 0, 1 << 5 },
++		{ MDIO_DEV_PMA_PMD, 0xc017, 0, 1 << 5 },
++		{ MDIO_DEV_PMA_PMD, 0xc013, 0xffff, 0xf341 },
++		{ MDIO_DEV_PMA_PMD, 0xc210, 0xffff, 0x8000 },
++		{ MDIO_DEV_PMA_PMD, 0xc210, 0xffff, 0x8100 },
++		{ MDIO_DEV_PMA_PMD, 0xc210, 0xffff, 0x8000 },
++		{ MDIO_DEV_PMA_PMD, 0xc210, 0xffff, 0 },
++		{ 0, 0, 0, 0 }
++	};
++	static struct reg_val regs1[] = {
++		{ MDIO_DEV_PMA_PMD, 0xca00, 0xffff, 0x0080 },
++		{ MDIO_DEV_PMA_PMD, 0xca12, 0xffff, 0 },
++		{ 0, 0, 0, 0 }
++	};
++
++	int err, lasi_ctrl;
++
++	err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, &lasi_ctrl);
++	if (err)
++		return err;
++
++	err = t3_phy_reset(phy, MDIO_DEV_PMA_PMD, 0);
++	if (err)
++		return err;
++
++	msleep(125);
++	phy->priv = edc_none;
++	err = set_phy_regs(phy, regs0);
++	if (err)
++		return err;
++
++	msleep(50);
++
++	err = get_module_type(phy, 0);
++	if (err < 0)
++		return err;
++	phy->modtype = err;
++
++	if (err == phy_modtype_twinax || err == phy_modtype_twinax_long)
++		err = ael2005_setup_twinax_edc(phy, err);
++	else
++		err = ael2005_setup_sr_edc(phy);
++	if (err)
++		return err;
++
++	err = set_phy_regs(phy, regs1);
++	if (err)
++		return err;
++
++	/* reset wipes out interrupts, reenable them if they were on */
++	if (lasi_ctrl & 1)
++		err = ael2005_intr_enable(phy);
++	return err;
++}
++
++static int ael2005_intr_handler(struct cphy *phy)
++{
++	unsigned int stat;
++	int ret, edc_needed, cause = 0;
++
++	ret = mdio_read(phy, MDIO_DEV_PMA_PMD, AEL2005_GPIO_STAT, &stat);
++	if (ret)
++		return ret;
++
++	if (stat & AEL2005_MODDET_IRQ) {
++		ret = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL2005_GPIO_CTRL,
++				 0xd00);
++		if (ret)
++			return ret;
++
++		/* modules have max 300 ms init time after hot plug */
++		ret = get_module_type(phy, 300);
++		if (ret < 0)
++			return ret;
++
++		phy->modtype = ret;
++		if (ret == phy_modtype_none)
++			edc_needed = phy->priv;       /* on unplug retain EDC */
++		else if (ret == phy_modtype_twinax ||
++			 ret == phy_modtype_twinax_long)
++			edc_needed = edc_twinax;
++		else
++			edc_needed = edc_sr;
++
++		if (edc_needed != phy->priv) {
++			ret = ael2005_reset(phy, 0);
++			return ret ? ret : cphy_cause_module_change;
++		}
++		cause = cphy_cause_module_change;
++	}
++
++	ret = t3_phy_lasi_intr_handler(phy);
++	if (ret < 0)
++		return ret;
++
++	ret |= cause;
++	return ret ? ret : cphy_cause_link_change;
++}
++
++static struct cphy_ops ael2005_ops = {
++	.reset           = ael2005_reset,
++	.intr_enable     = ael2005_intr_enable,
++	.intr_disable    = ael2005_intr_disable,
++	.intr_clear      = ael2005_intr_clear,
++	.intr_handler    = ael2005_intr_handler,
++	.get_link_status = get_link_status_r,
++	.power_down      = ael1002_power_down,
+ };
+ 
+-void t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
+-			 int phy_addr, const struct mdio_ops *mdio_ops)
++int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter,
++			int phy_addr, const struct mdio_ops *mdio_ops)
+ {
+-	cphy_init(phy, adapter, phy_addr, &ael1006_ops, mdio_ops);
+-	ael100x_txon(phy);
++	cphy_init(phy, adapter, phy_addr, &ael2005_ops, mdio_ops,
++		  SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE |
++		  SUPPORTED_IRQ, "10GBASE-R");
++	msleep(125);
++	return t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, AEL_OPT_SETTINGS, 0,
++				   1 << 5);
++}
++
++/*
++ * Get link status for a 10GBASE-X device.
++ */
++static int get_link_status_x(struct cphy *phy, int *link_ok, int *speed,
++			     int *duplex, int *fc)
++{
++	if (link_ok) {
++		unsigned int stat0, stat1, stat2;
++		int err = mdio_read(phy, MDIO_DEV_PMA_PMD, PMD_RSD, &stat0);
++
++		if (!err)
++			err = mdio_read(phy, MDIO_DEV_PCS, PCS_STAT1_X, &stat1);
++		if (!err)
++			err = mdio_read(phy, MDIO_DEV_XGXS, XS_LN_STAT, &stat2);
++		if (err)
++			return err;
++		*link_ok = (stat0 & (stat1 >> 12) & (stat2 >> 12)) & 1;
++	}
++	if (speed)
++		*speed = SPEED_10000;
++	if (duplex)
++		*duplex = DUPLEX_FULL;
++	return 0;
+ }
+ 
+ static struct cphy_ops qt2045_ops = {
+ 	.reset = ael1006_reset,
+-	.intr_enable = ael1006_intr_enable,
+-	.intr_disable = ael1006_intr_disable,
+-	.intr_clear = ael1006_intr_clear,
+-	.intr_handler = ael1006_intr_handler,
+-	.get_link_status = ael100x_get_link_status,
++	.intr_enable = t3_phy_lasi_intr_enable,
++	.intr_disable = t3_phy_lasi_intr_disable,
++	.intr_clear = t3_phy_lasi_intr_clear,
++	.intr_handler = t3_phy_lasi_intr_handler,
++	.get_link_status = get_link_status_x,
+ 	.power_down = ael1006_power_down,
+ };
+ 
+-void t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter,
+-			int phy_addr, const struct mdio_ops *mdio_ops)
++int t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter,
++		       int phy_addr, const struct mdio_ops *mdio_ops)
+ {
+ 	unsigned int stat;
+ 
+-	cphy_init(phy, adapter, phy_addr, &qt2045_ops, mdio_ops);
++	cphy_init(phy, adapter, phy_addr, &qt2045_ops, mdio_ops,
++		  SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP,
++		  "10GBASE-CX4");
+ 
+ 	/*
+ 	 * Some cards where the PHY is supposed to be at address 0 actually
+@@ -205,6 +1161,7 @@ void t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter,
+ 	if (!phy_addr && !mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR, &stat) &&
+ 	    stat == 0xffff)
+ 		phy->addr = 1;
++	return 0;
+ }
+ 
+ static int xaui_direct_reset(struct cphy *phy, int wait)
+@@ -250,8 +1207,11 @@ static struct cphy_ops xaui_direct_ops = {
+ 	.power_down = xaui_direct_power_down,
+ };
+ 
+-void t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
+-			     int phy_addr, const struct mdio_ops *mdio_ops)
++int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
++			    int phy_addr, const struct mdio_ops *mdio_ops)
+ {
+-	cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops);
++	cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops,
++		  SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP,
++		  "10GBASE-CX4");
++	return 0;
+ }
+diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
+index 9ecf8a6..d5e9bf7 100644
+--- a/drivers/net/cxgb3/common.h
++++ b/drivers/net/cxgb3/common.h
+@@ -193,22 +193,13 @@ struct mdio_ops {
+ struct adapter_info {
+ 	unsigned char nports;	/* # of ports */
+ 	unsigned char phy_base_addr;	/* MDIO PHY base address */
+-	unsigned char mdien;
+-	unsigned char mdiinv;
+ 	unsigned int gpio_out;	/* GPIO output settings */
+-	unsigned int gpio_intr;	/* GPIO IRQ enable mask */
++	unsigned char gpio_intr[MAX_NPORTS]; /* GPIO PHY IRQ pins */
+ 	unsigned long caps;	/* adapter capabilities */
+ 	const struct mdio_ops *mdio_ops;	/* MDIO operations */
+ 	const char *desc;	/* product description */
+ };
+ 
+-struct port_type_info {
+-	void (*phy_prep)(struct cphy *phy, struct adapter *adapter,
+-			 int phy_addr, const struct mdio_ops *ops);
+-	unsigned int caps;
+-	const char *desc;
+-};
+-
+ struct mc5_stats {
+ 	unsigned long parity_err;
+ 	unsigned long active_rgn_full;
+@@ -525,12 +516,25 @@ enum {
+ 	MAC_RXFIFO_SIZE = 32768
+ };
+ 
+-/* IEEE 802.3ae specified MDIO devices */
++/* IEEE 802.3 specified MDIO devices */
+ enum {
+ 	MDIO_DEV_PMA_PMD = 1,
+ 	MDIO_DEV_WIS = 2,
+ 	MDIO_DEV_PCS = 3,
+-	MDIO_DEV_XGXS = 4
++	MDIO_DEV_XGXS = 4,
++	MDIO_DEV_ANEG = 7,
++	MDIO_DEV_VEND1 = 30,
++	MDIO_DEV_VEND2 = 31
++};
++
++/* LASI control and status registers */
++enum {
++	RX_ALARM_CTRL = 0x9000,
++	TX_ALARM_CTRL = 0x9001,
++	LASI_CTRL = 0x9002,
++	RX_ALARM_STAT = 0x9003,
++	TX_ALARM_STAT = 0x9004,
++	LASI_STAT = 0x9005
+ };
+ 
+ /* PHY loopback direction */
+@@ -542,12 +546,23 @@ enum {
+ /* PHY interrupt types */
+ enum {
+ 	cphy_cause_link_change = 1,
+-	cphy_cause_fifo_error = 2
++	cphy_cause_fifo_error = 2,
++	cphy_cause_module_change = 4,
++};
++
++/* PHY module types */
++enum {
++	phy_modtype_none,
++	phy_modtype_sr,
++	phy_modtype_lr,
++	phy_modtype_lrm,
++	phy_modtype_twinax,
++	phy_modtype_twinax_long,
++	phy_modtype_unknown
+ };
+ 
+ /* PHY operations */
+ struct cphy_ops {
+-	void (*destroy)(struct cphy *phy);
+ 	int (*reset)(struct cphy *phy, int wait);
+ 
+ 	int (*intr_enable)(struct cphy *phy);
+@@ -568,8 +583,12 @@ struct cphy_ops {
+ 
+ /* A PHY instance */
+ struct cphy {
+-	int addr;		/* PHY address */
++	u8 addr;			/* PHY address */
++	u8 modtype;			/* PHY module type */
++	short priv;			/* scratch pad */
++	unsigned int caps;		/* PHY capabilities */
+ 	struct adapter *adapter;	/* associated adapter */
++	const char *desc;		/* PHY description */
+ 	unsigned long fifo_errors;	/* FIFO over/under-flows */
+ 	const struct cphy_ops *ops;	/* PHY operations */
+ 	int (*mdio_read)(struct adapter *adapter, int phy_addr, int mmd_addr,
+@@ -594,10 +613,13 @@ static inline int mdio_write(struct cphy *phy, int mmd, int reg,
+ /* Convenience initializer */
+ static inline void cphy_init(struct cphy *phy, struct adapter *adapter,
+ 			     int phy_addr, struct cphy_ops *phy_ops,
+-			     const struct mdio_ops *mdio_ops)
++			     const struct mdio_ops *mdio_ops,
++			      unsigned int caps, const char *desc)
+ {
+-	phy->adapter = adapter;
+ 	phy->addr = phy_addr;
++	phy->caps = caps;
++	phy->adapter = adapter;
++	phy->desc = desc;
+ 	phy->ops = phy_ops;
+ 	if (mdio_ops) {
+ 		phy->mdio_read = mdio_ops->read;
+@@ -669,6 +691,10 @@ int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
+ int t3_phy_reset(struct cphy *phy, int mmd, int wait);
+ int t3_phy_advertise(struct cphy *phy, unsigned int advert);
+ int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex);
++int t3_phy_lasi_intr_enable(struct cphy *phy);
++int t3_phy_lasi_intr_disable(struct cphy *phy);
++int t3_phy_lasi_intr_clear(struct cphy *phy);
++int t3_phy_lasi_intr_handler(struct cphy *phy);
+ 
+ void t3_intr_enable(struct adapter *adapter);
+ void t3_intr_disable(struct adapter *adapter);
+@@ -774,14 +800,16 @@ int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4]);
+ int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
+ 		      unsigned int credits);
+ 
+-void t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
+-			 int phy_addr, const struct mdio_ops *mdio_ops);
+-void t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
+-			 int phy_addr, const struct mdio_ops *mdio_ops);
+-void t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
+-			 int phy_addr, const struct mdio_ops *mdio_ops);
+-void t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
+-			const struct mdio_ops *mdio_ops);
+-void t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
+-			     int phy_addr, const struct mdio_ops *mdio_ops);
++int t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
++			int phy_addr, const struct mdio_ops *mdio_ops);
++int t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
++			int phy_addr, const struct mdio_ops *mdio_ops);
++int t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
++			int phy_addr, const struct mdio_ops *mdio_ops);
++int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter,
++			int phy_addr, const struct mdio_ops *mdio_ops);
++int t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
++		       const struct mdio_ops *mdio_ops);
++int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
++			    int phy_addr, const struct mdio_ops *mdio_ops);
+ #endif				/* __CHELSIO_COMMON_H */
+diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
+index 5447f3e..8de820e 100644
+--- a/drivers/net/cxgb3/cxgb3_main.c
++++ b/drivers/net/cxgb3/cxgb3_main.c
+@@ -208,6 +208,31 @@ void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
+ 	}
+ }
+ 
++/**
++ *	t3_os_phymod_changed - handle PHY module changes
++ *	@phy: the PHY reporting the module change
++ *	@mod_type: new module type
++ *
++ *	This is the OS-dependent handler for PHY module changes.  It is
++ *	invoked when a PHY module is removed or inserted for any OS-specific
++ *	processing.
++ */
++void t3_os_phymod_changed(struct adapter *adap, int port_id)
++{
++	static const char *mod_str[] = {
++		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
++	};
++
++	const struct net_device *dev = adap->port[port_id];
++	const struct port_info *pi = netdev_priv(dev);
++
++	if (pi->phy.modtype == phy_modtype_none)
++		printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
++	else
++		printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
++		       mod_str[pi->phy.modtype]);
++}
++
+ static void cxgb_set_rxmode(struct net_device *dev)
+ {
+ 	struct t3_rx_mode rm;
+@@ -1485,11 +1510,22 @@ static int speed_duplex_to_caps(int speed, int duplex)
+ 
+ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+ {
++	int cap;
+ 	struct port_info *p = netdev_priv(dev);
+ 	struct link_config *lc = &p->link_config;
+ 
+-	if (!(lc->supported & SUPPORTED_Autoneg))
+-		return -EOPNOTSUPP;	/* can't change speed/duplex */
++	if (!(lc->supported & SUPPORTED_Autoneg)) {
++		/*
++		 * PHY offers a single speed/duplex. See if that's what's
++		 * being requested.
++		 */
++		if (cmd->autoneg == AUTONEG_DISABLE) {
++			cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
++			if (lc->supported & cap)
++				return 0;
++		}
++		return -EINVAL;	
++	}
+ 
+ 	if (cmd->autoneg == AUTONEG_DISABLE) {
+ 		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
+@@ -2117,7 +2153,7 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+ 			mmd = data->phy_id >> 8;
+ 			if (!mmd)
+ 				mmd = MDIO_DEV_PCS;
+-			else if (mmd > MDIO_DEV_XGXS)
++			else if (mmd > MDIO_DEV_VEND2)
+ 				return -EINVAL;
+ 
+ 			ret =
+@@ -2143,7 +2179,7 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+ 			mmd = data->phy_id >> 8;
+ 			if (!mmd)
+ 				mmd = MDIO_DEV_PCS;
+-			else if (mmd > MDIO_DEV_XGXS)
++			else if (mmd > MDIO_DEV_VEND2)
+ 				return -EINVAL;
+ 
+ 			ret =
+@@ -2290,7 +2326,7 @@ static void check_link_status(struct adapter *adapter)
+ 		struct net_device *dev = adapter->port[i];
+ 		struct port_info *p = netdev_priv(dev);
+ 
+-		if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
++		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev))
+ 			t3_link_changed(adapter, i);
+ 	}
+ }
+@@ -2564,7 +2600,7 @@ static void __devinit print_port_info(struct adapter *adap,
+ 		if (!test_bit(i, &adap->registered_device_map))
+ 			continue;
+ 		printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
+-		       dev->name, ai->desc, pi->port_type->desc,
++		       dev->name, ai->desc, pi->phy.desc,
+ 		       is_offload(adap) ? "R" : "", adap->params.rev, buf,
+ 		       (adap->flags & USING_MSIX) ? " MSI-X" :
+ 		       (adap->flags & USING_MSI) ? " MSI" : "");
+diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
+index 4bda27c..a035d5c 100644
+--- a/drivers/net/cxgb3/regs.h
++++ b/drivers/net/cxgb3/regs.h
+@@ -573,6 +573,10 @@
+ #define V_GPIO10(x) ((x) << S_GPIO10)
+ #define F_GPIO10    V_GPIO10(1U)
+ 
++#define S_GPIO9    9
++#define V_GPIO9(x) ((x) << S_GPIO9)
++#define F_GPIO9    V_GPIO9(1U)
++
+ #define S_GPIO7    7
+ #define V_GPIO7(x) ((x) << S_GPIO7)
+ #define F_GPIO7    V_GPIO7(1U)
+diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
+index 2b0e463..bf5c076 100644
+--- a/drivers/net/cxgb3/t3_hw.c
++++ b/drivers/net/cxgb3/t3_hw.c
+@@ -194,21 +194,18 @@ int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
+ static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
+ {
+ 	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
+-	u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
+-	    V_CLKDIV(clkdiv);
++	u32 val = F_PREEN | V_CLKDIV(clkdiv);
+ 
+-	if (!(ai->caps & SUPPORTED_10000baseT_Full))
+-		val |= V_ST(1);
+ 	t3_write_reg(adap, A_MI1_CFG, val);
+ }
+ 
+-#define MDIO_ATTEMPTS 10
++#define MDIO_ATTEMPTS 20
+ 
+ /*
+- * MI1 read/write operations for direct-addressed PHYs.
++ * MI1 read/write operations for clause 22 PHYs.
+  */
+-static int mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
+-		    int reg_addr, unsigned int *valp)
++static int t3_mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
++		       int reg_addr, unsigned int *valp)
+ {
+ 	int ret;
+ 	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
+@@ -217,16 +214,17 @@ static int mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&adapter->mdio_lock);
++	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
+ 	t3_write_reg(adapter, A_MI1_ADDR, addr);
+ 	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
+-	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
++	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
+ 	if (!ret)
+ 		*valp = t3_read_reg(adapter, A_MI1_DATA);
+ 	mutex_unlock(&adapter->mdio_lock);
+ 	return ret;
+ }
+ 
+-static int mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
++static int t3_mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
+ 		     int reg_addr, unsigned int val)
+ {
+ 	int ret;
+@@ -236,37 +234,51 @@ static int mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&adapter->mdio_lock);
++	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
+ 	t3_write_reg(adapter, A_MI1_ADDR, addr);
+ 	t3_write_reg(adapter, A_MI1_DATA, val);
+ 	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
+-	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
++	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
+ 	mutex_unlock(&adapter->mdio_lock);
+ 	return ret;
+ }
+ 
+ static const struct mdio_ops mi1_mdio_ops = {
+-	mi1_read,
+-	mi1_write
++	t3_mi1_read,
++	t3_mi1_write
+ };
+ 
+ /*
++ * Performs the address cycle for clause 45 PHYs.
++ * Must be called with the MDIO_LOCK held.
++ */
++static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
++		       int reg_addr)
++{
++	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
++
++	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
++	t3_write_reg(adapter, A_MI1_ADDR, addr);
++	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
++	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
++	return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
++			       MDIO_ATTEMPTS, 10);
++}
++
++/*
+  * MI1 read/write operations for indirect-addressed PHYs.
+  */
+ static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
+ 			int reg_addr, unsigned int *valp)
+ {
+ 	int ret;
+-	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
+ 
+ 	mutex_lock(&adapter->mdio_lock);
+-	t3_write_reg(adapter, A_MI1_ADDR, addr);
+-	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
+-	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
+-	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
++	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
+ 	if (!ret) {
+ 		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
+ 		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
+-				      MDIO_ATTEMPTS, 20);
++				      MDIO_ATTEMPTS, 10);
+ 		if (!ret)
+ 			*valp = t3_read_reg(adapter, A_MI1_DATA);
+ 	}
+@@ -278,18 +290,14 @@ static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
+ 			 int reg_addr, unsigned int val)
+ {
+ 	int ret;
+-	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
+ 
+ 	mutex_lock(&adapter->mdio_lock);
+-	t3_write_reg(adapter, A_MI1_ADDR, addr);
+-	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
+-	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
+-	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
++	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
+ 	if (!ret) {
+ 		t3_write_reg(adapter, A_MI1_DATA, val);
+ 		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
+ 		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
+-				      MDIO_ATTEMPTS, 20);
++				      MDIO_ATTEMPTS, 10);
+ 	}
+ 	mutex_unlock(&adapter->mdio_lock);
+ 	return ret;
+@@ -434,27 +442,52 @@ int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
+ 	return mdio_write(phy, 0, MII_BMCR, ctl);
+ }
+ 
++int t3_phy_lasi_intr_enable(struct cphy *phy)
++{
++	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
++}
++
++int t3_phy_lasi_intr_disable(struct cphy *phy)
++{
++	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
++}
++
++int t3_phy_lasi_intr_clear(struct cphy *phy)
++{
++	u32 val;
++
++	return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
++}
++
++int t3_phy_lasi_intr_handler(struct cphy *phy)
++{
++	unsigned int status;
++	int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
++
++	if (err)
++		return err;
++	return (status & 1) ?  cphy_cause_link_change : 0;
++}
++
+ static const struct adapter_info t3_adap_info[] = {
+-	{2, 0, 0, 0,
++	{2, 0,
+ 	 F_GPIO2_OEN | F_GPIO4_OEN |
+-	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
+-	 0,
++	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
+ 	 &mi1_mdio_ops, "Chelsio PE9000"},
+-	{2, 0, 0, 0,
++	{2, 0,
+ 	 F_GPIO2_OEN | F_GPIO4_OEN |
+-	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
+-	 0,
++	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
+ 	 &mi1_mdio_ops, "Chelsio T302"},
+-	{1, 0, 0, 0,
++	{1, 0,
+ 	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
+ 	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
+-	 0, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
++	 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
+ 	 &mi1_mdio_ext_ops, "Chelsio T310"},
+-	{2, 0, 0, 0,
++	{2, 0,
+ 	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
+ 	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
+-	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
+-	 SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
++	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
++	 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
+ 	 &mi1_mdio_ext_ops, "Chelsio T320"},
+ };
+ 
+@@ -467,29 +500,23 @@ const struct adapter_info *t3_get_adapter_info(unsigned int id)
+ 	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
+ }
+ 
+-#define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
+-		 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
+-#define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)
++struct port_type_info {
++	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
++			int phy_addr, const struct mdio_ops *ops);
++};
+ 
+ static const struct port_type_info port_types[] = {
+-	{NULL},
+-	{t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
+-	 "10GBASE-XR"},
+-	{t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
+-	 "10/100/1000BASE-T"},
+-	{NULL, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
+-	 "10/100/1000BASE-T"},
+-	{t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
+-	{NULL, CAPS_10G, "10GBASE-KX4"},
+-	{t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
+-	{t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
+-	 "10GBASE-SR"},
+-	{NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
++	{ NULL },
++	{ t3_ael1002_phy_prep },
++	{ t3_vsc8211_phy_prep },
++	{ NULL},
++	{ t3_xaui_direct_phy_prep },
++	{ t3_ael2005_phy_prep },
++	{ t3_qt2045_phy_prep },
++	{ t3_ael1006_phy_prep },
++	{ NULL },
+ };
+ 
+-#undef CAPS_1G
+-#undef CAPS_10G
+-
+ #define VPD_ENTRY(name, len) \
+ 	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
+ 
+@@ -1132,6 +1159,15 @@ void t3_link_changed(struct adapter *adapter, int port_id)
+ 
+ 	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
+ 
++	if (lc->requested_fc & PAUSE_AUTONEG)
++		fc &= lc->requested_fc;
++	else
++		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
++
++	if (link_ok == lc->link_ok && speed == lc->speed &&
++	    duplex == lc->duplex && fc == lc->fc)
++		return;                            /* nothing changed */
++
+ 	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
+ 	    uses_xaui(adapter)) {
+ 		if (link_ok)
+@@ -1142,10 +1178,6 @@ void t3_link_changed(struct adapter *adapter, int port_id)
+ 	lc->link_ok = link_ok;
+ 	lc->speed = speed < 0 ? SPEED_INVALID : speed;
+ 	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
+-	if (lc->requested_fc & PAUSE_AUTONEG)
+-		fc &= lc->requested_fc;
+-	else
+-		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+ 
+ 	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
+ 		/* Set MAC speed, duplex, and flow control to match PHY. */
+@@ -1191,7 +1223,6 @@ int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
+ 						   fc);
+ 			/* Also disables autoneg */
+ 			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
+-			phy->ops->reset(phy, 0);
+ 		} else
+ 			phy->ops->autoneg_enable(phy);
+ 	} else {
+@@ -1682,25 +1713,23 @@ static int mac_intr_handler(struct adapter *adap, unsigned int idx)
+  */
+ int t3_phy_intr_handler(struct adapter *adapter)
+ {
+-	u32 mask, gpi = adapter_info(adapter)->gpio_intr;
+ 	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
+ 
+ 	for_each_port(adapter, i) {
+ 		struct port_info *p = adap2pinfo(adapter, i);
+ 
+-		mask = gpi - (gpi & (gpi - 1));
+-		gpi -= mask;
+-
+-		if (!(p->port_type->caps & SUPPORTED_IRQ))
++		if (!(p->phy.caps & SUPPORTED_IRQ))
+ 			continue;
+ 
+-		if (cause & mask) {
++		if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
+ 			int phy_cause = p->phy.ops->intr_handler(&p->phy);
+ 
+ 			if (phy_cause & cphy_cause_link_change)
+ 				t3_link_changed(adapter, i);
+ 			if (phy_cause & cphy_cause_fifo_error)
+ 				p->phy.fifo_errors++;
++			if (phy_cause & cphy_cause_module_change)
++				t3_os_phymod_changed(adapter, i);
+ 		}
+ 	}
+ 
+@@ -1763,6 +1792,17 @@ int t3_slow_intr_handler(struct adapter *adapter)
+ 	return 1;
+ }
+ 
++static unsigned int calc_gpio_intr(struct adapter *adap)
++{
++	unsigned int i, gpi_intr = 0;
++
++	for_each_port(adap, i)
++		if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
++		    adapter_info(adap)->gpio_intr[i])
++			gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
++	return gpi_intr;
++}
++
+ /**
+  *	t3_intr_enable - enable interrupts
+  *	@adapter: the adapter whose interrupts should be enabled
+@@ -1805,10 +1845,8 @@ void t3_intr_enable(struct adapter *adapter)
+ 		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
+ 	}
+ 
+-	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
+-		     adapter_info(adapter)->gpio_intr);
+-	t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
+-		     adapter_info(adapter)->gpio_intr);
++	t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
++
+ 	if (is_pcie(adapter))
+ 		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
+ 	else
+@@ -3329,6 +3367,8 @@ int t3_init_hw(struct adapter *adapter, u32 fw_params)
+ 	init_hw_for_avail_ports(adapter, adapter->params.nports);
+ 	t3_sge_init(adapter, &adapter->params.sge);
+ 
++	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
++
+ 	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
+ 	t3_write_reg(adapter, A_CIM_BOOT_CFG,
+ 		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
+@@ -3556,7 +3596,7 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
+ 		    int reset)
+ {
+ 	int ret;
+-	unsigned int i, j = 0;
++	unsigned int i, j = -1;
+ 
+ 	get_pci_mode(adapter, &adapter->params.pci);
+ 
+@@ -3620,22 +3660,24 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
+ 
+ 	for_each_port(adapter, i) {
+ 		u8 hw_addr[6];
++		const struct port_type_info *pti;
+ 		struct port_info *p = adap2pinfo(adapter, i);
+ 
+-		while (!adapter->params.vpd.port_type[j])
+-			++j;
++		while (!adapter->params.vpd.port_type[++j])
++			;
+ 
+-		p->port_type = &port_types[adapter->params.vpd.port_type[j]];
+- 		if (!p->port_type->phy_prep) {
++		pti = &port_types[adapter->params.vpd.port_type[j]];
++ 		if (!pti->phy_prep) {
+  			CH_ALERT(adapter, "Invalid port type index %d\n",
+  				 adapter->params.vpd.port_type[j]);
+  			return -EINVAL;
+  		}
+  
+-		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
++		ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
+ 				       ai->mdio_ops);
++		if (ret)
++			return ret;
+ 		mac_prep(&p->mac, adapter, j);
+-		++j;
+ 
+ 		/*
+ 		 * The VPD EEPROM stores the base Ethernet address for the
+@@ -3649,9 +3691,9 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
+ 		       ETH_ALEN);
+ 		memcpy(adapter->port[i]->perm_addr, hw_addr,
+ 		       ETH_ALEN);
+-		init_link_config(&p->link_config, p->port_type->caps);
++		init_link_config(&p->link_config, p->phy.caps);
+ 		p->phy.ops->power_down(&p->phy, 1);
+-		if (!(p->port_type->caps & SUPPORTED_IRQ))
++		if (!(p->phy.caps & SUPPORTED_IRQ))
+ 			adapter->params.linkpoll_period = 10;
+ 	}
+ 
+@@ -3667,7 +3709,7 @@ void t3_led_ready(struct adapter *adapter)
+ int t3_replay_prep_adapter(struct adapter *adapter)
+ {
+ 	const struct adapter_info *ai = adapter->params.info;
+-	unsigned int i, j = 0;
++	unsigned int i, j = -1;
+ 	int ret;
+ 
+ 	early_hw_init(adapter, ai);
+@@ -3676,15 +3718,17 @@ int t3_replay_prep_adapter(struct adapter *adapter)
+ 		return ret;
+ 
+ 	for_each_port(adapter, i) {
++		const struct port_type_info *pti;
+ 		struct port_info *p = adap2pinfo(adapter, i);
+-		while (!adapter->params.vpd.port_type[j])
+-			++j;
+ 
+-		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
+-					ai->mdio_ops);
++		while (!adapter->params.vpd.port_type[++j])
++			;
+ 
++		pti = &port_types[adapter->params.vpd.port_type[j]];
++		ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
++		if (ret)
++			return ret;
+ 		p->phy.ops->power_down(&p->phy, 1);
+-		++j;
+ 	}
+ 
+ return 0;
+diff --git a/drivers/net/cxgb3/vsc8211.c b/drivers/net/cxgb3/vsc8211.c
+index eee4285..68e6334 100644
+--- a/drivers/net/cxgb3/vsc8211.c
++++ b/drivers/net/cxgb3/vsc8211.c
+@@ -221,8 +221,12 @@ static struct cphy_ops vsc8211_ops = {
+ 	.power_down = vsc8211_power_down,
+ };
+ 
+-void t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
+-			 int phy_addr, const struct mdio_ops *mdio_ops)
++int t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
++			int phy_addr, const struct mdio_ops *mdio_ops)
+ {
+-	cphy_init(phy, adapter, phy_addr, &vsc8211_ops, mdio_ops);
++	cphy_init(phy, adapter, phy_addr, &vsc8211_ops, mdio_ops,
++		  SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full |
++		  SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII |
++		  SUPPORTED_TP | SUPPORTED_IRQ, "10/100/1000BASE-T");
++	return 0;
+ }

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/cxgb3_00700_firmware_to_7_4.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/cxgb3_00700_firmware_to_7_4.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/cxgb3_00700_firmware_to_7_4.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,12 @@
+diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
+index 29db711..5a7adba 100644
+--- a/drivers/net/cxgb3/version.h
++++ b/drivers/net/cxgb3/version.h
+@@ -39,6 +39,6 @@
+ 
+ /* Firmware version */
+ #define FW_VERSION_MAJOR 7
+-#define FW_VERSION_MINOR 0
++#define FW_VERSION_MINOR 4
+ #define FW_VERSION_MICRO 0
+ #endif				/* __CHELSIO_VERSION_H */

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0050_pd_locking_fix.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0050_pd_locking_fix.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0050_pd_locking_fix.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -5,9 +5,9 @@
 Signed-off-by: Dave Olson <dave.olson at qlogic.com>
 ---
 
-diff -upr ofed_kernel-fixes-orig/drivers/infiniband/hw/ipath/ipath_driver.c ofed_kernel-fixes/drivers/infiniband/hw/ipath/ipath_driver.c
---- ofed_kernel-fixes-orig/drivers/infiniband/hw/ipath/ipath_driver.c	2008-10-23 10:44:37.000000000 -0700
-+++ ofed_kernel-fixes/drivers/infiniband/hw/ipath/ipath_driver.c	2008-10-28 11:55:46.000000000 -0700
+diff -upr ofed_kernel-orig/drivers/infiniband/hw/ipath/ipath_driver.c ofed_kernel/drivers/infiniband/hw/ipath/ipath_driver.c
+--- ofed_kernel-orig/drivers/infiniband/hw/ipath/ipath_driver.c	2009-01-12 10:44:16.000000000 -0800
++++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_driver.c	2009-01-12 15:26:55.093509000 -0800
 @@ -661,6 +661,8 @@ bail:
  static void __devexit cleanup_device(struct ipath_devdata *dd)
  {
@@ -95,9 +95,9 @@
  	return any;
  }
  
-diff -upr ofed_kernel-fixes-orig/drivers/infiniband/hw/ipath/ipath_file_ops.c ofed_kernel-fixes/drivers/infiniband/hw/ipath/ipath_file_ops.c
---- ofed_kernel-fixes-orig/drivers/infiniband/hw/ipath/ipath_file_ops.c	2008-10-23 10:44:37.000000000 -0700
-+++ ofed_kernel-fixes/drivers/infiniband/hw/ipath/ipath_file_ops.c	2008-10-28 11:55:34.000000000 -0700
+diff -upr ofed_kernel-orig/drivers/infiniband/hw/ipath/ipath_file_ops.c ofed_kernel/drivers/infiniband/hw/ipath/ipath_file_ops.c
+--- ofed_kernel-orig/drivers/infiniband/hw/ipath/ipath_file_ops.c	2009-01-12 10:44:16.000000000 -0800
++++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_file_ops.c	2009-01-12 15:26:55.111489000 -0800
 @@ -2041,7 +2041,9 @@ static int ipath_close(struct inode *in,
  	struct ipath_filedata *fd;
  	struct ipath_portdata *pd;
@@ -145,9 +145,9 @@
  	mutex_unlock(&ipath_mutex);
  	ipath_free_pddata(dd, pd); /* after releasing the mutex */
  
-diff -upr ofed_kernel-fixes-orig/drivers/infiniband/hw/ipath/ipath_init_chip.c ofed_kernel-fixes/drivers/infiniband/hw/ipath/ipath_init_chip.c
---- ofed_kernel-fixes-orig/drivers/infiniband/hw/ipath/ipath_init_chip.c	2008-10-23 10:44:37.000000000 -0700
-+++ ofed_kernel-fixes/drivers/infiniband/hw/ipath/ipath_init_chip.c	2008-10-28 11:54:51.000000000 -0700
+diff -upr ofed_kernel-orig/drivers/infiniband/hw/ipath/ipath_init_chip.c ofed_kernel/drivers/infiniband/hw/ipath/ipath_init_chip.c
+--- ofed_kernel-orig/drivers/infiniband/hw/ipath/ipath_init_chip.c	2009-01-12 10:44:16.000000000 -0800
++++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_init_chip.c	2009-01-12 15:26:55.123479000 -0800
 @@ -229,6 +229,7 @@ static int init_chip_first(struct ipath_
  	spin_lock_init(&dd->ipath_kernel_tid_lock);
  	spin_lock_init(&dd->ipath_user_tid_lock);
@@ -156,9 +156,56 @@
  	spin_lock_init(&dd->ipath_sdma_lock);
  	spin_lock_init(&dd->ipath_gpio_lock);
  	spin_lock_init(&dd->ipath_eep_st_lock);
-diff -upr ofed_kernel-fixes-orig/drivers/infiniband/hw/ipath/ipath_kernel.h ofed_kernel-fixes/drivers/infiniband/hw/ipath/ipath_kernel.h
---- ofed_kernel-fixes-orig/drivers/infiniband/hw/ipath/ipath_kernel.h	2008-10-23 10:44:37.000000000 -0700
-+++ ofed_kernel-fixes/drivers/infiniband/hw/ipath/ipath_kernel.h	2008-10-28 11:54:51.000000000 -0700
+diff -upr ofed_kernel-orig/drivers/infiniband/hw/ipath/ipath_intr.c ofed_kernel/drivers/infiniband/hw/ipath/ipath_intr.c
+--- ofed_kernel-orig/drivers/infiniband/hw/ipath/ipath_intr.c	2009-01-12 10:44:16.000000000 -0800
++++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_intr.c	2009-01-12 15:41:37.110608000 -0800
+@@ -587,8 +587,10 @@ static int handle_hdrq_full(struct ipath
+ 	int chkerrpkts = 0;
+ 	u32 hd, tl;
+ 	u32 i;
++	unsigned long flags;
+ 
+ 	ipath_stats.sps_hdrqfull++;
++	spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
+ 	for (i = 0; i < dd->ipath_cfgports; i++) {
+ 		struct ipath_portdata *pd = dd->ipath_pd[i];
+ 
+@@ -624,6 +626,7 @@ static int handle_hdrq_full(struct ipath
+ 			wake_up_interruptible(&pd->port_wait);
+ 		}
+ 	}
++	spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
+ 
+ 	return chkerrpkts;
+ }
+@@ -1020,6 +1023,7 @@ static void handle_urcv(struct ipath_dev
+ 	u64 portr;
+ 	int i;
+ 	int rcvdint = 0;
++	unsigned long flags;
+ 
+ 	/*
+ 	 * test_and_clear_bit(IPATH_PORT_WAITING_RCV) and
+@@ -1035,6 +1039,7 @@ static void handle_urcv(struct ipath_dev
+ 		 dd->ipath_i_rcvavail_mask) |
+ 		((istat >> dd->ipath_i_rcvurg_shift) &
+ 		 dd->ipath_i_rcvurg_mask);
++	spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
+ 	for (i = 1; i < dd->ipath_cfgports; i++) {
+ 		struct ipath_portdata *pd = dd->ipath_pd[i];
+ 
+@@ -1052,6 +1057,8 @@ static void handle_urcv(struct ipath_dev
+ 			}
+ 		}
+ 	}
++	spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
++	
+ 	if (rcvdint) {
+ 		/* only want to take one interrupt, so turn off the rcv
+ 		 * interrupt for all the ports that we set the rcv_waiting
+diff -upr ofed_kernel-orig/drivers/infiniband/hw/ipath/ipath_kernel.h ofed_kernel/drivers/infiniband/hw/ipath/ipath_kernel.h
+--- ofed_kernel-orig/drivers/infiniband/hw/ipath/ipath_kernel.h	2009-01-12 10:44:16.000000000 -0800
++++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_kernel.h	2009-01-12 15:26:55.138464000 -0800
 @@ -464,6 +464,8 @@ struct ipath_devdata {
  	spinlock_t ipath_kernel_tid_lock;
  	spinlock_t ipath_user_tid_lock;
@@ -168,9 +215,9 @@
  
  	/*
  	 * IPATH_STATUS_*,
-diff -upr ofed_kernel-fixes-orig/drivers/infiniband/hw/ipath/ipath_keys.c ofed_kernel-fixes/drivers/infiniband/hw/ipath/ipath_keys.c
---- ofed_kernel-fixes-orig/drivers/infiniband/hw/ipath/ipath_keys.c	2008-10-23 10:44:37.000000000 -0700
-+++ ofed_kernel-fixes/drivers/infiniband/hw/ipath/ipath_keys.c	2008-10-28 11:54:51.000000000 -0700
+diff -upr ofed_kernel-orig/drivers/infiniband/hw/ipath/ipath_keys.c ofed_kernel/drivers/infiniband/hw/ipath/ipath_keys.c
+--- ofed_kernel-orig/drivers/infiniband/hw/ipath/ipath_keys.c	2009-01-12 10:44:16.000000000 -0800
++++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_keys.c	2009-01-12 15:26:55.149451000 -0800
 @@ -132,6 +132,7 @@ int ipath_lkey_ok(struct ipath_qp *qp, s
  	 * (see ipath_get_dma_mr and ipath_dma.c).
  	 */
@@ -187,9 +234,9 @@
  		struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
  
  		if (pd->user) {
-diff -upr ofed_kernel-fixes-orig/drivers/infiniband/hw/ipath/ipath_mad.c ofed_kernel-fixes/drivers/infiniband/hw/ipath/ipath_mad.c
---- ofed_kernel-fixes-orig/drivers/infiniband/hw/ipath/ipath_mad.c	2008-10-23 10:44:37.000000000 -0700
-+++ ofed_kernel-fixes/drivers/infiniband/hw/ipath/ipath_mad.c	2008-10-28 11:54:51.000000000 -0700
+diff -upr ofed_kernel-orig/drivers/infiniband/hw/ipath/ipath_mad.c ofed_kernel/drivers/infiniband/hw/ipath/ipath_mad.c
+--- ofed_kernel-orig/drivers/infiniband/hw/ipath/ipath_mad.c	2009-01-12 10:44:16.000000000 -0800
++++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_mad.c	2009-01-12 15:26:55.164437000 -0800
 @@ -348,6 +348,7 @@ bail:
   */
  static int get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
@@ -206,10 +253,10 @@
  	pd = dd->ipath_pd[0];
  
  	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
-diff -upr ofed_kernel-fixes-orig/drivers/infiniband/hw/ipath/ipath_verbs.c ofed_kernel-fixes/drivers/infiniband/hw/ipath/ipath_verbs.c
---- ofed_kernel-fixes-orig/drivers/infiniband/hw/ipath/ipath_verbs.c	2008-10-23 10:44:39.000000000 -0700
-+++ ofed_kernel-fixes/drivers/infiniband/hw/ipath/ipath_verbs.c	2008-10-28 11:54:51.000000000 -0700
-@@ -1852,7 +1852,7 @@ unsigned ipath_get_npkeys(struct ipath_d
+diff -upr ofed_kernel-orig/drivers/infiniband/hw/ipath/ipath_verbs.c ofed_kernel/drivers/infiniband/hw/ipath/ipath_verbs.c
+--- ofed_kernel-orig/drivers/infiniband/hw/ipath/ipath_verbs.c	2009-01-12 15:23:06.740092000 -0800
++++ ofed_kernel/drivers/infiniband/hw/ipath/ipath_verbs.c	2009-01-12 15:26:55.202398000 -0800
+@@ -1863,7 +1863,7 @@ unsigned ipath_get_npkeys(struct ipath_d
  }
  
  /**
@@ -218,7 +265,7 @@
   * @dd: the infinipath device
   * @index: the PKEY index
   */
-@@ -1860,6 +1860,7 @@ unsigned ipath_get_pkey(struct ipath_dev
+@@ -1871,6 +1871,7 @@ unsigned ipath_get_pkey(struct ipath_dev
  {
  	unsigned ret;
  

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0055_pat.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0055_pat.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0055_pat.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -467,7 +467,7 @@
 + *   2 is WC via PAT and over-ride chip-set wc errata and PAT checks
 + *   If PAT initialization fails, code reverts back to MTRR
 + */
-+unsigned ipath_wc_pat; /* current default (0) is to use MTRR not PAT */
++unsigned ipath_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
 +module_param_named(wc_pat, ipath_wc_pat, uint, S_IRUGO);
 +MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
 +

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0090_mr_refcount.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0090_mr_refcount.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0090_mr_refcount.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,1656 @@
+IB/ipath: Fix memory region reference counts for resent RC packets
+
+The verbs functions for freeing a memory region don't prevent a user
+process from freeing a memory region while work requests still reference
+it. Also, if a RC QP work request is being resent, it has an implicit
+reference to the memory region via the LKey. The ACK can come back
+and generate a completion which could signal the verbs consumer that
+it is safe to free a memory region when it is actually still being
+referenced by the driver. This patch delays the completion entry
+until such packets are finished being sent.
+
+Signed-off-by: Ralph Campbell <ralph.campbell at qlogic.com>
+diff -up a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c
+--- a/drivers/infiniband/hw/ipath/ipath_keys.c	2008-12-10 18:04:07.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_keys.c	2008-12-10 18:07:56.000000000 -0800
+@@ -93,17 +93,35 @@ bail:
+  * @rkt: table from which to free the lkey
+  * @lkey: lkey id to free
+  */
+-void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey)
++int ipath_free_lkey(struct ipath_ibdev *dev, struct ipath_mregion *mr)
+ {
+ 	unsigned long flags;
++	u32 lkey = mr->lkey;
+ 	u32 r;
++	int ret;
+ 
+-	if (lkey == 0)
+-		return;
+-	r = lkey >> (32 - ib_ipath_lkey_table_size);
+-	spin_lock_irqsave(&rkt->lock, flags);
+-	rkt->table[r] = NULL;
+-	spin_unlock_irqrestore(&rkt->lock, flags);
++	spin_lock_irqsave(&dev->lk_table.lock, flags);
++	if (lkey == 0) {
++		if (dev->dma_mr) {
++			ret = atomic_read(&dev->dma_mr->refcount);
++			if (!ret && dev->dma_mr == mr)
++				dev->dma_mr = NULL;
++		} else
++			ret = 0;
++	} else {
++		r = lkey >> (32 - ib_ipath_lkey_table_size);
++		ret = atomic_read(&dev->lk_table.table[r]->refcount);
++		if (!ret)
++			dev->lk_table.table[r] = NULL;
++	}
++	spin_unlock_irqrestore(&dev->lk_table.lock, flags);
++
++	if (ret) {
++		ipath_dbg("ipath_free_lkey: MR busy (LKEY %x cnt %u)\n",
++			  lkey, ret);
++		ret = -EBUSY;
++	}
++	return ret;
+ }
+ 
+ /**
+@@ -125,41 +143,41 @@ int ipath_lkey_ok(struct ipath_qp *qp, s
+ 	struct ipath_mregion *mr;
+ 	unsigned n, m;
+ 	size_t off;
+-	int ret;
++	int ret = 0;
++	unsigned long flags;
+ 
+ 	/*
+ 	 * We use LKEY == zero for kernel virtual addresses
+ 	 * (see ipath_get_dma_mr and ipath_dma.c).
+ 	 */
++	spin_lock_irqsave(&rkt->lock, flags);
+ 	if (sge->lkey == 0) {
+-		/* always a kernel port, no locking needed */
+ 		struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
++		struct ipath_ibdev *dev = to_idev(pd->ibpd.device);
+ 
+-		if (pd->user) {
+-			ret = 0;
++		if (pd->user)
+ 			goto bail;
+-		}
+-		isge->mr = NULL;
++		if (!dev->dma_mr)
++			goto bail;
++		atomic_inc(&dev->dma_mr->refcount);
++		isge->mr = dev->dma_mr;
+ 		isge->vaddr = (void *) sge->addr;
+ 		isge->length = sge->length;
+ 		isge->sge_length = sge->length;
+-		ret = 1;
+-		goto bail;
++		isge->m = 0;
++		isge->n = 0;
++		goto ok;
+ 	}
+ 	mr = rkt->table[(sge->lkey >> (32 - ib_ipath_lkey_table_size))];
+ 	if (unlikely(mr == NULL || mr->lkey != sge->lkey ||
+-		     qp->ibqp.pd != mr->pd)) {
+-		ret = 0;
++		     qp->ibqp.pd != mr->pd))
+ 		goto bail;
+-	}
+ 
+ 	off = sge->addr - mr->user_base;
+ 	if (unlikely(sge->addr < mr->user_base ||
+ 		     off + sge->length > mr->length ||
+-		     (mr->access_flags & acc) != acc)) {
+-		ret = 0;
++		     (mr->access_flags & acc) != acc))
+ 		goto bail;
+-	}
+ 
+ 	off += mr->offset;
+ 	m = 0;
+@@ -172,16 +190,17 @@ int ipath_lkey_ok(struct ipath_qp *qp, s
+ 			n = 0;
+ 		}
+ 	}
++	atomic_inc(&mr->refcount);
+ 	isge->mr = mr;
+ 	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
+ 	isge->length = mr->map[m]->segs[n].length - off;
+ 	isge->sge_length = sge->length;
+ 	isge->m = m;
+ 	isge->n = n;
+-
++ok:
+ 	ret = 1;
+-
+ bail:
++	spin_unlock_irqrestore(&rkt->lock, flags);
+ 	return ret;
+ }
+ 
+@@ -205,43 +224,45 @@ int ipath_rkey_ok(struct ipath_qp *qp, s
+ 	struct ipath_mregion *mr;
+ 	unsigned n, m;
+ 	size_t off;
+-	int ret;
++	int ret = 0;
++	unsigned long flags;
++
++	ss->num_sge = 0;
+ 
+ 	/*
+ 	 * We use RKEY == zero for kernel virtual addresses
+ 	 * (see ipath_get_dma_mr and ipath_dma.c).
+ 	 */
++	spin_lock_irqsave(&rkt->lock, flags);
+ 	if (rkey == 0) {
+-		/* always a kernel port, no locking needed */
+ 		struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
++		struct ipath_ibdev *dev = to_idev(pd->ibpd.device);
+ 
+-		if (pd->user) {
+-			ret = 0;
++		if (pd->user)
+ 			goto bail;
+-		}
+-		sge->mr = NULL;
++		if (!dev->dma_mr)
++			goto bail;
++		atomic_inc(&dev->dma_mr->refcount);
++		sge->mr = dev->dma_mr;
+ 		sge->vaddr = (void *) vaddr;
+ 		sge->length = len;
+ 		sge->sge_length = len;
++		sge->m = 0;
++		sge->n = 0;
+ 		ss->sg_list = NULL;
+ 		ss->num_sge = 1;
+-		ret = 1;
+-		goto bail;
++		goto ok;
+ 	}
+ 
+ 	mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))];
+ 	if (unlikely(mr == NULL || mr->lkey != rkey ||
+-		     qp->ibqp.pd != mr->pd)) {
+-		ret = 0;
++		     qp->ibqp.pd != mr->pd))
+ 		goto bail;
+-	}
+ 
+ 	off = vaddr - mr->iova;
+ 	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
+-		     (mr->access_flags & acc) == 0)) {
+-		ret = 0;
++		     (mr->access_flags & acc) == 0))
+ 		goto bail;
+-	}
+ 
+ 	off += mr->offset;
+ 	m = 0;
+@@ -254,6 +275,7 @@ int ipath_rkey_ok(struct ipath_qp *qp, s
+ 			n = 0;
+ 		}
+ 	}
++	atomic_inc(&mr->refcount);
+ 	sge->mr = mr;
+ 	sge->vaddr = mr->map[m]->segs[n].vaddr + off;
+ 	sge->length = mr->map[m]->segs[n].length - off;
+@@ -262,9 +284,9 @@ int ipath_rkey_ok(struct ipath_qp *qp, s
+ 	sge->n = n;
+ 	ss->sg_list = NULL;
+ 	ss->num_sge = 1;
+-
++ok:
+ 	ret = 1;
+-
+ bail:
++	spin_unlock_irqrestore(&rkt->lock, flags);
+ 	return ret;
+ }
+diff -up a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
+--- a/drivers/infiniband/hw/ipath/ipath_mr.c	2008-12-10 18:04:03.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_mr.c	2008-12-10 18:07:56.000000000 -0800
+@@ -35,6 +35,7 @@
+ #include <rdma/ib_pack.h>
+ #include <rdma/ib_smi.h>
+ 
++#include "ipath_kernel.h"
+ #include "ipath_verbs.h"
+ 
+ /* Fast memory region */
+@@ -60,8 +61,15 @@ static inline struct ipath_fmr *to_ifmr(
+  */
+ struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc)
+ {
++	struct ipath_ibdev *dev = to_idev(pd->device);
+ 	struct ipath_mr *mr;
+ 	struct ib_mr *ret;
++	unsigned long flags;
++
++	if (to_ipd(pd)->user) {
++		ret = ERR_PTR(-EPERM);
++		goto bail;
++	}
+ 
+ 	mr = kzalloc(sizeof *mr, GFP_KERNEL);
+ 	if (!mr) {
+@@ -70,6 +78,13 @@ struct ib_mr *ipath_get_dma_mr(struct ib
+ 	}
+ 
+ 	mr->mr.access_flags = acc;
++	atomic_set(&mr->mr.refcount, 0);
++
++	spin_lock_irqsave(&dev->lk_table.lock, flags);
++	if (!dev->dma_mr)
++		dev->dma_mr = &mr->mr;
++	spin_unlock_irqrestore(&dev->lk_table.lock, flags);
++
+ 	ret = &mr->ibmr;
+ 
+ bail:
+@@ -104,6 +119,7 @@ static struct ipath_mr *alloc_mr(int cou
+ 		goto bail;
+ 	mr->ibmr.rkey = mr->ibmr.lkey = mr->mr.lkey;
+ 
++	atomic_set(&mr->mr.refcount, 0);
+ 	goto done;
+ 
+ bail:
+@@ -258,9 +274,14 @@ bail:
+ int ipath_dereg_mr(struct ib_mr *ibmr)
+ {
+ 	struct ipath_mr *mr = to_imr(ibmr);
++	struct ipath_ibdev *dev = to_idev(ibmr->device);
++	int ret;
+ 	int i;
+ 
+-	ipath_free_lkey(&to_idev(ibmr->device)->lk_table, ibmr->lkey);
++	ret = ipath_free_lkey(dev, &mr->mr);
++	if (ret)
++		return ret;
++
+ 	i = mr->mr.mapsz;
+ 	while (i) {
+ 		i--;
+@@ -324,6 +345,7 @@ struct ib_fmr *ipath_alloc_fmr(struct ib
+ 	fmr->mr.max_segs = fmr_attr->max_pages;
+ 	fmr->page_shift = fmr_attr->page_shift;
+ 
++	atomic_set(&fmr->mr.refcount, 0);
+ 	ret = &fmr->ibfmr;
+ 	goto done;
+ 
+@@ -357,6 +379,12 @@ int ipath_map_phys_fmr(struct ib_fmr *ib
+ 	u32 ps;
+ 	int ret;
+ 
++	if (atomic_read(&fmr->mr.refcount)) {
++		ipath_dbg("FMR modified when busy (LKEY %x cnt %u)\n",
++			  fmr->mr.lkey, atomic_read(&fmr->mr.refcount));
++		return -EBUSY;
++	}
++
+ 	if (list_len > fmr->mr.max_segs) {
+ 		ret = -EINVAL;
+ 		goto bail;
+@@ -400,6 +428,10 @@ int ipath_unmap_fmr(struct list_head *fm
+ 	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
+ 		rkt = &to_idev(fmr->ibfmr.device)->lk_table;
+ 		spin_lock_irqsave(&rkt->lock, flags);
++		if (atomic_read(&fmr->mr.refcount))
++			ipath_dbg("FMR busy (LKEY %x cnt %u)\n",
++				  fmr->mr.lkey, atomic_read(&fmr->mr.refcount));
++
+ 		fmr->mr.user_base = 0;
+ 		fmr->mr.iova = 0;
+ 		fmr->mr.length = 0;
+@@ -417,9 +449,13 @@ int ipath_unmap_fmr(struct list_head *fm
+ int ipath_dealloc_fmr(struct ib_fmr *ibfmr)
+ {
+ 	struct ipath_fmr *fmr = to_ifmr(ibfmr);
++	int ret;
+ 	int i;
+ 
+-	ipath_free_lkey(&to_idev(ibfmr->device)->lk_table, ibfmr->lkey);
++	ret = ipath_free_lkey(to_idev(ibfmr->device), &fmr->mr);
++	if (ret)
++		return ret;
++
+ 	i = fmr->mr.mapsz;
+ 	while (i)
+ 		kfree(fmr->mr.map[--i]);
+diff -up a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
+--- a/drivers/infiniband/hw/ipath/ipath_qp.c	2008-12-10 18:04:07.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_qp.c	2008-12-10 18:07:56.000000000 -0800
+@@ -330,6 +330,10 @@ static void ipath_reset_qp(struct ipath_
+ 	qp->s_wqe = NULL;
+ 	qp->s_pkt_delay = 0;
+ 	qp->s_draining = 0;
++	qp->s_next_psn = 0;
++	qp->s_last_psn = 0;
++	qp->s_sending_psn = 0;
++	qp->s_sending_hpsn = 0;
+ 	qp->s_psn = 0;
+ 	qp->r_psn = 0;
+ 	qp->r_msn = 0;
+@@ -348,6 +352,7 @@ static void ipath_reset_qp(struct ipath_
+ 	qp->s_head = 0;
+ 	qp->s_tail = 0;
+ 	qp->s_cur = 0;
++	qp->s_acked = 0;
+ 	qp->s_last = 0;
+ 	qp->s_ssn = 1;
+ 	qp->s_lsn = 0;
+@@ -359,6 +364,54 @@ static void ipath_reset_qp(struct ipath_
+ 		qp->r_rq.wq->head = 0;
+ 		qp->r_rq.wq->tail = 0;
+ 	}
++	qp->r_sge.num_sge = 0;
++}
++
++static void clear_mr_refs(struct ipath_qp *qp, int clr_sends)
++{
++	unsigned n;
++
++	while (qp->r_sge.num_sge) {
++		atomic_dec(&qp->r_sge.sge.mr->refcount);
++		if (--qp->r_sge.num_sge)
++			qp->r_sge.sge = *qp->r_sge.sg_list++;
++	}
++
++	if (clr_sends) {
++		n = qp->s_last <= qp->s_head ? qp->s_head - qp->s_last :
++			qp->s_size - qp->s_last + qp->s_head;
++
++		while (qp->s_last != qp->s_head) {
++			struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
++			unsigned i;
++
++			for (i = 0; i < wqe->wr.num_sge; i++) {
++				struct ipath_sge *sge = &wqe->sg_list[i];
++
++				atomic_dec(&sge->mr->refcount);
++			}
++			if (++qp->s_last >= qp->s_size)
++				qp->s_last = 0;
++		}
++	}
++
++	if (qp->ibqp.qp_type != IB_QPT_RC)
++		return;
++
++	/* XXX Need to be sure that none of these are actively being sent */
++	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
++		struct ipath_ack_entry *e = &qp->s_ack_queue[n];
++		unsigned i;
++
++		if (e->opcode != IB_OPCODE_RC_RDMA_READ_REQUEST)
++			continue;
++		for (i = 0; i < e->rdma_sge.num_sge; i++) {
++			struct ipath_sge *sge = i ?
++				&e->rdma_sge.sg_list[i - 1] : &e->rdma_sge.sge;
++
++			atomic_dec(&sge->mr->refcount);
++		}
++	}
+ }
+ 
+ /**
+@@ -394,6 +447,8 @@ int ipath_error_qp(struct ipath_qp *qp, 
+ 	if (qp->s_last != qp->s_head)
+ 		ipath_schedule_send(qp);
+ 
++	clear_mr_refs(qp, 0);
++
+ 	memset(&wc, 0, sizeof(wc));
+ 	wc.qp = &qp->ibqp;
+ 	wc.opcode = IB_WC_RECV;
+@@ -552,8 +607,8 @@ int ipath_modify_qp(struct ib_qp *ibqp, 
+ 		qp->remote_qpn = attr->dest_qp_num;
+ 
+ 	if (attr_mask & IB_QP_SQ_PSN) {
+-		qp->s_psn = qp->s_next_psn = attr->sq_psn;
+-		qp->s_last_psn = qp->s_next_psn - 1;
++		qp->s_sending_psn = qp->s_psn = qp->s_next_psn = attr->sq_psn;
++		qp->s_sending_hpsn = qp->s_last_psn = qp->s_next_psn - 1;
+ 	}
+ 
+ 	if (attr_mask & IB_QP_RQ_PSN)
+@@ -994,6 +1049,8 @@ int ipath_destroy_qp(struct ib_qp *ibqp)
+ 
+ 	wait_event(qp->wait, !atomic_read(&qp->refcount));
+ 
++	clear_mr_refs(qp, 1);
++
+ 	/* all user's cleaned up, mark it available */
+ 	free_qpn(&dev->qp_table, qp->ibqp.qp_num);
+ 	spin_lock(&dev->n_qps_lock);
+@@ -1067,12 +1124,4 @@ void ipath_get_credit(struct ipath_qp *q
+ 		if (ipath_cmp24(credit, qp->s_lsn) > 0)
+ 			qp->s_lsn = credit;
+ 	}
+-
+-	/* Restart sending if it was blocked due to lack of credits. */
+-	if ((qp->s_flags & IPATH_S_WAIT_SSN_CREDIT) &&
+-	    qp->s_cur != qp->s_head &&
+-	    (qp->s_lsn == (u32) -1 ||
+-	     ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
+-			 qp->s_lsn + 1) <= 0))
+-		ipath_schedule_send(qp);
+ }
+diff -up a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
+--- a/drivers/infiniband/hw/ipath/ipath_rc.c	2008-12-10 18:04:07.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_rc.c	2008-12-10 18:14:56.000000000 -0800
+@@ -49,7 +49,7 @@ static u32 restart_sge(struct ipath_sge_
+ 	ss->sg_list = wqe->sg_list + 1;
+ 	ss->num_sge = wqe->wr.num_sge;
+ 	ss->total_len = wqe->length;
+-	ipath_skip_sge(ss, len);
++	ipath_skip_sge(ss, len, 0);
+ 	return wqe->length - len;
+ }
+ 
+@@ -225,6 +225,7 @@ int ipath_make_rc_req(struct ipath_qp *q
+ 	char newreq;
+ 	unsigned long flags;
+ 	int ret = 0;
++	int delta;
+ 
+ 	ohdr = &qp->s_hdr.u.oth;
+ 	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+@@ -255,6 +256,12 @@ int ipath_make_rc_req(struct ipath_qp *q
+ 			goto bail;
+ 		}
+ 		wqe = get_swqe_ptr(qp, qp->s_last);
++		while (qp->s_last != qp->s_acked) {
++			ipath_send_complete(qp, wqe, IB_WC_SUCCESS);
++			if (++qp->s_last >= qp->s_size)
++				qp->s_last = 0;
++			wqe = get_swqe_ptr(qp, qp->s_last);
++		}
+ 		ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ 		goto done;
+ 	}
+@@ -265,6 +272,15 @@ int ipath_make_rc_req(struct ipath_qp *q
+ 		goto bail;
+ 	}
+ 
++	/*
++	 * Leave BUSY set until sdma queue drains so we don't send
++	 * the same PSN multiple times.
++	 */
++	if (ipath_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
++		qp->s_flags |= IPATH_S_WAITING;
++		goto bail;
++	}
++
+ 	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
+ 	hwords = 5;
+ 	bth0 = 1 << 22; /* Set M bit */
+@@ -575,9 +591,8 @@ int ipath_make_rc_req(struct ipath_qp *q
+ 		ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len);
+ 		qp->s_state = OP(RDMA_READ_REQUEST);
+ 		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
+-		bth2 = qp->s_psn++ & IPATH_PSN_MASK;
+-		if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
+-			qp->s_next_psn = qp->s_psn;
++		bth2 = qp->s_psn & IPATH_PSN_MASK;
++		qp->s_psn = wqe->lpsn + 1;
+ 		ss = NULL;
+ 		len = 0;
+ 		qp->s_cur++;
+@@ -585,7 +600,9 @@ int ipath_make_rc_req(struct ipath_qp *q
+ 			qp->s_cur = 0;
+ 		break;
+ 	}
+-	if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT - 1) >= 0)
++	qp->s_sending_hpsn = bth2;
++	delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;
++	if (delta && delta % IPATH_PSN_CREDIT == 0)
+ 		bth2 |= 1 << 31;	/* Request ACK. */
+ 	qp->s_len -= len;
+ 	qp->s_hdrwords = hwords;
+@@ -725,7 +742,7 @@ done:
+  */
+ static void reset_psn(struct ipath_qp *qp, u32 psn)
+ {
+-	u32 n = qp->s_last;
++	u32 n = qp->s_acked;
+ 	struct ipath_swqe *wqe = get_swqe_ptr(qp, n);
+ 	u32 opcode;
+ 
+@@ -794,6 +811,10 @@ static void reset_psn(struct ipath_qp *q
+ 	}
+ done:
+ 	qp->s_psn = psn;
++	if (ipath_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
++		qp->s_sending_psn = psn;
++		qp->s_sending_hpsn = psn - 1;
++	}
+ }
+ 
+ /**
+@@ -806,12 +827,17 @@ done:
+  */
+ void ipath_restart_rc(struct ipath_qp *qp, u32 psn)
+ {
+-	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
++	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_acked);
+ 	struct ipath_ibdev *dev;
+ 
+ 	if (qp->s_retry == 0) {
+-		ipath_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
+-		ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
++		if (qp->s_last == qp->s_acked) {
++			ipath_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
++			ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
++		} else {
++			/* XXX need to handle delayed completion */
++			ipath_dbg("Delayed too many retries\n");
++		}
+ 		goto bail;
+ 	}
+ 	qp->s_retry--;
+@@ -840,6 +866,99 @@ bail:
+ 	return;
+ }
+ 
++/*
++ * Set qp->s_sending_psn to the next PSN after the given one.
++ * This would be psn+1 except when RDMA reads are present.
++ */
++static void reset_sending_psn(struct ipath_qp *qp, u32 psn)
++{
++	struct ipath_swqe *wqe;
++	u32 n = qp->s_last;
++
++	/* Find the work request corresponding to the given PSN. */
++	for (;;) {
++		wqe = get_swqe_ptr(qp, n);
++		if (ipath_cmp24(psn, wqe->lpsn) <= 0) {
++			if (wqe->wr.opcode == IB_WR_RDMA_READ)
++				qp->s_sending_psn = wqe->lpsn + 1;
++			else
++				qp->s_sending_psn = psn + 1;
++			break;
++		}
++		if (++n == qp->s_size)
++			n = 0;
++		if (n == qp->s_tail)
++			break;
++	}
++}
++
++/*
++ * This should be called with the QP s_lock held and interrupts disabled.
++ */
++void ipath_rc_send_complete(struct ipath_qp *qp, struct ipath_ib_header *hdr)
++{
++	struct ipath_other_headers *ohdr;
++	struct ipath_swqe *wqe;
++	struct ib_wc wc;
++	unsigned i;
++	u32 opcode;
++	u32 psn;
++
++	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND))
++		return;
++
++	/* Find out where the BTH is */
++	if ((be16_to_cpu(hdr->lrh[0]) & 3) == IPATH_LRH_BTH)
++		ohdr = &hdr->u.oth;
++	else
++		ohdr = &hdr->u.l.oth;
++
++	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
++	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
++	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
++		/* XXX Need to handle MR refcount similar to requester */
++		return;
++	}
++
++	psn = be32_to_cpu(ohdr->bth[2]);
++	reset_sending_psn(qp, psn);
++
++	while (qp->s_last != qp->s_acked) {
++		wqe = get_swqe_ptr(qp, qp->s_last);
++		if (ipath_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
++		    ipath_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
++			break;
++		for (i = 0; i < wqe->wr.num_sge; i++) {
++			struct ipath_sge *sge = &wqe->sg_list[i];
++
++			atomic_dec(&sge->mr->refcount);
++		}
++		/* Post a send completion queue entry if requested. */
++		if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
++		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
++			memset(&wc, 0, sizeof wc);
++			wc.wr_id = wqe->wr.wr_id;
++			wc.status = IB_WC_SUCCESS;
++			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
++			wc.qp = &qp->ibqp;
++			ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
++		}
++		if (++qp->s_last >= qp->s_size)
++			qp->s_last = 0;
++	}
++	/*
++	 * If we were waiting for sends to complete before resending,
++	 * and they are now complete, restart sending.
++	 */
++	if (qp->s_acked != qp->s_head &&
++	    ipath_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0 &&
++	    ipath_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
++		qp->s_sending_psn = qp->s_psn;
++		qp->s_sending_hpsn = qp->s_psn - 1;
++		ipath_schedule_send(qp);
++	}
++}
++
+ static inline void update_last_psn(struct ipath_qp *qp, u32 psn)
+ {
+ 	qp->s_last_psn = psn;
+@@ -866,6 +985,7 @@ static int do_rc_ack(struct ipath_qp *qp
+ 	int ret = 0;
+ 	u32 ack_psn;
+ 	int diff;
++	unsigned i;
+ 
+ 	/*
+ 	 * Remove the QP from the timeout queue (or RNR timeout queue).
+@@ -887,7 +1007,7 @@ static int do_rc_ack(struct ipath_qp *qp
+ 	ack_psn = psn;
+ 	if (aeth >> 29)
+ 		ack_psn--;
+-	wqe = get_swqe_ptr(qp, qp->s_last);
++	wqe = get_swqe_ptr(qp, qp->s_acked);
+ 
+ 	/*
+ 	 * The MSN might be for a later WQE than the PSN indicates so
+@@ -947,65 +1067,78 @@ static int do_rc_ack(struct ipath_qp *qp
+ 			    qp->s_flags & IPATH_S_RDMAR_PENDING)
+ 				ipath_schedule_send(qp);
+ 		}
+-		/* Post a send completion queue entry if requested. */
+-		if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
+-		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
+-			memset(&wc, 0, sizeof wc);
+-			wc.wr_id = wqe->wr.wr_id;
+-			wc.status = IB_WC_SUCCESS;
+-			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
+-			wc.byte_len = wqe->length;
+-			wc.qp = &qp->ibqp;
+-			wc.src_qp = qp->remote_qpn;
+-			wc.slid = qp->remote_ah_attr.dlid;
+-			wc.sl = qp->remote_ah_attr.sl;
+-			ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
+-		}
++		/*
++		 * Don't decrement refcount and don't generate a
++		 * completion if the WQE is being resent until the send
++		 * is finished.
++		 */
++		if (ipath_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
++		    ipath_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
++			for (i = 0; i < wqe->wr.num_sge; i++) {
++				struct ipath_sge *sge = &wqe->sg_list[i];
++
++				atomic_dec(&sge->mr->refcount);
++			}
++			/* Post a send completion queue entry if requested. */
++			if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
++			    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
++				memset(&wc, 0, sizeof wc);
++				wc.wr_id = wqe->wr.wr_id;
++				wc.status = IB_WC_SUCCESS;
++				wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
++				wc.qp = &qp->ibqp;
++				ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
++						0);
++			}
++			if (++qp->s_last >= qp->s_size)
++				qp->s_last = 0;
++		} else
++			dev->n_rc_delayed_comp++;
+ 		qp->s_retry = qp->s_retry_cnt;
+ 		/*
+ 		 * If we are completing a request which is in the process of
+ 		 * being resent, we can stop resending it since we know the
+ 		 * responder has already seen it.
+ 		 */
+-		if (qp->s_last == qp->s_cur) {
++		if (qp->s_acked == qp->s_cur) {
+ 			if (++qp->s_cur >= qp->s_size)
+ 				qp->s_cur = 0;
+-			qp->s_last = qp->s_cur;
+-			if (qp->s_last == qp->s_tail)
++			qp->s_acked = qp->s_cur;
++			if (qp->s_acked == qp->s_tail)
+ 				break;
+ 			wqe = get_swqe_ptr(qp, qp->s_cur);
+ 			qp->s_state = OP(SEND_LAST);
+ 			qp->s_psn = wqe->psn;
+ 		} else {
+-			if (++qp->s_last >= qp->s_size)
+-				qp->s_last = 0;
+-			if (qp->state == IB_QPS_SQD && qp->s_last == qp->s_cur)
++			if (++qp->s_acked >= qp->s_size)
++				qp->s_acked = 0;
++			if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
+ 				qp->s_draining = 0;
+-			if (qp->s_last == qp->s_tail)
++			if (qp->s_acked == qp->s_tail)
+ 				break;
+-			wqe = get_swqe_ptr(qp, qp->s_last);
++			wqe = get_swqe_ptr(qp, qp->s_acked);
+ 		}
+ 	}
+ 
+ 	switch (aeth >> 29) {
+ 	case 0:		/* ACK */
+ 		dev->n_rc_acks++;
+-		/* If this is a partial ACK, reset the retransmit timer. */
+-		if (qp->s_last != qp->s_tail) {
++		if (qp->s_acked != qp->s_tail) {
++			/*
++			 * We got a partial ACK for a resent operation so
++			 * reset the retransmit timer.
++			 */
+ 			spin_lock(&dev->pending_lock);
+ 			if (list_empty(&qp->timerwait))
+ 				list_add_tail(&qp->timerwait,
+ 					&dev->pending[dev->pending_index]);
+ 			spin_unlock(&dev->pending_lock);
+ 			/*
+-			 * If we get a partial ACK for a resent operation,
+-			 * we can stop resending the earlier packets and
++			 * We can stop resending the earlier packets and
+ 			 * continue with the next packet the receiver wants.
+ 			 */
+-			if (ipath_cmp24(qp->s_psn, psn) <= 0) {
++			if (ipath_cmp24(qp->s_psn, psn) <= 0)
+ 				reset_psn(qp, psn + 1);
+-				ipath_schedule_send(qp);
+-			}
+ 		} else if (ipath_cmp24(qp->s_psn, psn) <= 0) {
+ 			qp->s_state = OP(SEND_LAST);
+ 			qp->s_psn = psn + 1;
+@@ -1014,12 +1147,16 @@ static int do_rc_ack(struct ipath_qp *qp
+ 		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
+ 		qp->s_retry = qp->s_retry_cnt;
+ 		update_last_psn(qp, psn);
++		if (qp->s_acked != qp->s_head)
++			ipath_schedule_send(qp);
++		else
++			qp->s_flags &= ~IPATH_S_WAITING;
+ 		ret = 1;
+ 		goto bail;
+ 
+ 	case 1:		/* RNR NAK */
+ 		dev->n_rnr_naks++;
+-		if (qp->s_last == qp->s_tail)
++		if (qp->s_acked == qp->s_tail)
+ 			goto bail;
+ 		if (qp->s_rnr_retry == 0) {
+ 			status = IB_WC_RNR_RETRY_EXC_ERR;
+@@ -1047,7 +1184,7 @@ static int do_rc_ack(struct ipath_qp *qp
+ 		goto bail;
+ 
+ 	case 3:		/* NAK */
+-		if (qp->s_last == qp->s_tail)
++		if (qp->s_acked == qp->s_tail)
+ 			goto bail;
+ 		/* The last valid PSN is the previous PSN. */
+ 		update_last_psn(qp, psn - 1);
+@@ -1078,8 +1215,13 @@ static int do_rc_ack(struct ipath_qp *qp
+ 			status = IB_WC_REM_OP_ERR;
+ 			dev->n_other_naks++;
+ 		class_b:
+-			ipath_send_complete(qp, wqe, status);
+-			ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
++			if (qp->s_last == qp->s_acked) {
++				ipath_send_complete(qp, wqe, status);
++				ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
++			} else {
++				/* XXX need to handle delayed completion */
++				ipath_dbg("Delayed error %d\n", status);
++			}
+ 			break;
+ 
+ 		default:
+@@ -1159,9 +1301,9 @@ static inline void ipath_rc_rcv_resp(str
+ 		goto ack_done;
+ 	}
+ 
+-	if (unlikely(qp->s_last == qp->s_tail))
++	if (unlikely(qp->s_acked == qp->s_tail))
+ 		goto ack_done;
+-	wqe = get_swqe_ptr(qp, qp->s_last);
++	wqe = get_swqe_ptr(qp, qp->s_acked);
+ 	status = IB_WC_SUCCESS;
+ 
+ 	switch (opcode) {
+@@ -1188,7 +1330,7 @@ static inline void ipath_rc_rcv_resp(str
+ 		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
+ 			goto ack_done;
+ 		hdrsize += 4;
+-		wqe = get_swqe_ptr(qp, qp->s_last);
++		wqe = get_swqe_ptr(qp, qp->s_acked);
+ 		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
+ 			goto ack_op_err;
+ 		qp->r_flags &= ~IPATH_R_RDMAR_SEQ;
+@@ -1236,7 +1378,7 @@ static inline void ipath_rc_rcv_resp(str
+ 		qp->s_rdma_read_len -= pmtu;
+ 		update_last_psn(qp, psn);
+ 		spin_unlock_irqrestore(&qp->s_lock, flags);
+-		ipath_copy_sge(&qp->s_rdma_read_sge, data, pmtu);
++		ipath_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0);
+ 		goto bail;
+ 
+ 	case OP(RDMA_READ_RESPONSE_ONLY):
+@@ -1260,7 +1402,7 @@ static inline void ipath_rc_rcv_resp(str
+ 		 * have to be careful to copy the data to the right
+ 		 * location.
+ 		 */
+-		wqe = get_swqe_ptr(qp, qp->s_last);
++		wqe = get_swqe_ptr(qp, qp->s_acked);
+ 		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
+ 						  wqe, psn, pmtu);
+ 		goto read_last;
+@@ -1296,7 +1438,8 @@ static inline void ipath_rc_rcv_resp(str
+ 			aeth = be32_to_cpu(((__be32 *) data)[0]);
+ 			data += sizeof(__be32);
+ 		}
+-		ipath_copy_sge(&qp->s_rdma_read_sge, data, tlen);
++		ipath_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0);
++		WARN_ON(qp->s_rdma_read_sge.num_sge);
+ 		(void) do_rc_ack(qp, aeth, psn,
+ 				 OP(RDMA_READ_RESPONSE_LAST), 0);
+ 		goto ack_done;
+@@ -1309,8 +1452,13 @@ ack_op_err:
+ ack_len_err:
+ 	status = IB_WC_LOC_LEN_ERR;
+ ack_err:
+-	ipath_send_complete(qp, wqe, status);
+-	ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
++	if (qp->s_last == qp->s_acked) {
++		ipath_send_complete(qp, wqe, status);
++		ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
++	} else {
++		/* XXX need to handle delayed completion */
++		ipath_dbg("Delayed error %d\n", status);
++	}
+ ack_done:
+ 	spin_unlock_irqrestore(&qp->s_lock, flags);
+ bail:
+@@ -1440,6 +1588,12 @@ static inline int ipath_rc_rcv_error(str
+ 		len = be32_to_cpu(reth->length);
+ 		if (unlikely(offset + len > e->rdma_sge.sge.sge_length))
+ 			goto unlock_done;
++		for (i = 0; i < e->rdma_sge.num_sge; i++) {
++			struct ipath_sge *sge = i ?
++				&e->rdma_sge.sg_list[i - 1] : &e->rdma_sge.sge;
++
++			atomic_dec(&sge->mr->refcount);
++		}
+ 		if (len != 0) {
+ 			u32 rkey = be32_to_cpu(reth->rkey);
+ 			u64 vaddr = be64_to_cpu(reth->vaddr);
+@@ -1685,7 +1839,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
+ 		qp->r_rcv_len += pmtu;
+ 		if (unlikely(qp->r_rcv_len > qp->r_len))
+ 			goto nack_inv;
+-		ipath_copy_sge(&qp->r_sge, data, pmtu);
++		ipath_copy_sge(&qp->r_sge, data, pmtu, 1);
+ 		break;
+ 
+ 	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
+@@ -1728,7 +1882,12 @@ void ipath_rc_rcv(struct ipath_ibdev *de
+ 		wc.byte_len = tlen + qp->r_rcv_len;
+ 		if (unlikely(wc.byte_len > qp->r_len))
+ 			goto nack_inv;
+-		ipath_copy_sge(&qp->r_sge, data, tlen);
++		ipath_copy_sge(&qp->r_sge, data, tlen, 1);
++		while (qp->r_sge.num_sge) {
++			atomic_dec(&qp->r_sge.sge.mr->refcount);
++			if (--qp->r_sge.num_sge)
++				qp->r_sge.sge = *qp->r_sge.sg_list++;
++		}
+ 		qp->r_msn++;
+ 		if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
+ 			break;
+@@ -1779,6 +1938,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
+ 				goto nack_acc;
+ 		} else {
+ 			qp->r_sge.sg_list = NULL;
++			qp->r_sge.num_sge = 0;
+ 			qp->r_sge.sge.mr = NULL;
+ 			qp->r_sge.sge.vaddr = NULL;
+ 			qp->r_sge.sge.length = 0;
+@@ -1813,6 +1973,18 @@ void ipath_rc_rcv(struct ipath_ibdev *de
+ 			ipath_update_ack_queue(qp, next);
+ 		}
+ 		e = &qp->s_ack_queue[qp->r_head_ack_queue];
++		if (e->opcode == OP(RDMA_READ_REQUEST)) {
++			unsigned i;
++
++			for (i = 0; i < e->rdma_sge.num_sge; i++) {
++				struct ipath_sge *sge = i ?
++					&e->rdma_sge.sg_list[i - 1] :
++					&e->rdma_sge.sge;
++
++				atomic_dec(&sge->mr->refcount);
++			}
++			e->opcode = 0;
++		}
+ 		/* RETH comes after BTH */
+ 		if (!header_in_data)
+ 			reth = &ohdr->u.rc.reth;
+@@ -1890,6 +2062,19 @@ void ipath_rc_rcv(struct ipath_ibdev *de
+ 				goto nack_inv_unlck;
+ 			ipath_update_ack_queue(qp, next);
+ 		}
++		e = &qp->s_ack_queue[qp->r_head_ack_queue];
++		if (e->opcode == OP(RDMA_READ_REQUEST)) {
++			unsigned i;
++
++			for (i = 0; i < e->rdma_sge.num_sge; i++) {
++				struct ipath_sge *sge = i ?
++					&e->rdma_sge.sg_list[i - 1] :
++					&e->rdma_sge.sge;
++
++				atomic_dec(&sge->mr->refcount);
++			}
++			e->opcode = 0;
++		}
+ 		if (!header_in_data)
+ 			ateth = &ohdr->u.atomic_eth;
+ 		else
+@@ -1907,12 +2092,13 @@ void ipath_rc_rcv(struct ipath_ibdev *de
+ 		/* Perform atomic OP and save result. */
+ 		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
+ 		sdata = be64_to_cpu(ateth->swap_data);
+-		e = &qp->s_ack_queue[qp->r_head_ack_queue];
+ 		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
+ 			(u64) atomic64_add_return(sdata, maddr) - sdata :
+ 			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
+ 				      be64_to_cpu(ateth->compare_data),
+ 				      sdata);
++		atomic_dec(&qp->r_sge.sge.mr->refcount);
++		qp->r_sge.num_sge = 0;
+ 		e->opcode = opcode;
+ 		e->sent = 0;
+ 		e->psn = psn & IPATH_PSN_MASK;
+diff -up a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
+--- a/drivers/infiniband/hw/ipath/ipath_ruc.c	2008-12-10 18:04:07.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_ruc.c	2008-12-10 18:07:56.000000000 -0800
+@@ -142,6 +142,12 @@ int ipath_init_sge(struct ipath_qp *qp, 
+ 	goto bail;
+ 
+ bad_lkey:
++	while (j) {
++		struct ipath_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
++
++		atomic_dec(&sge->mr->refcount);
++	}
++	ss->num_sge = 0;
+ 	memset(&wc, 0, sizeof(wc));
+ 	wc.wr_id = wqe->wr_id;
+ 	wc.status = IB_WC_LOC_PROT_ERR;
+@@ -268,6 +274,7 @@ static void ipath_ruc_loopback(struct ip
+ 	u64 sdata;
+ 	atomic64_t *maddr;
+ 	enum ib_wc_status send_status;
++	int release;
+ 
+ 	/*
+ 	 * Note that we check the responder QP state after
+@@ -325,6 +332,7 @@ again:
+ 	memset(&wc, 0, sizeof wc);
+ 	send_status = IB_WC_SUCCESS;
+ 
++	release = 1;
+ 	sqp->s_sge.sge = wqe->sg_list[0];
+ 	sqp->s_sge.sg_list = wqe->sg_list + 1;
+ 	sqp->s_sge.num_sge = wqe->wr.num_sge;
+@@ -368,6 +376,7 @@ again:
+ 					    wqe->wr.wr.rdma.rkey,
+ 					    IB_ACCESS_REMOTE_READ)))
+ 			goto acc_err;
++		release = 0;
+ 		qp->r_sge.sge = wqe->sg_list[0];
+ 		qp->r_sge.sg_list = wqe->sg_list + 1;
+ 		qp->r_sge.num_sge = wqe->wr.num_sge;
+@@ -391,6 +400,8 @@ again:
+ 			(u64) atomic64_add_return(sdata, maddr) - sdata :
+ 			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
+ 				      sdata, wqe->wr.wr.atomic.swap);
++		atomic_dec(&qp->r_sge.sge.mr->refcount);
++		qp->r_sge.num_sge = 0;
+ 		goto send_comp;
+ 
+ 	default:
+@@ -407,14 +418,16 @@ again:
+ 		if (len > sge->sge_length)
+ 			len = sge->sge_length;
+ 		BUG_ON(len == 0);
+-		ipath_copy_sge(&qp->r_sge, sge->vaddr, len);
++		ipath_copy_sge(&qp->r_sge, sge->vaddr, len, release);
+ 		sge->vaddr += len;
+ 		sge->length -= len;
+ 		sge->sge_length -= len;
+ 		if (sge->sge_length == 0) {
++			if (!release)
++				atomic_dec(&sge->mr->refcount);
+ 			if (--sqp->s_sge.num_sge)
+ 				*sge = *sqp->s_sge.sg_list++;
+-		} else if (sge->length == 0 && sge->mr != NULL) {
++		} else if (sge->length == 0 && sge->mr->lkey) {
+ 			if (++sge->n >= IPATH_SEGSZ) {
+ 				if (++sge->m >= sge->mr->mapsz)
+ 					break;
+@@ -427,6 +440,12 @@ again:
+ 		}
+ 		sqp->s_len -= len;
+ 	}
++	if (release)
++		while (qp->r_sge.num_sge) {
++			atomic_dec(&qp->r_sge.sge.mr->refcount);
++			if (--qp->r_sge.num_sge)
++				qp->r_sge.sge = *qp->r_sge.sg_list++;
++		}
+ 
+ 	if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
+ 		goto send_comp;
+@@ -701,10 +720,16 @@ void ipath_send_complete(struct ipath_qp
+ 			 enum ib_wc_status status)
+ {
+ 	u32 old_last, last;
++	unsigned i;
+ 
+ 	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND))
+ 		return;
+ 
++	for (i = 0; i < wqe->wr.num_sge; i++) {
++		struct ipath_sge *sge = &wqe->sg_list[i];
++
++		atomic_dec(&sge->mr->refcount);
++	}
+ 	/* See ch. 11.2.4.1 and 10.7.3.1 */
+ 	if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
+ 	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
+@@ -726,6 +751,8 @@ void ipath_send_complete(struct ipath_qp
+ 	if (++last >= qp->s_size)
+ 		last = 0;
+ 	qp->s_last = last;
++	if (qp->s_acked == old_last)
++		qp->s_acked = last;
+ 	if (qp->s_cur == old_last)
+ 		qp->s_cur = last;
+ 	if (qp->s_tail == old_last)
+diff -up a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c
+--- a/drivers/infiniband/hw/ipath/ipath_sdma.c	2008-12-10 18:04:03.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_sdma.c	2008-12-10 18:16:39.000000000 -0800
+@@ -698,10 +698,8 @@ retry:
+ 
+ 	addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
+ 			      tx->map_len, DMA_TO_DEVICE);
+-	if (dma_mapping_error(&dd->pcidev->dev, addr)) {
+-		ret = -EIO;
+-		goto unlock;
+-	}
++	if (dma_mapping_error(&dd->pcidev->dev, addr))
++		goto ioerr;
+ 
+ 	dwoffset = tx->map_len >> 2;
+ 	make_sdma_desc(dd, sdmadesc, (u64) addr, dwoffset, 0);
+@@ -741,6 +739,8 @@ retry:
+ 		dw = (len + 3) >> 2;
+ 		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
+ 				      DMA_TO_DEVICE);
++		if (dma_mapping_error(&dd->pcidev->dev, addr))
++			goto unmap;
+ 		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
+ 		/* SDmaUseLargeBuf has to be set in every descriptor */
+ 		if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
+@@ -761,7 +761,7 @@ retry:
+ 		if (sge->sge_length == 0) {
+ 			if (--ss->num_sge)
+ 				*sge = *ss->sg_list++;
+-		} else if (sge->length == 0 && sge->mr != NULL) {
++		} else if (sge->length == 0 && sge->mr->lkey) {
+ 			if (++sge->n >= IPATH_SEGSZ) {
+ 				if (++sge->m >= sge->mr->mapsz)
+ 					break;
+@@ -798,7 +798,18 @@ retry:
+ 	list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist);
+ 	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_VL15)
+ 		vl15_watchdog_enq(dd);
++	goto unlock;
+ 
++unmap:
++	while (tail != dd->ipath_sdma_descq_tail) {
++		if (!tail)
++			tail = dd->ipath_sdma_descq_cnt - 1;
++		else
++			tail--;
++		unmap_desc(dd, tail);
++	}
++ioerr:
++	ret = -EIO;
+ unlock:
+ 	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+ fail:
+diff -up a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
+--- a/drivers/infiniband/hw/ipath/ipath_uc.c	2008-12-10 18:04:07.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_uc.c	2008-12-10 18:07:56.000000000 -0800
+@@ -225,6 +225,26 @@ unlock:
+ 	return ret;
+ }
+ 
++static void fix_mr_refcount(struct ipath_qp *qp)
++{
++	unsigned i;
++
++	if (qp->r_sge.num_sge == qp->s_rdma_read_sge.num_sge)
++		return;
++	while (qp->r_sge.num_sge) {
++		atomic_dec(&qp->r_sge.sge.mr->refcount);
++		if (--qp->r_sge.num_sge)
++			qp->r_sge.sge = *qp->r_sge.sg_list++;
++	}
++	for (i = 0; i < qp->s_rdma_read_sge.num_sge; i++) {
++		struct ipath_sge *sge = i ?
++			&qp->s_rdma_read_sge.sg_list[i - 1] :
++			&qp->s_rdma_read_sge.sge;
++
++		atomic_inc(&sge->mr->refcount);
++	}
++}
++
+ /**
+  * ipath_uc_rcv - handle an incoming UC packet
+  * @dev: the device the packet came in on
+@@ -293,6 +313,11 @@ void ipath_uc_rcv(struct ipath_ibdev *de
+ 		 */
+ 		qp->r_psn = psn;
+ 	inv:
++		while (qp->r_sge.num_sge) {
++			atomic_dec(&qp->r_sge.sge.mr->refcount);
++			if (--qp->r_sge.num_sge)
++				qp->r_sge.sge = *qp->r_sge.sg_list++;
++		}
+ 		qp->r_state = OP(SEND_LAST);
+ 		switch (opcode) {
+ 		case OP(SEND_FIRST):
+@@ -348,13 +373,13 @@ void ipath_uc_rcv(struct ipath_ibdev *de
+ 	send_first:
+ 		if (qp->r_flags & IPATH_R_REUSE_SGE) {
+ 			qp->r_flags &= ~IPATH_R_REUSE_SGE;
++			fix_mr_refcount(qp);
+ 			qp->r_sge = qp->s_rdma_read_sge;
+ 		} else if (!ipath_get_rwqe(qp, 0)) {
+ 			dev->n_pkt_drops++;
+ 			goto done;
+-		}
+-		/* Save the WQE so we can reuse it in case of an error. */
+-		qp->s_rdma_read_sge = qp->r_sge;
++		} else
++			qp->s_rdma_read_sge = qp->r_sge;
+ 		qp->r_rcv_len = 0;
+ 		if (opcode == OP(SEND_ONLY))
+ 			goto send_last;
+@@ -374,7 +399,7 @@ void ipath_uc_rcv(struct ipath_ibdev *de
+ 			dev->n_pkt_drops++;
+ 			goto done;
+ 		}
+-		ipath_copy_sge(&qp->r_sge, data, pmtu);
++		ipath_copy_sge(&qp->r_sge, data, pmtu, 1);
+ 		break;
+ 
+ 	case OP(SEND_LAST_WITH_IMMEDIATE):
+@@ -410,7 +435,12 @@ void ipath_uc_rcv(struct ipath_ibdev *de
+ 		}
+ 		wc.opcode = IB_WC_RECV;
+ 	last_imm:
+-		ipath_copy_sge(&qp->r_sge, data, tlen);
++		ipath_copy_sge(&qp->r_sge, data, tlen, 1);
++		while (qp->r_sge.num_sge) {
++			atomic_dec(&qp->r_sge.sge.mr->refcount);
++			if (--qp->r_sge.num_sge)
++				qp->r_sge.sge = *qp->r_sge.sg_list++;
++		}
+ 		wc.wr_id = qp->r_wr_id;
+ 		wc.status = IB_WC_SUCCESS;
+ 		wc.qp = &qp->ibqp;
+@@ -452,6 +482,7 @@ void ipath_uc_rcv(struct ipath_ibdev *de
+ 			}
+ 		} else {
+ 			qp->r_sge.sg_list = NULL;
++			qp->r_sge.num_sge = 0;
+ 			qp->r_sge.sge.mr = NULL;
+ 			qp->r_sge.sge.vaddr = NULL;
+ 			qp->r_sge.sge.length = 0;
+@@ -478,7 +509,7 @@ void ipath_uc_rcv(struct ipath_ibdev *de
+ 			dev->n_pkt_drops++;
+ 			goto done;
+ 		}
+-		ipath_copy_sge(&qp->r_sge, data, pmtu);
++		ipath_copy_sge(&qp->r_sge, data, pmtu, 1);
+ 		break;
+ 
+ 	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
+@@ -533,7 +564,12 @@ void ipath_uc_rcv(struct ipath_ibdev *de
+ 			dev->n_pkt_drops++;
+ 			goto done;
+ 		}
+-		ipath_copy_sge(&qp->r_sge, data, tlen);
++		ipath_copy_sge(&qp->r_sge, data, tlen, 1);
++		while (qp->r_sge.num_sge) {
++			atomic_dec(&qp->r_sge.sge.mr->refcount);
++			if (--qp->r_sge.num_sge)
++				qp->r_sge.sge = *qp->r_sge.sg_list++;
++		}
+ 		break;
+ 
+ 	default:
+diff -up a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
+--- a/drivers/infiniband/hw/ipath/ipath_ud.c	2008-12-10 18:04:07.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_ud.c	2008-12-10 18:07:56.000000000 -0800
+@@ -132,14 +132,20 @@ static void ipath_ud_loopback(struct ipa
+ 	}
+ 	wqe = get_rwqe_ptr(rq, tail);
+ 	rsge.sg_list = qp->r_ud_sg_list;
+-	if (!ipath_init_sge(qp, wqe, &rlen, &rsge)) {
++	if (unlikely(!ipath_init_sge(qp, wqe, &rlen, &rsge))) {
+ 		spin_unlock_irqrestore(&rq->lock, flags);
+ 		dev->n_pkt_drops++;
+ 		goto drop;
+ 	}
+ 	/* Silently drop packets which are too big. */
+-	if (wc.byte_len > rlen) {
++	if (unlikely(wc.byte_len > rlen)) {
++		unsigned i;
++
+ 		spin_unlock_irqrestore(&rq->lock, flags);
++		for (i = 0; i < rsge.num_sge; i++) {
++			sge = i ? &rsge.sg_list[i - 1] : &rsge.sge;
++			atomic_dec(&sge->mr->refcount);
++		}
+ 		dev->n_pkt_drops++;
+ 		goto drop;
+ 	}
+@@ -177,10 +183,10 @@ static void ipath_ud_loopback(struct ipa
+ 
+ 	ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr;
+ 	if (ah_attr->ah_flags & IB_AH_GRH) {
+-		ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh));
++		ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh), 1);
+ 		wc.wc_flags |= IB_WC_GRH;
+ 	} else
+-		ipath_skip_sge(&rsge, sizeof(struct ib_grh));
++		ipath_skip_sge(&rsge, sizeof(struct ib_grh), 1);
+ 	ssge.sg_list = swqe->sg_list + 1;
+ 	ssge.sge = *swqe->sg_list;
+ 	ssge.num_sge = swqe->wr.num_sge;
+@@ -193,14 +199,14 @@ static void ipath_ud_loopback(struct ipa
+ 		if (len > sge->sge_length)
+ 			len = sge->sge_length;
+ 		BUG_ON(len == 0);
+-		ipath_copy_sge(&rsge, sge->vaddr, len);
++		ipath_copy_sge(&rsge, sge->vaddr, len, 1);
+ 		sge->vaddr += len;
+ 		sge->length -= len;
+ 		sge->sge_length -= len;
+ 		if (sge->sge_length == 0) {
+ 			if (--ssge.num_sge)
+ 				*sge = *ssge.sg_list++;
+-		} else if (sge->length == 0 && sge->mr != NULL) {
++		} else if (sge->length == 0 && sge->mr->lkey) {
+ 			if (++sge->n >= IPATH_SEGSZ) {
+ 				if (++sge->m >= sge->mr->mapsz)
+ 					break;
+@@ -213,6 +219,11 @@ static void ipath_ud_loopback(struct ipa
+ 		}
+ 		length -= len;
+ 	}
++	while (rsge.num_sge) {
++		atomic_dec(&rsge.sge.mr->refcount);
++		if (--rsge.num_sge)
++			rsge.sge = *rsge.sg_list++;
++	}
+ 	wc.status = IB_WC_SUCCESS;
+ 	wc.opcode = IB_WC_RECV;
+ 	wc.qp = &qp->ibqp;
+@@ -550,12 +561,17 @@ void ipath_ud_rcv(struct ipath_ibdev *de
+ 	}
+ 	if (has_grh) {
+ 		ipath_copy_sge(&qp->r_sge, &hdr->u.l.grh,
+-			       sizeof(struct ib_grh));
++			       sizeof(struct ib_grh), 1);
+ 		wc.wc_flags |= IB_WC_GRH;
+ 	} else
+-		ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh));
++		ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
+ 	ipath_copy_sge(&qp->r_sge, data,
+-		       wc.byte_len - sizeof(struct ib_grh));
++		       wc.byte_len - sizeof(struct ib_grh), 1);
++	while (qp->r_sge.num_sge) {
++		atomic_dec(&qp->r_sge.sge.mr->refcount);
++		if (--qp->r_sge.num_sge)
++			qp->r_sge.sge = *qp->r_sge.sg_list++;
++	}
+ 	if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
+ 		goto bail;
+ 	wc.wr_id = qp->r_wr_id;
+diff -up a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
+--- a/drivers/infiniband/hw/ipath/ipath_verbs.c	2008-12-10 18:04:07.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_verbs.c	2008-12-10 18:22:23.000000000 -0800
+@@ -174,7 +174,8 @@ static __be64 sys_image_guid;
+  * @data: the data to copy
+  * @length: the length of the data
+  */
+-void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length)
++void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length,
++		    int release)
+ {
+ 	struct ipath_sge *sge = &ss->sge;
+ 
+@@ -194,9 +195,11 @@ void ipath_copy_sge(struct ipath_sge_sta
+ 		sge->length -= len;
+ 		sge->sge_length -= len;
+ 		if (sge->sge_length == 0) {
++			if (release)
++				atomic_dec(&sge->mr->refcount);
+ 			if (--ss->num_sge)
+ 				*sge = *ss->sg_list++;
+-		} else if (sge->length == 0 && sge->mr != NULL) {
++		} else if (sge->length == 0 && sge->mr->lkey) {
+ 			if (++sge->n >= IPATH_SEGSZ) {
+ 				if (++sge->m >= sge->mr->mapsz)
+ 					break;
+@@ -217,7 +220,7 @@ void ipath_copy_sge(struct ipath_sge_sta
+  * @ss: the SGE state
+  * @length: the number of bytes to skip
+  */
+-void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
++void ipath_skip_sge(struct ipath_sge_state *ss, u32 length, int release)
+ {
+ 	struct ipath_sge *sge = &ss->sge;
+ 
+@@ -233,9 +236,11 @@ void ipath_skip_sge(struct ipath_sge_sta
+ 		sge->length -= len;
+ 		sge->sge_length -= len;
+ 		if (sge->sge_length == 0) {
++			if (release)
++				atomic_dec(&sge->mr->refcount);
+ 			if (--ss->num_sge)
+ 				*sge = *ss->sg_list++;
+-		} else if (sge->length == 0 && sge->mr != NULL) {
++		} else if (sge->length == 0 && sge->mr->lkey) {
+ 			if (++sge->n >= IPATH_SEGSZ) {
+ 				if (++sge->m >= sge->mr->mapsz)
+ 					break;
+@@ -282,7 +287,7 @@ static u32 ipath_count_sge(struct ipath_
+ 		if (sge.sge_length == 0) {
+ 			if (--num_sge)
+ 				sge = *sg_list++;
+-		} else if (sge.length == 0 && sge.mr != NULL) {
++		} else if (sge.length == 0 && sge.mr->lkey) {
+ 			if (++sge.n >= IPATH_SEGSZ) {
+ 				if (++sge.m >= sge.mr->mapsz)
+ 					break;
+@@ -321,7 +326,7 @@ static void ipath_copy_from_sge(void *da
+ 		if (sge->sge_length == 0) {
+ 			if (--ss->num_sge)
+ 				*sge = *ss->sg_list++;
+-		} else if (sge->length == 0 && sge->mr != NULL) {
++		} else if (sge->length == 0 && sge->mr->lkey) {
+ 			if (++sge->n >= IPATH_SEGSZ) {
+ 				if (++sge->m >= sge->mr->mapsz)
+ 					break;
+@@ -406,10 +411,11 @@ static int ipath_post_one_send(struct ip
+ 	wqe = get_swqe_ptr(qp, qp->s_head);
+ 	wqe->wr = *wr;
+ 	wqe->length = 0;
++	j = 0;
+ 	if (wr->num_sge) {
+ 		acc = wr->opcode >= IB_WR_RDMA_READ ?
+ 			IB_ACCESS_LOCAL_WRITE : 0;
+-		for (i = 0, j = 0; i < wr->num_sge; i++) {
++		for (i = 0; i < wr->num_sge; i++) {
+ 			u32 length = wr->sg_list[i].length;
+ 			int ok;
+ 
+@@ -418,7 +424,7 @@ static int ipath_post_one_send(struct ip
+ 			ok = ipath_lkey_ok(qp, &wqe->sg_list[j],
+ 					   &wr->sg_list[i], acc);
+ 			if (!ok)
+-				goto bail_inval;
++				goto bail_inval_free;
+ 			wqe->length += length;
+ 			j++;
+ 		}
+@@ -427,15 +433,21 @@ static int ipath_post_one_send(struct ip
+ 	if (qp->ibqp.qp_type == IB_QPT_UC ||
+ 	    qp->ibqp.qp_type == IB_QPT_RC) {
+ 		if (wqe->length > 0x80000000U)
+-			goto bail_inval;
++			goto bail_inval_free;
+ 	} else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu)
+-		goto bail_inval;
++		goto bail_inval_free;
+ 	wqe->ssn = qp->s_ssn++;
+ 	qp->s_head = next;
+ 
+ 	ret = 0;
+ 	goto bail;
+ 
++bail_inval_free:
++	while (j) {
++		struct ipath_sge *sge = &wqe->sg_list[--j];
++
++		atomic_dec(&sge->mr->refcount);
++	}
+ bail_inval:
+ 	ret = -EINVAL;
+ bail:
+@@ -760,7 +772,7 @@ static void ipath_ib_timer(struct ipath_
+ 		resend = qp->timer_next;
+ 
+ 		spin_lock_irqsave(&qp->s_lock, flags);
+-		if (qp->s_last != qp->s_tail &&
++		if (qp->s_acked != qp->s_tail &&
+ 		    ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) {
+ 			dev->n_timeouts++;
+ 			ipath_restart_rc(qp, qp->s_last_psn + 1);
+@@ -796,7 +808,7 @@ static void update_sge(struct ipath_sge_
+ 	if (sge->sge_length == 0) {
+ 		if (--ss->num_sge)
+ 			*sge = *ss->sg_list++;
+-	} else if (sge->length == 0 && sge->mr != NULL) {
++	} else if (sge->length == 0 && sge->mr->lkey) {
+ 		if (++sge->n >= IPATH_SEGSZ) {
+ 			if (++sge->m >= sge->mr->mapsz)
+ 				return;
+@@ -1047,6 +1059,8 @@ static void sdma_complete(void *cookie, 
+ 		spin_lock_irqsave(&qp->s_lock, flags);
+ 		if (tx->wqe)
+ 			ipath_send_complete(qp, tx->wqe, ibs);
++		else if (qp->ibqp.qp_type == IB_QPT_RC)
++			ipath_rc_send_complete(qp, &tx->hdr.hdr);
+ 		if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND &&
+ 		     qp->s_last != qp->s_head) ||
+ 		    (qp->s_flags & IPATH_S_WAIT_DMA))
+@@ -1057,6 +1071,10 @@ static void sdma_complete(void *cookie, 
+ 		spin_lock_irqsave(&qp->s_lock, flags);
+ 		ipath_send_complete(qp, tx->wqe, ibs);
+ 		spin_unlock_irqrestore(&qp->s_lock, flags);
++	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
++		spin_lock_irqsave(&qp->s_lock, flags);
++		ipath_rc_send_complete(qp, &tx->hdr.hdr);
++		spin_unlock_irqrestore(&qp->s_lock, flags);
+ 	}
+ 
+ 	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
+@@ -1314,6 +1332,10 @@ done:
+ 		spin_lock_irqsave(&qp->s_lock, flags);
+ 		ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
+ 		spin_unlock_irqrestore(&qp->s_lock, flags);
++	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
++		spin_lock_irqsave(&qp->s_lock, flags);
++		ipath_rc_send_complete(qp, ibhdr);
++		spin_unlock_irqrestore(&qp->s_lock, flags);
+ 	}
+ 	ret = 0;
+ bail:
+@@ -2238,6 +2260,8 @@ void ipath_unregister_ib_device(struct i
+ 		ipath_dev_err(dev->dd, "piowait list not empty!\n");
+ 	if (!list_empty(&dev->rnrwait))
+ 		ipath_dev_err(dev->dd, "rnrwait list not empty!\n");
++	if (dev->dma_mr)
++		ipath_dev_err(dev->dd, "DMA MR not NULL!\n");
+ 	if (!ipath_mcast_tree_empty())
+ 		ipath_dev_err(dev->dd, "multicast table memory leak!\n");
+ 	/*
+@@ -2287,10 +2311,12 @@ static ssize_t show_stats(struct device 
+ 		container_of(device, struct ipath_ibdev, ibdev.dev);
+ 	int i;
+ 	int len;
++	struct ipath_qp_table *qpt;
++	unsigned long flags;
+ 
+ 	len = sprintf(buf,
+ 		      "RC resends  %d\n"
+-		      "RC no QACK  %d\n"
++		      "RC QACKs    %d\n"
+ 		      "RC ACKs     %d\n"
+ 		      "RC SEQ NAKs %d\n"
+ 		      "RC RDMA seq %d\n"
+@@ -2298,6 +2324,7 @@ static ssize_t show_stats(struct device 
+ 		      "RC OTH NAKs %d\n"
+ 		      "RC timeouts %d\n"
+ 		      "RC RDMA dup %d\n"
++		      "RC DComp    %d\n"
+ 		      "piobuf wait %d\n"
+ 		      "unaligned   %d\n"
+ 		      "PKT drops   %d\n"
+@@ -2305,7 +2332,8 @@ static ssize_t show_stats(struct device 
+ 		      dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
+ 		      dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
+ 		      dev->n_other_naks, dev->n_timeouts,
+-		      dev->n_rdma_dup_busy, dev->n_piowait, dev->n_unaligned,
++		      dev->n_rdma_dup_busy, dev->n_rc_delayed_comp,
++		      dev->n_piowait, dev->n_unaligned,
+ 		      dev->n_pkt_drops, dev->n_wqe_errs);
+ 	for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
+ 		const struct ipath_opcode_stats *si = &dev->opstats[i];
+@@ -2316,6 +2344,25 @@ static ssize_t show_stats(struct device 
+ 			       (unsigned long long) si->n_packets,
+ 			       (unsigned long long) si->n_bytes);
+ 	}
++	qpt = &dev->qp_table;
++	spin_lock_irqsave(&qpt->lock, flags);
++	for (i = 0; i < qpt->max; i++) {
++		struct ipath_qp *qp;
++		for (qp = qpt->table[i]; qp != NULL; qp = qp->next) {
++			len += sprintf(buf + len,
++				"QP%u %x PSN %x %x %x %x %x (%u %u %u %u %u)\n",
++				qp->ibqp.qp_num,
++				qp->s_flags,
++				qp->s_last_psn,
++				qp->s_psn,
++				qp->s_next_psn,
++				qp->s_sending_psn,
++				qp->s_sending_hpsn,
++				qp->s_last, qp->s_acked, qp->s_cur,
++				qp->s_tail, qp->s_head);
++		}
++	}
++	spin_unlock_irqrestore(&qpt->lock, flags);
+ 	return len;
+ }
+ 
+diff -up a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
+--- a/drivers/infiniband/hw/ipath/ipath_verbs.h	2008-12-10 18:04:07.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_verbs.h	2008-12-10 18:07:56.000000000 -0800
+@@ -248,6 +248,7 @@ struct ipath_mregion {
+ 	int access_flags;
+ 	u32 max_segs;		/* number of ipath_segs in all the arrays */
+ 	u32 mapsz;		/* size of the map array */
++	atomic_t refcount;
+ 	struct ipath_segarray *map[0];	/* the segments */
+ };
+ 
+@@ -385,6 +386,8 @@ struct ipath_qp {
+ 	u32 s_rdma_read_len;	/* total length of s_rdma_read_sge */
+ 	u32 s_next_psn;		/* PSN for next request */
+ 	u32 s_last_psn;		/* last response PSN processed */
++	u32 s_sending_psn;	/* lowest PSN that is being sent */
++	u32 s_sending_hpsn;	/* highest PSN that is being sent */
+ 	u32 s_psn;		/* current packet sequence number */
+ 	u32 s_ack_rdma_psn;	/* PSN for sending RDMA read responses */
+ 	u32 s_ack_psn;		/* PSN for acking sends and RDMA writes */
+@@ -427,7 +430,8 @@ struct ipath_qp {
+ 	u32 s_head;		/* new entries added here */
+ 	u32 s_tail;		/* next entry to process */
+ 	u32 s_cur;		/* current work queue entry */
+-	u32 s_last;		/* last un-ACK'ed entry */
++	u32 s_acked;		/* last un-ACK'ed entry */
++	u32 s_last;		/* last completed entry */
+ 	u32 s_ssn;		/* SSN of tail entry */
+ 	u32 s_lsn;		/* limit sequence number (credit) */
+ 	struct ipath_swqe *s_wq;	/* send work queue */
+@@ -539,6 +543,7 @@ struct ipath_ibdev {
+ 	struct list_head pending_mmaps;
+ 	spinlock_t mmap_offset_lock;
+ 	u32 mmap_offset;
++	struct ipath_mregion *dma_mr;
+ 	int ib_unit;		/* This is the device number */
+ 	u16 sm_lid;		/* in host order */
+ 	u8 sm_sl;
+@@ -601,6 +606,7 @@ struct ipath_ibdev {
+ 	u32 n_rc_resends;
+ 	u32 n_rc_acks;
+ 	u32 n_rc_qacks;
++	u32 n_rc_delayed_comp;
+ 	u32 n_seq_naks;
+ 	u32 n_rdma_seq;
+ 	u32 n_rnr_naks;
+@@ -759,9 +765,10 @@ unsigned ipath_ib_rate_to_mult(enum ib_r
+ int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
+ 		     u32 hdrwords, struct ipath_sge_state *ss, u32 len);
+ 
+-void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length);
++void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length,
++		    int release);
+ 
+-void ipath_skip_sge(struct ipath_sge_state *ss, u32 length);
++void ipath_skip_sge(struct ipath_sge_state *ss, u32 length, int release);
+ 
+ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
+ 		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
+@@ -771,6 +778,8 @@ void ipath_rc_rcv(struct ipath_ibdev *de
+ 
+ void ipath_restart_rc(struct ipath_qp *qp, u32 psn);
+ 
++void ipath_rc_send_complete(struct ipath_qp *qp, struct ipath_ib_header *hdr);
++
+ void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err);
+ 
+ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr);
+@@ -781,7 +790,7 @@ void ipath_ud_rcv(struct ipath_ibdev *de
+ int ipath_alloc_lkey(struct ipath_lkey_table *rkt,
+ 		     struct ipath_mregion *mr);
+ 
+-void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey);
++int ipath_free_lkey(struct ipath_ibdev *dev, struct ipath_mregion *mr);
+ 
+ int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
+ 		  struct ib_sge *sge, int acc);
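
A minimal user-space sketch of the 24-bit PSN arithmetic that the new
s_sending_psn/s_sending_hpsn bookkeeping and the reworked ACK-request test
rely on is below.  It is illustrative only: cmp24() is a stand-in for the
driver's ipath_cmp24(), and PSN_MASK/PSN_CREDIT are assumed values rather
than the driver's constants.

#include <stdio.h>
#include <stdint.h>

#define PSN_MASK   0xFFFFFFu   /* PSNs are 24-bit quantities */
#define PSN_CREDIT 16          /* stand-in for IPATH_PSN_CREDIT */

/* Compare two 24-bit PSNs modulo 2^24: negative if a precedes b, zero if
 * equal, positive if a follows b.  Shifting the 32-bit difference left by
 * 8 and back (arithmetic shift) discards the high byte so the comparison
 * wraps correctly at the 24-bit boundary, the same idea the driver uses. */
static int cmp24(uint32_t a, uint32_t b)
{
	return ((int)((a - b) << 8)) >> 8;
}

int main(void)
{
	uint32_t sent = 0xFFFFFE;               /* about to wrap */
	uint32_t next = (sent + 3) & PSN_MASK;  /* wraps to 0x000001 */
	int delta = cmp24(next, sent);

	printf("cmp24(%#x, %#x) = %d\n", sent, next, cmp24(sent, next));
	printf("delta = %d, request ACK: %s\n", delta,
	       (delta && delta % PSN_CREDIT == 0) ? "yes" : "no");
	return 0;
}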

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0100_prefunit.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0100_prefunit.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0100_prefunit.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,22 @@
+IB/ipath - fix prefunit logic in best unit calculation
+
+The best-unit calculation broke when multiple HCAs were installed but not
+connected and taskset was used to restrict the process's CPU affinity.
+
+Signed-off-by: John Gregor <john.gregor at qlogic.com>
+
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_file_ops.c
+===================================================================
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_file_ops.c	2009-01-13 21:10:03.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_file_ops.c	2009-01-13 21:17:37.000000000 -0800
+@@ -1748,8 +1748,8 @@ recheck:
+ 			ipath_dbg("No ports available (none initialized "
+ 				  "and ready)\n");
+ 		} else {
+-			if (prefunit > 0) {
+-				/* if started above 0, retry from 0 */
++			if (prefunit != -1) {
++				/* if had prefunit, retry from 0 */
+ 				ipath_cdbg(PROC,
+ 					   "%s[%u] no ports on prefunit "
+ 					   "%d, clear and re-check\n",
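
For context, a stand-alone model of the condition being fixed is below; the
-1 "no preference" encoding is taken from the new check itself, and the
helper names are invented.  The failing case is a preferred unit of 0 (for
example when taskset affinity selects unit 0 as the closest HCA): the old
prefunit > 0 test never cleared the preference, so the open never fell back
to scanning the other units.

#include <stdio.h>

/* Illustrative only: should the open path clear the preferred unit and
 * re-scan all HCAs after finding no usable port on it? */
static int retry_old(int prefunit) { return prefunit > 0; }
static int retry_new(int prefunit) { return prefunit != -1; }

int main(void)
{
	int prefunit = 0;  /* preferred unit 0 exists but has no usable port */

	printf("old check re-scans: %d\n", retry_old(prefunit));  /* 0 -> stuck */
	printf("new check re-scans: %d\n", retry_new(prefunit));  /* 1 -> falls back */
	return 0;
}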

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0110_sdmagenmismatch.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0110_sdmagenmismatch.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0110_sdmagenmismatch.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,64 @@
+IB/ipath: Fix recovery on sdmagenmismatch errors
+
+    The recovery code wasn't working because a status bit was being cleared
+    before the recovery ran.  Change the order, and improve the debug messages.
+    
+Signed-off-by: Dave Olson <dave.olson at qlogic.com>
+
+---
+ drivers/infiniband/hw/ipath/ipath_intr.c |   29 +++++++++++++++++++++--------
+ 1 file changed, 21 insertions(+), 8 deletions(-)
+
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_intr.c
+===================================================================
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_intr.c
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_intr.c
+@@ -549,13 +549,20 @@ static void handle_sdma_errors(struct ip
+ 		ipath_cdbg(VERBOSE, "sdma tl 0x%lx hd 0x%lx status 0x%lx "
+ 			"lengen 0x%lx\n", tl, hd, status, lengen);
+ 	}
+-
++	expected = test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
++	ipath_dbg("%sxpected sdma error, sdma_status 0x%lx\n",
++		expected ?  "e" : "une", dd->ipath_sdma_status);
++	/*
++	 * we are in interrupt context (and only one interrupt vector),
++	 * so we won't get another interrupt and process the sdma state
++	 * change before the set_bit of SDMA_DISABLED.  We set DISABLED
++	 * here because there are cases where abort_task will not.
++	 */
++	if (!expected) /* must be prior to setting SDMA_DISABLED */
++		ipath_cancel_sends(dd, 1);
+ 	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+ 	__set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
+-	expected = test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
+ 	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+-	if (!expected)
+-		ipath_cancel_sends(dd, 1);
+ }
+ 
+ static void handle_sdma_intr(struct ipath_devdata *dd, u64 istat)
+@@ -570,13 +577,19 @@ static void handle_sdma_intr(struct ipat
+ 	if (istat & INFINIPATH_I_SDMADISABLED) {
+ 		expected = test_bit(IPATH_SDMA_ABORTING,
+ 			&dd->ipath_sdma_status);
+-		ipath_dbg("%s SDmaDisabled intr\n",
+-			expected ? "expected" : "unexpected");
++		ipath_dbg("%sxpected sdma disabled intr, sdma_status 0x%lx\n",
++			expected ?  "e" : "une", dd->ipath_sdma_status);
++		/*
++		 * we are in interrupt context (and only one interrupt vector),
++		 * so we won't get another interrupt and process the sdma state
++		 * change before the set_bit of SDMA_DISABLED.  We set DISABLED
++		 * here because there are cases where abort_task will not.
++		 */
++		if (!expected) /* must be prior to setting SDMA_DISABLED */
++			ipath_cancel_sends(dd, 1);
+ 		spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+ 		__set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
+ 		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+-		if (!expected)
+-			ipath_cancel_sends(dd, 1);
+ 		if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
+ 			tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
+ 	}

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0120_ipath_do_user_init.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0120_ipath_do_user_init.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0120_ipath_do_user_init.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,25 @@
+diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
+index 7193e3f..c152618 100644
+--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
++++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
+@@ -1989,7 +1989,12 @@ static int ipath_do_user_init(struct file *fp,
+ 	 * explictly set the in-memory tail copy to 0 beforehand, so we
+ 	 * don't have to wait to be sure the DMA update has happened
+ 	 * (chip resets head/tail to 0 on transition to enable).
++	 * The mutex ensures that the read value of dd->ipath_rcvctrl
++	 * after the atomic set_bit is not stale, and avoids a race
++	 * hazard with 2 processes attempting to enable (distinct)
++	 * ports simultaneously.
+ 	 */
++	mutex_lock(&ipath_mutex);
+ 	set_bit(dd->ipath_r_portenable_shift + pd->port_port,
+ 		&dd->ipath_rcvctrl);
+ 	if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
+@@ -2001,6 +2006,7 @@ static int ipath_do_user_init(struct file *fp,
+ 	}
+ 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+ 			 dd->ipath_rcvctrl);
++	mutex_unlock(&ipath_mutex);
+ 	/* Notify any waiting slaves */
+ 	if (pd->port_subport_cnt) {
+ 		clear_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
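
The race being closed is a lost update on a shared control word: two openers
each set their port-enable bit and then write the whole word to the chip, and
without serialization one write can be based on a stale read and drop the
other's bit.  The sketch below is a stand-alone user-space model of that
pattern (pthreads instead of two processes, a plain variable instead of
dd->ipath_rcvctrl and the kreg write); all names in it are invented.

#include <pthread.h>
#include <stdio.h>

static unsigned long rcvctrl;   /* models the shared dd->ipath_rcvctrl word */
static unsigned long hw_reg;    /* models the chip's receive-control register */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void *enable_port(void *arg)
{
	unsigned long bit = 1UL << (long)arg;

	/* The fix: hold the mutex across the set-bit, the re-read and the
	 * register write, so the value written always includes the bit the
	 * other opener just set. */
	pthread_mutex_lock(&lock);
	__atomic_or_fetch(&rcvctrl, bit, __ATOMIC_SEQ_CST); /* like set_bit() */
	hw_reg = rcvctrl;          /* like ipath_write_kreg(..., kr_rcvctrl, ...) */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, enable_port, (void *)1L);
	pthread_create(&b, NULL, enable_port, (void *)2L);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("register ends up %#lx; both 0x2 and 0x4 must be set\n", hw_reg);
	return 0;
}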

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0130_freezemode.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0130_freezemode.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0130_freezemode.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,14 @@
+diff --git old/drivers/infiniband/hw/ipath/ipath_iba6120.c new/drivers/infiniband/hw/ipath/ipath_iba6120.c
+index ba1f51d..ac38ca8 100644
+--- old/drivers/infiniband/hw/ipath/ipath_iba6120.c
++++ new/drivers/infiniband/hw/ipath/ipath_iba6120.c
+@@ -709,6 +709,10 @@ static void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
+ 		 */
+ 		val &= ~INFINIPATH_HWE_PCIEBUSPARITYRADM;
+ 	}
++
++	/* avoid some Intel CPUs' speculative-read freeze-mode issue */
++	val &= ~(INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF
++	   << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT);
+ 	dd->ipath_hwerrmask = val;
+ }

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0140_pcie_coalesce.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0140_pcie_coalesce.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0140_pcie_coalesce.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,130 @@
+IB/ipath - optionally enable Intel chipset pcie coalescing for performance
+
+Enabling PCIe coalescing can significantly improve performance with
+InfiniPath 7220 HCAs on some chipsets.  The feature does not work correctly
+on all chipsets, so it is left disabled by default.
+
+Signed-off-by: Dave Olson <dave.olson at qlogic.com>
+
+diff --git a/drivers/infiniband/hw/ipath/ipath_iba7220.c b/drivers/infiniband/hw/ipath/ipath_iba7220.c
+--- a/drivers/infiniband/hw/ipath/ipath_iba7220.c
++++ b/drivers/infiniband/hw/ipath/ipath_iba7220.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
++ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+  *
+  * This software is available to you under a choice of one of two
+@@ -58,6 +58,11 @@
+ module_param_named(fetch_arb, ipath_sdma_fetch_arb, uint, S_IRUGO);
+ MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");
+ 
++static int ipath_pcie_coalesce;
++module_param_named(pcie_coalesce, ipath_pcie_coalesce, int, S_IRUGO);
++MODULE_PARM_DESC(pcie_coalesce, "tune PCIe coalescing on some Intel chipsets");
++
++
+ /*
+  * This file contains almost all the chip-specific register information and
+  * access functions for the QLogic InfiniPath 7220 PCI-Express chip, with the
+@@ -1240,6 +1245,90 @@
+ 	ipath_7220_nomsi(dd);
+ }
+ 
++/*
++ * Enable PCIe completion and data coalescing on Intel 5x00 and 7300
++ * chipsets.  This is known to be unsafe for some revisions of some of
++ * these chipsets with some BIOS settings, and enabling it on those
++ * systems may result in the system crashing and/or data corruption.
++ */
++static void ipath_7220_tune_pcie_coalesce(struct ipath_devdata *dd)
++{
++	int r;
++	struct pci_dev *parent;
++	int ppos;
++	u16 devid;
++	u32 mask, bits, val;
++
++	if (!ipath_pcie_coalesce)
++		return;
++
++	/* Find out supported and configured values for parent (root) */
++	parent = dd->pcidev->bus->self;
++	if (parent->bus->parent) {
++		dev_info(&dd->pcidev->dev, "Parent not root\n");
++		return;
++	}
++	ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
++	if (!ppos) {
++		ipath_dbg("parent not PCIe root complex!?\n");
++		return;
++	}
++	if (parent->vendor != 0x8086) {
++		ipath_dbg("VendorID 0x%x isn't Intel, skip\n", parent->vendor);
++		return;
++	}
++
++	/*
++	 *  - bit 12: Max_rdcmp_Imt_EN: need to set to 1
++	 *  - bit 11: COALESCE_FORCE: need to set to 0
++	 *  - bit 10: COALESCE_EN: need to set to 1
++	 *  (but with limitations on some chipsets)
++	 *
++	 *  On the Intel 5000, 5100, and 7300 chipsets there is also:
++	 *  - bit 25:24: COALESCE_MODE, need to set to 0
++	 *  OLSON: bits 10,11,12 may need to be gated by maxpayload
++	 */
++	devid = parent->device;
++	if (devid >= 0x25e2 && devid <= 0x25fa) {
++		/* 5000 P/V/X/Z */
++		u8 rev;
++		pci_read_config_byte(parent, PCI_REVISION_ID, &rev);
++		if (rev <= 0xb2) {
++			bits = 1U << 10;
++			ipath_dbg("Old rev 5000* (0x%x), enable-only\n", rev);
++		} else
++			bits = 7U << 10;
++		mask = (3U << 24) | (7U << 10);
++	} else if (devid >= 0x65e2 && devid <= 0x65fa) {
++		/* 5100 */
++		bits = 1U << 10;
++		mask = (3U << 24) | (7U << 10);
++	} else if (devid >= 0x4021 && devid <= 0x402e) {
++		/* 5400 */
++		bits = 7U << 10;
++		mask = 7U << 10;
++	} else if (devid >= 0x3604 && devid <= 0x360a) {
++		/* 7300 */
++		bits = 7U << 10;
++		mask = (3U << 24) | (7U << 10);
++	} else {
++		/* not one of the chipsets that we know about */
++		ipath_dbg("DeviceID 0x%x isn't one we know, skip\n", devid);
++		return;
++	}
++	pci_read_config_dword(parent, 0x48, &val);
++	ipath_dbg("Read initial value 0x%x at 0x48, deviceid 0x%x\n",
++		val, devid);
++	val &= ~mask;
++	val |= bits;
++	r = pci_write_config_dword(parent, 0x48, val);
++	if (r)
++		ipath_dev_err(dd, "Unable to update deviceid 0x%x to val 0x%x"
++				" for PCIe coalescing\n", devid, val);
++	else
++		dev_info(&dd->pcidev->dev, "Updated deviceid 0x%x to val 0x%x"
++				" for PCIe coalescing\n", devid, val);
++}
+ 
+ static void ipath_7220_pcie_params(struct ipath_devdata *dd, u32 boardrev)
+ {
+@@ -1299,6 +1389,8 @@
+ 			"PCIe linkspeed %u is incorrect; "
+ 			"should be 1 (2500)!\n", speed);
+ 
++	ipath_7220_tune_pcie_coalesce(dd);
++
+ bail:
+ 	/* fill in string, even on errors */
+ 	snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info),
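
A stripped-down model of the read-modify-write the new routine performs on
config-space offset 0x48 is below; the bit positions are quoted from the
comment in the patch, the case shown is the newer-revision 5000-series one,
and the PCI config accessors are replaced with a plain variable, so treat it
as a sketch rather than the driver's code.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* bits 12/11/10: Max_rdcmp_Imt_EN, COALESCE_FORCE, COALESCE_EN;
	 * bits 25:24: COALESCE_MODE (5000/5100/7300 only, must end up 0) */
	uint32_t mask = (3U << 24) | (7U << 10);
	uint32_t bits = 7U << 10;
	uint32_t val  = 0x03000000;  /* pretend pci_read_config_dword(parent, 0x48, &val) */

	val = (val & ~mask) | bits;  /* clear COALESCE_MODE, set the coalescing bits */
	printf("would write %#x back to offset 0x48\n", val);
	return 0;
}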

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0150_ibc_back_to_back_fix.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0150_ibc_back_to_back_fix.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0150_ibc_back_to_back_fix.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,135 @@
+IB/ipath - fix IBC register hazard, can't do back to back writes
+
+A hazard was found that could cause writes to IBC registers to be ignored.
+Write the scratch register after each IBC register write to prevent this.
+No locking for this version.
+
+Signed-off-by: Dave Olson <dave.olson at qlogic.com>
+
+diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
+index ad0aab6..baf9e1f 100644
+--- a/drivers/infiniband/hw/ipath/ipath_driver.c
++++ b/drivers/infiniband/hw/ipath/ipath_driver.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
++ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+  *
+  * This software is available to you under a choice of one of two
+@@ -2098,6 +2098,7 @@ int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
+ 		dd->ipath_ibcctrl |= INFINIPATH_IBCC_LOOPBACK;
+ 		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
+ 				 dd->ipath_ibcctrl);
++		ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeedbeef);
+ 
+ 		/* turn heartbeat off, as it causes loopback to fail */
+ 		dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
+@@ -2114,6 +2115,7 @@ int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
+ 		dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LOOPBACK;
+ 		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
+ 				 dd->ipath_ibcctrl);
++		ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeedbeef);
+ 		/* don't wait */
+ 		ret = 0;
+ 		goto bail;
+@@ -2215,6 +2217,7 @@ int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
+ 		dd->ipath_ibcctrl = ibc;
+ 		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
+ 				 dd->ipath_ibcctrl);
++		ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeedbeef);
+ 		dd->ipath_f_tidtemplate(dd);
+ 	}
+ 
+diff --git a/drivers/infiniband/hw/ipath/ipath_iba7220.c b/drivers/infiniband/hw/ipath/ipath_iba7220.c
+index 9839e20..809638b 100644
+--- a/drivers/infiniband/hw/ipath/ipath_iba7220.c
++++ b/drivers/infiniband/hw/ipath/ipath_iba7220.c
+@@ -996,8 +996,10 @@ static int ipath_7220_bringup_serdes(struct ipath_devdata *dd)
+ 
+ 	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl,
+ 			dd->ipath_ibcddrctrl);
++	ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeedbeef);
+ 
+ 	ipath_write_kreg(dd, IPATH_KREG_OFFSET(IBNCModeCtrl), 0Ull);
++	ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeedbeef);
+ 
+ 	/* IBA7220 has SERDES MPU reset in D0 of what _was_ IBPLLCfg */
+ 	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl);
+@@ -1045,6 +1047,7 @@ static int ipath_7220_bringup_serdes(struct ipath_devdata *dd)
+ 		ipath_cdbg(VERBOSE, "Wrote %llX to HRTBT_GUID\n",
+ 			(unsigned long long) guid);
+ 	ipath_write_kreg(dd, dd->ipath_kregs->kr_hrtbt_guid, guid);
++	ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeedbeef);
+ 	return ret;
+ }
+ 
+@@ -2121,6 +2124,7 @@ static int ipath_7220_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val)
+ 	dd->ipath_ibcddrctrl |= (((u64) val & maskr) << lsb);
+ 	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl,
+ 			 dd->ipath_ibcddrctrl);
++	ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeedbeef);
+ 	if (setforce)
+ 		dd->ipath_flags |= IPATH_IB_FORCE_NOTIFY;
+ bail:
+@@ -2294,6 +2298,7 @@ static void set_speed_fast(struct ipath_devdata *dd, u32 speed)
+ 		IBA7220_IBC_WIDTH_SHIFT;
+ 	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl,
+ 			dd->ipath_ibcddrctrl);
++	ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeedbeef);
+ 	ipath_cdbg(VERBOSE, "setup for IB speed (%x) done\n", speed);
+ }
+ 
+@@ -2313,6 +2318,7 @@ static void try_auto_neg(struct ipath_devdata *dd)
+ 	 */
+ 	ipath_write_kreg(dd, IPATH_KREG_OFFSET(IBNCModeCtrl),
+ 		0x3b9dc07);
++	ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeedbeef);
+ 	dd->ipath_flags |= IPATH_IB_AUTONEG_INPROG;
+ 	ipath_autoneg_send(dd, 0);
+ 	set_speed_fast(dd, IPATH_IB_DDR);
+@@ -2404,6 +2410,9 @@ static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs)
+ 				ipath_write_kreg(dd,
+ 					IPATH_KREG_OFFSET(IBNCModeCtrl), 0);
++				ipath_write_kreg(dd,
++					dd->ipath_kregs->kr_scratch,
++					0xfeedbeef);
+ 				symadj = 1;
+ 			}
+ 		}
+ 		/*
+diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
+index be4fc9a..4e5a0ff 100644
+--- a/drivers/infiniband/hw/ipath/ipath_mad.c
++++ b/drivers/infiniband/hw/ipath/ipath_mad.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
++ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+  *
+  * This software is available to you under a choice of one of two
+@@ -184,6 +184,7 @@ static int set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
+ 			(u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
+ 		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
+ 				 dd->ipath_ibcctrl);
++		ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeedbeef);
+ 	}
+ 	return 0;
+ }
+@@ -216,6 +217,7 @@ static int set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
+ 			(u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
+ 		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
+ 				 dd->ipath_ibcctrl);
++		ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeedbeef);
+ 	}
+ 	return 0;
+ }
+@@ -401,6 +403,7 @@ static int set_linkdowndefaultstate(struct ipath_devdata *dd, int sleep)
+ 		dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
+ 	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
+ 			 dd->ipath_ibcctrl);
++	ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeedbeef);
+ 	return 0;
+ }
+ 

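The change itself is mechanical: every write to kr_ibcctrl, kr_ibcddrctrl or IBNCModeCtrl is now followed by a write of a dummy value to the scratch register, so two IBC register writes never happen back to back. A sketch of a wrapper that would centralize the pattern (hypothetical helper; the patch instead open-codes the second write at each call site, and 0xfeedbeef is just a recognizable dummy value):

    static inline void ipath_write_ibc_kreg(struct ipath_devdata *dd,
                                            ipath_kreg regno, u64 value)
    {
            ipath_write_kreg(dd, regno, value);
            /* the scratch write breaks up back-to-back IBC register writes */
            ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeedbeef);
    }
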
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0160_rc_spinlock.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0160_rc_spinlock.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0160_rc_spinlock.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,132 @@
+--- a/drivers/infiniband/hw/ipath/ipath_rc.c	2009-01-22 11:45:13.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_rc.c	2009-01-22 12:03:11.000000000 -0800
+@@ -1268,13 +1268,12 @@ static inline void ipath_rc_rcv_resp(str
+ {
+ 	struct ipath_swqe *wqe;
+ 	enum ib_wc_status status;
+-	unsigned long flags;
+ 	int diff;
+ 	u32 pad;
+ 	u32 aeth;
+ 	u64 val;
+ 
+-	spin_lock_irqsave(&qp->s_lock, flags);
++	spin_lock(&qp->s_lock);
+ 
+ 	/* Double check we can process this now that we hold the s_lock. */
+ 	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
+@@ -1377,7 +1376,7 @@ static inline void ipath_rc_rcv_resp(str
+ 		 */
+ 		qp->s_rdma_read_len -= pmtu;
+ 		update_last_psn(qp, psn);
+-		spin_unlock_irqrestore(&qp->s_lock, flags);
++		spin_unlock(&qp->s_lock);
+ 		ipath_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0);
+ 		goto bail;
+ 
+@@ -1460,7 +1459,7 @@ ack_err:
+ 		ipath_dbg("Delayed error %d\n", status);
+ 	}
+ ack_done:
+-	spin_unlock_irqrestore(&qp->s_lock, flags);
++	spin_unlock(&qp->s_lock);
+ bail:
+ 	return;
+ }
+@@ -1494,7 +1493,6 @@ static inline int ipath_rc_rcv_error(str
+ 	struct ipath_ack_entry *e;
+ 	u8 i, prev;
+ 	int old_req;
+-	unsigned long flags;
+ 
+ 	if (diff > 0) {
+ 		/*
+@@ -1529,7 +1527,7 @@ static inline int ipath_rc_rcv_error(str
+ 	e = NULL;
+ 	old_req = 1;
+ 
+-	spin_lock_irqsave(&qp->s_lock, flags);
++	spin_lock(&qp->s_lock);
+ 	/* Double check we can process this now that we hold the s_lock. */
+ 	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
+ 		goto unlock_done;
+@@ -1640,7 +1638,7 @@ static inline int ipath_rc_rcv_error(str
+ 		 * after all the previous RDMA reads and atomics.
+ 		 */
+ 		if (i == qp->r_head_ack_queue) {
+-			spin_unlock_irqrestore(&qp->s_lock, flags);
++			spin_unlock(&qp->s_lock);
+ 			qp->r_nak_state = 0;
+ 			qp->r_ack_psn = qp->r_psn - 1;
+ 			goto send_ack;
+@@ -1653,7 +1651,7 @@ static inline int ipath_rc_rcv_error(str
+ 		if (qp->r_head_ack_queue == qp->s_tail_ack_queue &&
+ 		    !(qp->s_flags & IPATH_S_ACK_PENDING) &&
+ 		    qp->s_ack_state == OP(ACKNOWLEDGE)) {
+-			spin_unlock_irqrestore(&qp->s_lock, flags);
++			spin_unlock(&qp->s_lock);
+ 			qp->r_nak_state = 0;
+ 			qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
+ 			goto send_ack;
+@@ -1670,7 +1668,7 @@ static inline int ipath_rc_rcv_error(str
+ 	ipath_schedule_send(qp);
+ 
+ unlock_done:
+-	spin_unlock_irqrestore(&qp->s_lock, flags);
++	spin_unlock(&qp->s_lock);
+ done:
+ 	return 1;
+ 
+@@ -1736,7 +1734,6 @@ void ipath_rc_rcv(struct ipath_ibdev *de
+ 	int diff;
+ 	struct ib_reth *reth;
+ 	int header_in_data;
+-	unsigned long flags;
+ 
+ 	/* Validate the SLID. See Ch. 9.6.1.5 */
+ 	if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid))
+@@ -1963,7 +1960,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
+ 		next = qp->r_head_ack_queue + 1;
+ 		if (next > IPATH_MAX_RDMA_ATOMIC)
+ 			next = 0;
+-		spin_lock_irqsave(&qp->s_lock, flags);
++		spin_lock(&qp->s_lock);
+ 		/* Double check we can process this while holding the s_lock. */
+ 		if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
+ 			goto unlock;
+@@ -2053,7 +2050,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
+ 		next = qp->r_head_ack_queue + 1;
+ 		if (next > IPATH_MAX_RDMA_ATOMIC)
+ 			next = 0;
+-		spin_lock_irqsave(&qp->s_lock, flags);
++		spin_lock(&qp->s_lock);
+ 		/* Double check we can process this while holding the s_lock. */
+ 		if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
+ 			goto unlock;
+@@ -2133,7 +2130,7 @@ rnr_nak:
+ 	goto send_ack;
+ 
+ nack_inv_unlck:
+-	spin_unlock_irqrestore(&qp->s_lock, flags);
++	spin_unlock(&qp->s_lock);
+ nack_inv:
+ 	ipath_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ 	qp->r_nak_state = IB_NAK_INVALID_REQUEST;
+@@ -2141,7 +2138,7 @@ nack_inv:
+ 	goto send_ack;
+ 
+ nack_acc_unlck:
+-	spin_unlock_irqrestore(&qp->s_lock, flags);
++	spin_unlock(&qp->s_lock);
+ nack_acc:
+ 	ipath_rc_error(qp, IB_WC_LOC_PROT_ERR);
+ 	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
+@@ -2151,7 +2148,7 @@ send_ack:
+ 	goto done;
+ 
+ unlock:
+-	spin_unlock_irqrestore(&qp->s_lock, flags);
++	spin_unlock(&qp->s_lock);
+ done:
+ 	return;
+ }

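This patch changes only the lock flavor: the qp->s_lock critical sections stay the same, but the IRQ flag save/restore is dropped, presumably because these receive-side routines are reached from a context where interrupts are already disabled, making the plain spin_lock() sufficient and slightly cheaper. A minimal sketch of the assumption being relied on (illustrative only, hypothetical function):

    /* Caller guarantees interrupts are already off here, so the plain lock
     * is safe; if this could run with IRQs enabled it would need _irqsave.
     */
    static void touch_send_state(struct ipath_qp *qp)
    {
            spin_lock(&qp->s_lock);
            /* ... inspect or update send-side state ... */
            spin_unlock(&qp->s_lock);
    }
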
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0170_ruc_loopback.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0170_ruc_loopback.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0170_ruc_loopback.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,18 @@
+--- a/drivers/infiniband/hw/ipath/ipath_ruc.c	2009-01-22 11:45:13.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_ruc.c	2009-01-23 11:54:46.000000000 -0800
+@@ -660,12 +660,14 @@ void ipath_do_send(unsigned long data)
+ {
+ 	struct ipath_qp *qp = (struct ipath_qp *)data;
+ 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
++	struct ipath_devdata *dd = dev->dd;
+ 	int (*make_req)(struct ipath_qp *qp);
+ 	unsigned long flags;
+ 
+ 	if ((qp->ibqp.qp_type == IB_QPT_RC ||
+ 	     qp->ibqp.qp_type == IB_QPT_UC) &&
+-	    qp->remote_ah_attr.dlid == dev->dd->ipath_lid) {
++	    (qp->remote_ah_attr.dlid & ~((1 << dd->ipath_lmc) - 1)) ==
++	    dd->ipath_lid) {
+ 		ipath_ruc_loopback(qp);
+ 		goto bail;
+ 	}

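With a nonzero LMC the port answers to 2^LMC consecutive LIDs, so the loopback check cannot compare the destination LID for strict equality against ipath_lid; it has to mask off the low LMC bits first, as the hunk above does. A small standalone illustration of the same test (plain C; the helper is hypothetical):

    /* Treat dlid as local if it falls within the base LID's 2^lmc range. */
    static int lid_is_local(unsigned int dlid, unsigned int base_lid,
                            unsigned int lmc)
    {
            return (dlid & ~((1u << lmc) - 1)) == base_lid;
    }

    /* e.g. base_lid 0x40 with lmc 2: dlids 0x40..0x43 all loop back locally */
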
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0180_dma_mapping_error.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0180_dma_mapping_error.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0180_dma_mapping_error.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,13 @@
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c
+===================================================================
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 20:28:14.000000000 -0800
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-01-25 20:38:20.000000000 -0800
+@@ -739,7 +739,7 @@ retry:
+ 		dw = (len + 3) >> 2;
+ 		addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
+ 				      DMA_TO_DEVICE);
+-		if (dma_mapping_error(addr))
++		if (dma_mapping_error(&dd->pcidev->dev, addr))
+ 			goto unmap;
+ 		make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
+ 		/* SDmaUseLargeBuf has to be set in every descriptor */

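The one-line change tracks the kernel API update (around 2.6.27) that added a struct device argument to dma_mapping_error(), letting the check be specific to the device's DMA ops. A hedged usage sketch with the newer signature, assuming <linux/pci.h> and <linux/dma-mapping.h>; the function and its error handling are illustrative only:

    static int map_one_buffer(struct pci_dev *pdev, void *buf, size_t len,
                              dma_addr_t *daddr)
    {
            dma_addr_t addr;

            addr = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(&pdev->dev, addr))    /* device pointer now required */
                    return -ENOMEM;
            *daddr = addr;
            return 0;
    }
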
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0190_rwqe_error_fix.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0190_rwqe_error_fix.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0190_rwqe_error_fix.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,44 @@
+--- a/drivers/infiniband/hw/ipath/ipath_ruc.c	2009-01-23 12:07:05.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_ruc.c	2009-02-02 14:56:01.000000000 -0800
+@@ -201,22 +201,29 @@ int ipath_get_rwqe(struct ipath_qp *qp, 
+ 	/* Validate tail before using it since it is user writable. */
+ 	if (tail >= rq->size)
+ 		tail = 0;
+-	do {
+-		if (unlikely(tail == wq->head)) {
++	if (unlikely(tail == wq->head)) {
++		ret = 0;
++		goto unlock;
++	}
++	/* Make sure entry is read after head index is read. */
++	smp_rmb();
++	wqe = get_rwqe_ptr(rq, tail);
++	/*
++	 * Even though we update the tail index in memory, the verbs
++	 * consumer is not supposed to post more entries until a
++	 * completion is generated.
++	 */
++	if (++tail >= rq->size)
++		tail = 0;
++	wq->tail = tail;
++	if (!wr_id_only) {
++		qp->r_sge.sg_list = qp->r_sg_list;
++		if (!ipath_init_sge(qp, wqe, &qp->r_len, &qp->r_sge)) {
+ 			ret = 0;
+ 			goto unlock;
+ 		}
+-		/* Make sure entry is read after head index is read. */
+-		smp_rmb();
+-		wqe = get_rwqe_ptr(rq, tail);
+-		if (++tail >= rq->size)
+-			tail = 0;
+-		if (wr_id_only)
+-			break;
+-		qp->r_sge.sg_list = qp->r_sg_list;
+-	} while (!ipath_init_sge(qp, wqe, &qp->r_len, &qp->r_sge));
++	}
+ 	qp->r_wr_id = wqe->wr_id;
+-	wq->tail = tail;
+ 
+ 	ret = 1;
+ 	set_bit(IPATH_R_WRID_VALID, &qp->r_aflags);

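The rewritten ipath_get_rwqe() consumes at most one receive WQE per call: return 0 if the ring is empty, issue smp_rmb() so the entry is read only after the user-writable head index, advance and publish the tail, and only then build the SGE list. A condensed sketch of that consumer-side ordering with simplified, hypothetical types (assumes the kernel's smp_rmb()):

    struct rwqe { unsigned long wr_id; };       /* stand-in for the real entry */

    struct rwq {
            unsigned int head;                  /* advanced by the producer */
            unsigned int tail;                  /* advanced by this consumer */
            unsigned int size;
            struct rwqe *entries;
    };

    static struct rwqe *rwq_pop(struct rwq *rq)
    {
            unsigned int tail = rq->tail;

            if (tail == rq->head)
                    return NULL;                /* ring is empty */
            smp_rmb();                          /* read entry only after head index */
            rq->tail = (tail + 1 < rq->size) ? tail + 1 : 0;
            return &rq->entries[tail];
    }
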
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0200_rcvhdrqalign.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0200_rcvhdrqalign.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0200_rcvhdrqalign.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,85 @@
+--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c	2009-02-02 18:15:59.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c	2009-02-02 18:19:53.000000000 -0800
+@@ -1520,13 +1520,17 @@
+ 
+ 	/*
+ 	 * For openfabrics, we need to be able to handle an IB header of
+-	 * 24 dwords.  HT chip has arbitrary sized receive buffers, so we
+-	 * made them the same size as the PIO buffers.  This chip does not
+-	 * handle arbitrary size buffers, so we need the header large enough
+-	 * to handle largest IB header, but still have room for a 2KB MTU
+-	 * standard IB packet.
++	 * at least 24 dwords.  This chip does not handle arbitrary size
++	 * buffers, so we need the header large enough to handle largest
++	 * IB header, but still have room for a 2KB MTU standard IB packet.
++	 * Additionally, some processor/memory controller combinations
++	 * benefit quite strongly from having the DMA'ed data be cacheline
++	 * aligned and a cacheline multiple, so we set the size to 32 dwords
++	 * (2 64-byte primary cachelines for pretty much all processors of
++	 * interest).  The alignment hurts nothing, other than using somewhat
++	 * more memory.
+ 	 */
+-	dd->ipath_rcvhdrentsize = 24;
++	dd->ipath_rcvhdrentsize = 32;
+ 	dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
+ 	dd->ipath_rhf_offset = 0;
+ 	dd->ipath_egrtidbase = (u64 __iomem *)
+--- a/drivers/infiniband/hw/ipath/ipath_iba7220.c	2009-02-02 18:15:14.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_iba7220.c	2009-02-02 18:19:06.000000000 -0800
+@@ -1521,7 +1521,7 @@
+ 
+ 	dd->ipath_flags |= IPATH_NODMA_RTAIL | IPATH_HAS_SEND_DMA |
+ 		IPATH_HAS_PBC_CNT | IPATH_HAS_THRESH_UPDATE;
+-	dd->ipath_pioupd_thresh = 4U; /* set default update threshold */
++	dd->ipath_pioupd_thresh = 8U; /* set default update threshold */
+ 	return 0;
+ }
+ 
+@@ -1962,13 +1962,17 @@
+ 
+ 	/*
+ 	 * For openfabrics, we need to be able to handle an IB header of
+-	 * 24 dwords.  HT chip has arbitrary sized receive buffers, so we
+-	 * made them the same size as the PIO buffers.  This chip does not
+-	 * handle arbitrary size buffers, so we need the header large enough
+-	 * to handle largest IB header, but still have room for a 2KB MTU
+-	 * standard IB packet.
++	 * at least 24 dwords.  This chip does not handle arbitrary size
++	 * buffers, so we need the header large enough to handle largest
++	 * IB header, but still have room for a 2KB MTU standard IB packet.
++	 * Additionally, some processor/memory controller combinations
++	 * benefit quite strongly from having the DMA'ed data be cacheline
++	 * aligned and a cacheline multiple, so we set the size to 32 dwords
++	 * (2 64-byte primary cachelines for pretty much all processors of
++	 * interest).  The alignment hurts nothing, other than using somewhat
++	 * more memory.
+ 	 */
+-	dd->ipath_rcvhdrentsize = 24;
++	dd->ipath_rcvhdrentsize = 32;
+ 	dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
+ 	dd->ipath_rhf_offset =
+ 		dd->ipath_rcvhdrentsize - sizeof(u64) / sizeof(u32);
+@@ -2094,7 +2094,7 @@
+ 			 dd->ipath_rcvctrl);
+ 	dd->ipath_p0_rcvegrcnt = 2048; /* always */
+ 	if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
+-		dd->ipath_pioreserved = 3; /* kpiobufs used for PIO */
++		dd->ipath_pioreserved = 8; /* kpiobufs used for PIO */
+ }
+ 
+ 
+--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c	2009-02-02 18:15:59.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c	2009-02-02 18:19:53.000000000 -0800
+@@ -927,6 +927,12 @@
+ 	ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u "
+ 		   "each for %u user ports\n", kpiobufs,
+ 		   piobufs, dd->ipath_pbufsport, uports);
++	if (dd->ipath_pioupd_thresh &&
++		(dd->ipath_pioupd_thresh > dd->ipath_pbufsport - 2)) {
++		dd->ipath_pioupd_thresh = dd->ipath_pbufsport - 2;
++		ipath_cdbg(VERBOSE, "Drop pioupd_thresh to %u\n",
++			dd->ipath_pioupd_thresh);
++	}
+ 	ret = dd->ipath_f_early_init(dd);
+ 	if (ret) {
+ 		ipath_dev_err(dd, "Early initialization failure\n");

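The arithmetic behind the comment added above: 32 dwords x 4 bytes = 128 bytes, exactly two 64-byte cachelines per receive header entry, while the old 24 dwords = 96 bytes left consecutive entries straddling cacheline boundaries. A trivial userspace check of that claim (illustrative only, not driver code):

    #include <stdio.h>

    int main(void)
    {
            const unsigned int cacheline = 64;
            const unsigned int dwords[] = { 24, 32 };

            for (int i = 0; i < 2; i++) {
                    unsigned int bytes = dwords[i] * 4;
                    printf("%u dwords = %u bytes, remainder mod %u = %u\n",
                           dwords[i], bytes, cacheline, bytes % cacheline);
            }
            return 0;   /* prints remainders 32 (old) and 0 (new) */
    }
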
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0205_user_sdma_generation.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0205_user_sdma_generation.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0205_user_sdma_generation.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,35 @@
+diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.c b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+index 82d9a0b..a4cc9b9 100644
+--- a/drivers/infiniband/hw/ipath/ipath_user_sdma.c
++++ b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+@@ -712,6 +712,8 @@ static int ipath_user_sdma_push_pkts(struct ipath_devdata *dd,
+ 	int ret = 0;
+ 	unsigned long flags;
+ 	u16 tail;
++	u8 generation;
++	u64 descq_added;
+ 
+ 	if (list_empty(pktlist))
+ 		return 0;
+@@ -721,6 +723,10 @@ static int ipath_user_sdma_push_pkts(struct ipath_devdata *dd,
+ 
+ 	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+ 
++	/* keep a copy for restoring purposes in case of problems */
++	generation = dd->ipath_sdma_generation;
++	descq_added = dd->ipath_sdma_descq_added;
++
+ 	if (unlikely(dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK)) {
+ 		ret = -ECOMM;
+ 		goto unlock;
+@@ -784,6 +790,10 @@ unlock_check_tail:
+ 	}
+ 
+ unlock:
++	if (unlikely(ret < 0)) {
++		dd->ipath_sdma_generation = generation;
++		dd->ipath_sdma_descq_added = descq_added;
++	}
+ 	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+ 
+ 	return ret;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0210_bad_unmap_fix.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0210_bad_unmap_fix.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0210_bad_unmap_fix.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,35 @@
+diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
+index ad0aab6..7cdf7f1 100644
+--- a/drivers/infiniband/hw/ipath/ipath_driver.c
++++ b/drivers/infiniband/hw/ipath/ipath_driver.c
+@@ -1102,7 +1102,7 @@ struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd,
+ 	 * key header.  In order to keep everything dword aligned,
+ 	 * we'll reserve 4 bytes.
+ 	 */
+-	len = dd->ipath_ibmaxlen + 4;
++	len = dd->ipath_init_ibmaxlen + 4;
+ 
+ 	if (dd->ipath_flags & IPATH_4BYTE_TID) {
+ 		/* We need a 2KB multiple alignment, and there is no way
+@@ -2506,7 +2506,7 @@ void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
+ 		for (e = 0; e < dd->ipath_p0_rcvegrcnt; e++)
+ 			if (skbinfo[e].skb) {
+ 				pci_unmap_single(dd->pcidev, skbinfo[e].phys,
+-						 dd->ipath_ibmaxlen,
++						 dd->ipath_init_ibmaxlen,
+ 						 PCI_DMA_FROMDEVICE);
+ 				dev_kfree_skb(skbinfo[e].skb);
+ 			}
+diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
+index 3e5baa4..39afba1 100644
+--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
++++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
+@@ -126,7 +126,7 @@ static int create_port0_egr(struct ipath_devdata *dd)
+ 		dd->ipath_port0_skbinfo[e].phys =
+ 		  ipath_map_single(dd->pcidev,
+ 				   dd->ipath_port0_skbinfo[e].skb->data,
+-				   dd->ipath_ibmaxlen, PCI_DMA_FROMDEVICE);
++				   dd->ipath_init_ibmaxlen, PCI_DMA_FROMDEVICE);
+ 		dd->ipath_f_put_tid(dd, e + (u64 __iomem *)
+ 				    ((char __iomem *) dd->ipath_kregbase +
+ 				     dd->ipath_rcvegrbase),

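Both hunks key the eager-buffer skb mapping and unmapping to ipath_init_ibmaxlen instead of ipath_ibmaxlen. The DMA API requires the unmap length to equal the length used when mapping, so using a value that can change after initialization on only one side of the pair risks a mismatched (bad) unmap; pinning both sides to the initial value keeps them symmetric. A minimal sketch of the required pairing, assuming <linux/pci.h> and <linux/skbuff.h> (names illustrative):

    static void egr_buf_cycle(struct pci_dev *pdev, struct sk_buff *skb,
                              size_t init_ibmaxlen)
    {
            /* the length used to map must also be the length used to unmap */
            dma_addr_t phys = pci_map_single(pdev, skb->data, init_ibmaxlen,
                                             PCI_DMA_FROMDEVICE);

            /* ... hardware receives into the buffer ... */

            pci_unmap_single(pdev, phys, init_ibmaxlen, PCI_DMA_FROMDEVICE);
    }
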
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0220_bad_dma_free_coherent_fix.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0220_bad_dma_free_coherent_fix.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0220_bad_dma_free_coherent_fix.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,13 @@
+diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
+index 56c0eda..f113e4b 100644
+--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
++++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
+@@ -904,7 +904,7 @@ static int ipath_create_user_egr(struct ipath_portdata *pd)
+ 	chunk = pd->port_rcvegrbuf_chunks;
+ 	egrperchunk = pd->port_rcvegrbufs_perchunk;
+ 	size = pd->port_rcvegrbuf_size;
+-	pd->port_rcvegrbuf = kmalloc(chunk * sizeof(pd->port_rcvegrbuf[0]),
++	pd->port_rcvegrbuf = kzalloc(chunk * sizeof(pd->port_rcvegrbuf[0]),
+ 				     GFP_KERNEL);
+ 	if (!pd->port_rcvegrbuf) {
+ 		ret = -ENOMEM;

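The switch to kzalloc() matters here presumably because the chunk-pointer array is later tested slot by slot on the error and teardown paths, so entries that were never populated must read back as NULL; kzalloc() guarantees that, while kmalloc() leaves the memory uninitialized. The difference in one line each (assuming <linux/slab.h>):

    ptrs = kmalloc(n * sizeof(*ptrs), GFP_KERNEL);  /* contents are indeterminate */
    ptrs = kzalloc(n * sizeof(*ptrs), GFP_KERNEL);  /* contents are all zero/NULL */
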
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0230_rc_send_comp_len.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0230_rc_send_comp_len.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0230_rc_send_comp_len.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,18 @@
+--- a/drivers/infiniband/hw/ipath/ipath_rc.c	2009-03-03 13:55:04.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_rc.c	2009-03-03 13:38:51.000000000 -0800
+@@ -940,6 +940,7 @@ void ipath_rc_send_complete(struct ipath
+ 			wc.wr_id = wqe->wr.wr_id;
+ 			wc.status = IB_WC_SUCCESS;
+ 			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
++			wc.byte_len = wqe->length;
+ 			wc.qp = &qp->ibqp;
+ 			ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
+ 		}
+@@ -1086,6 +1087,7 @@ static int do_rc_ack(struct ipath_qp *qp
+ 				wc.wr_id = wqe->wr.wr_id;
+ 				wc.status = IB_WC_SUCCESS;
+ 				wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
++				wc.byte_len = wqe->length;
+ 				wc.qp = &qp->ibqp;
+ 				ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
+ 						0);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0240_rc_mr_refcount.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0240_rc_mr_refcount.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0240_rc_mr_refcount.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,32 @@
+--- a/drivers/infiniband/hw/ipath/ipath_qp.c	2009-02-23 11:47:54.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_qp.c	2009-02-23 12:01:28.000000000 -0800
+@@ -378,9 +378,6 @@ static void clear_mr_refs(struct ipath_q
+ 	}
+ 
+ 	if (clr_sends) {
+-		n = qp->s_last <= qp->s_head ? qp->s_head - qp->s_last :
+-			qp->s_size - qp->s_last + qp->s_head;
+-
+ 		while (qp->s_last != qp->s_head) {
+ 			struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
+ 			unsigned i;
+@@ -411,6 +408,8 @@ static void clear_mr_refs(struct ipath_q
+ 
+ 			atomic_dec(&sge->mr->refcount);
+ 		}
++		e->opcode = 0;
++		e->rdma_sge.num_sge = 0;
+ 	}
+ }
+ 
+@@ -576,8 +575,9 @@ int ipath_modify_qp(struct ib_qp *ibqp, 
+ 			tasklet_kill(&qp->s_task);
+ 			wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
+ 			spin_lock_irq(&qp->s_lock);
++			clear_mr_refs(qp, 1);
++			ipath_reset_qp(qp, ibqp->qp_type);
+ 		}
+-		ipath_reset_qp(qp, ibqp->qp_type);
+ 		break;
+ 
+ 	case IB_QPS_SQD:

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0250_rc_src_path.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0250_rc_src_path.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0250_rc_src_path.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,24 @@
+--- a/drivers/infiniband/hw/ipath/ipath_rc.c	2009-02-23 12:22:36.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_rc.c	2009-02-24 14:57:34.000000000 -0800
+@@ -694,7 +693,8 @@ static void send_rc_ack(struct ipath_qp 
+ 	hdr.lrh[0] = cpu_to_be16(lrh0);
+ 	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+ 	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
+-	hdr.lrh[3] = cpu_to_be16(dd->ipath_lid);
++	hdr.lrh[3] = cpu_to_be16(dd->ipath_lid |
++				 qp->remote_ah_attr.src_path_bits);
+ 	ohdr->bth[0] = cpu_to_be32(bth0);
+ 	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
+ 	ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);
+--- a/drivers/infiniband/hw/ipath/ipath_ruc.c	2009-02-23 12:22:36.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_ruc.c	2009-02-24 15:23:48.000000000 -0800
+@@ -647,7 +647,8 @@ void ipath_make_ruc_header(struct ipath_
+ 	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
+ 	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+ 	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
+-	qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
++	qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid |
++				       qp->remote_ah_attr.src_path_bits);
+ 	bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index);
+ 	bth0 |= extra_bytes << 20;
+ 	ohdr->bth[0] = cpu_to_be32(bth0 | (1 << 22));

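Both hunks build the LRH source LID as the port's base LID OR'ed with the path bits the peer addressed us by, rather than the bare base LID, presumably so that with LMC > 0 the RC response appears to come from the same LID the request was sent to. The patch ORs src_path_bits in directly; a small illustration that also masks them to the LMC range for clarity (hypothetical helper):

    /* slid = base LID with its low lmc bits taken from src_path_bits */
    static unsigned int make_slid(unsigned int base_lid,
                                  unsigned int src_path_bits,
                                  unsigned int lmc)
    {
            return base_lid | (src_path_bits & ((1u << lmc) - 1));
    }
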
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0260_sdma_callback.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0260_sdma_callback.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0260_sdma_callback.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,202 @@
+Only in a/drivers/infiniband/hw/ipath: .ipath_ruc.c.swp
+diff -up a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
+--- a/drivers/infiniband/hw/ipath/ipath_kernel.h	2009-02-27 11:48:31.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_kernel.h	2009-03-02 18:15:57.000000000 -0800
+@@ -210,7 +210,6 @@ struct ipath_sdma_txreq {
+ 	};
+ 	void              (*callback)(void *, int);
+ 	void               *callback_cookie;
+-	int                 callback_status;
+ 	u16                 start_idx;  /* sdma private */
+ 	u16                 next_descq_idx;  /* sdma private */
+ 	struct list_head    list;       /* sdma private */
+@@ -522,9 +521,7 @@ struct ipath_devdata {
+ 	u16                   ipath_sdma_reset_wait;
+ 	u8                    ipath_sdma_generation;
+ 	struct tasklet_struct ipath_sdma_abort_task;
+-	struct tasklet_struct ipath_sdma_notify_task;
+ 	struct list_head      ipath_sdma_activelist;
+-	struct list_head      ipath_sdma_notifylist;
+ 	atomic_t              ipath_sdma_vl15_count;
+ 	struct timer_list     ipath_sdma_vl15_timer;
+ 
+diff -up a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c
+--- a/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-03-03 11:24:26.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_sdma.c	2009-03-03 17:43:02.000000000 -0800
+@@ -131,76 +131,28 @@ int ipath_sdma_make_progress(struct ipat
+ 			dd->ipath_sdma_descq_head = 0;
+ 
+ 		if (txp && txp->next_descq_idx == dd->ipath_sdma_descq_head) {
+-			/* move to notify list */
++			/* remove from active list */
++			list_del_init(&txp->list);
++			if (txp->callback)
++				(*txp->callback)(txp->callback_cookie,
++						 IPATH_SDMA_TXREQ_S_OK);
+ 			if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
+ 				vl15_watchdog_deq(dd);
+-			list_move_tail(lp, &dd->ipath_sdma_notifylist);
+ 			if (!list_empty(&dd->ipath_sdma_activelist)) {
+ 				lp = dd->ipath_sdma_activelist.next;
+ 				txp = list_entry(lp, struct ipath_sdma_txreq,
+ 						 list);
+ 				start_idx = txp->start_idx;
+-			} else {
+-				lp = NULL;
++			} else
+ 				txp = NULL;
+-			}
+ 		}
+ 		progress = 1;
+ 	}
+ 
+-	if (progress)
+-		tasklet_hi_schedule(&dd->ipath_sdma_notify_task);
+-
+ done:
+ 	return progress;
+ }
+ 
+-static void ipath_sdma_notify(struct ipath_devdata *dd, struct list_head *list)
+-{
+-	struct ipath_sdma_txreq *txp, *txp_next;
+-
+-	list_for_each_entry_safe(txp, txp_next, list, list) {
+-		list_del_init(&txp->list);
+-
+-		if (txp->callback)
+-			(*txp->callback)(txp->callback_cookie,
+-					 txp->callback_status);
+-	}
+-}
+-
+-static void sdma_notify_taskbody(struct ipath_devdata *dd)
+-{
+-	unsigned long flags;
+-	struct list_head list;
+-
+-	INIT_LIST_HEAD(&list);
+-
+-	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+-
+-	list_splice_init(&dd->ipath_sdma_notifylist, &list);
+-
+-	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+-
+-	ipath_sdma_notify(dd, &list);
+-
+-	/*
+-	 * The IB verbs layer needs to see the callback before getting
+-	 * the call to ipath_ib_piobufavail() because the callback
+-	 * handles releasing resources the next send will need.
+-	 * Otherwise, we could do these calls in
+-	 * ipath_sdma_make_progress().
+-	 */
+-	ipath_ib_piobufavail(dd->verbs_dev);
+-}
+-
+-static void sdma_notify_task(unsigned long opaque)
+-{
+-	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
+-
+-	if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
+-		sdma_notify_taskbody(dd);
+-}
+-
+ static void dump_sdma_state(struct ipath_devdata *dd)
+ {
+ 	unsigned long reg;
+@@ -258,7 +210,6 @@ static void sdma_abort_task(unsigned lon
+ 	if (status == IPATH_SDMA_ABORT_ABORTED) {
+ 		struct ipath_sdma_txreq *txp, *txpnext;
+ 		u64 hwstatus;
+-		int notify = 0;
+ 
+ 		hwstatus = ipath_read_kreg64(dd,
+ 				dd->ipath_kregs->kr_senddmastatus);
+@@ -280,14 +231,13 @@ static void sdma_abort_task(unsigned lon
+ 		/* dequeue all "sent" requests */
+ 		list_for_each_entry_safe(txp, txpnext,
+ 					 &dd->ipath_sdma_activelist, list) {
+-			txp->callback_status = IPATH_SDMA_TXREQ_S_ABORTED;
++			list_del_init(&txp->list);
++			if (txp->callback)
++				(*txp->callback)(txp->callback_cookie,
++						 IPATH_SDMA_TXREQ_S_ABORTED);
+ 			if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
+ 				vl15_watchdog_deq(dd);
+-			list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
+-			notify = 1;
+ 		}
+-		if (notify)
+-			tasklet_hi_schedule(&dd->ipath_sdma_notify_task);
+ 
+ 		/* reset our notion of head and tail */
+ 		dd->ipath_sdma_descq_tail = 0;
+@@ -480,10 +430,7 @@ int setup_sdma(struct ipath_devdata *dd)
+ 			 senddmabufmask[2]);
+ 
+ 	INIT_LIST_HEAD(&dd->ipath_sdma_activelist);
+-	INIT_LIST_HEAD(&dd->ipath_sdma_notifylist);
+ 
+-	tasklet_init(&dd->ipath_sdma_notify_task, sdma_notify_task,
+-		     (unsigned long) dd);
+ 	tasklet_init(&dd->ipath_sdma_abort_task, sdma_abort_task,
+ 		     (unsigned long) dd);
+ 
+@@ -520,7 +467,6 @@ void teardown_sdma(struct ipath_devdata 
+ 	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+ 
+ 	tasklet_kill(&dd->ipath_sdma_abort_task);
+-	tasklet_kill(&dd->ipath_sdma_notify_task);
+ 
+ 	/* turn off sdma */
+ 	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+@@ -534,15 +480,15 @@ void teardown_sdma(struct ipath_devdata 
+ 	/* dequeue all "sent" requests */
+ 	list_for_each_entry_safe(txp, txpnext, &dd->ipath_sdma_activelist,
+ 				 list) {
+-		txp->callback_status = IPATH_SDMA_TXREQ_S_SHUTDOWN;
++		list_del_init(&txp->list);
++		if (txp->callback)
++			(*txp->callback)(txp->callback_cookie,
++					 IPATH_SDMA_TXREQ_S_SHUTDOWN);
+ 		if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
+ 			vl15_watchdog_deq(dd);
+-		list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
+ 	}
+ 	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+ 
+-	sdma_notify_taskbody(dd);
+-
+ 	del_timer_sync(&dd->ipath_sdma_vl15_timer);
+ 
+ 	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+@@ -787,13 +733,12 @@ retry:
+ 		descqp[0] |= __constant_cpu_to_le64(1ULL << 15);
+ 	}
+ 
++	tx->txreq.next_descq_idx = tail;
++	dd->ipath_sdma_descq_tail = tail;
+ 	/* Commit writes to memory and advance the tail on the chip */
+ 	wmb();
+ 	ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
+ 
+-	tx->txreq.next_descq_idx = tail;
+-	tx->txreq.callback_status = IPATH_SDMA_TXREQ_S_OK;
+-	dd->ipath_sdma_descq_tail = tail;
+ 	dd->ipath_sdma_descq_added += tx->txreq.sg_count;
+ 	list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist);
+ 	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_VL15)
+diff -up a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
+--- a/drivers/infiniband/hw/ipath/ipath_verbs.c	2009-02-27 11:48:31.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_verbs.c	2009-03-03 14:49:29.000000000 -0800
+@@ -1083,6 +1083,8 @@ static void sdma_complete(void *cookie, 
+ 
+ 	if (atomic_dec_and_test(&qp->refcount))
+ 		wake_up(&qp->wait);
++
++	ipath_ib_piobufavail(dev);
+ }
+ 
+ static void decrement_dma_busy(struct ipath_qp *qp)

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0270_mr_zero_refcount.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0270_mr_zero_refcount.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0270_mr_zero_refcount.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,27 @@
+diff -up ofed_kernel-2.6.18-EL5.1/drivers/infiniband/hw/ipath/ipath_keys.c c/drivers/infiniband/hw/ipath/ipath_keys.c
+--- ofed_kernel-2.6.18-EL5.1/drivers/infiniband/hw/ipath/ipath_keys.c	2009-03-03 17:55:26.000000000 -0800
++++ c/drivers/infiniband/hw/ipath/ipath_keys.c	2009-03-05 16:29:36.000000000 -0800
+@@ -104,8 +104,11 @@ int ipath_free_lkey(struct ipath_ibdev *
+ 	if (lkey == 0) {
+ 		if (dev->dma_mr) {
+ 			ret = atomic_read(&dev->dma_mr->refcount);
+-			if (!ret && dev->dma_mr == mr)
+-				dev->dma_mr = NULL;
++			if (dev->dma_mr == mr) {
++				if (!ret)
++					dev->dma_mr = NULL;
++			} else
++				ret = 0;
+ 		} else
+ 			ret = 0;
+ 	} else {
+@@ -117,8 +120,7 @@ int ipath_free_lkey(struct ipath_ibdev *
+ 	spin_unlock_irqrestore(&dev->lk_table.lock, flags);
+ 
+ 	if (ret) {
+-		ipath_dbg("ipath_free_lkey: MR busy (LKEY %x cnt %u)\n",
+-			  lkey, ret);
++		ipath_dbg("MR busy (LKEY %x cnt %u)\n", lkey, ret);
+ 		ret = -EBUSY;
+ 	}
+ 	return ret;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0280_user_sdma_head_optim.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0280_user_sdma_head_optim.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0280_user_sdma_head_optim.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,14 @@
+diff -up ofed_kernel-2.6.18-EL5.1/drivers/infiniband/hw/ipath/ipath_user_sdma.c c/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+--- ofed_kernel-2.6.18-EL5.1/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-03-03 17:55:38.000000000 -0800
++++ c/drivers/infiniband/hw/ipath/ipath_user_sdma.c	2009-03-05 15:53:17.000000000 -0800
+@@ -672,8 +672,8 @@ static inline __le64 ipath_sdma_make_fir
+ 
+ static inline __le64 ipath_sdma_make_last_desc0(__le64 descq)
+ {
+-					      /* last */  /* dma head */
+-	return descq | __constant_cpu_to_le64(1ULL << 11 | 1ULL << 13);
++					      /* last */
++	return descq | __constant_cpu_to_le64(1ULL << 11);
+ }
+ 
+ static inline __le64 ipath_sdma_make_desc1(u64 addr)

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0290_fix_rc_unaligned_pkts.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0290_fix_rc_unaligned_pkts.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0290_fix_rc_unaligned_pkts.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,11 @@
+diff -up ofed_kernel-2.6.18-EL5.1/drivers/infiniband/hw/ipath/ipath_verbs.c c/drivers/infiniband/hw/ipath/ipath_verbs.c
+--- ofed_kernel-2.6.18-EL5.1/drivers/infiniband/hw/ipath/ipath_verbs.c	2009-03-03 17:55:38.000000000 -0800
++++ c/drivers/infiniband/hw/ipath/ipath_verbs.c	2009-03-05 16:36:44.000000000 -0800
+@@ -1218,6 +1218,7 @@ static int ipath_verbs_send_dma(struct i
+ 	tx->txreq.map_addr = piobuf;
+ 	tx->txreq.flags |= IPATH_SDMA_TXREQ_F_FREEBUF;
+ 	tx->txreq.sg_count = 1;
++	memcpy(&tx->hdr.hdr, hdr, hdrwords << 2);
+ 
+ 	*piobuf++ = (__force u32) cpu_to_le32(plen);
+ 	*piobuf++ = (__force u32) cpu_to_le32(control);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0300_limit_stats_output.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0300_limit_stats_output.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0300_limit_stats_output.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,23 @@
+diff -up ofed_kernel-2.6.18-EL5.1/drivers/infiniband/hw/ipath/ipath_verbs.c c/drivers/infiniband/hw/ipath/ipath_verbs.c
+--- ofed_kernel-2.6.18-EL5.1/drivers/infiniband/hw/ipath/ipath_verbs.c	2009-03-03 17:55:38.000000000 -0800
++++ c/drivers/infiniband/hw/ipath/ipath_verbs.c	2009-03-05 16:36:44.000000000 -0800
+@@ -2348,10 +2349,18 @@ static ssize_t show_stats(struct class_d
+ 	for (i = 0; i < qpt->max; i++) {
+ 		struct ipath_qp *qp;
+ 		for (qp = qpt->table[i]; qp != NULL; qp = qp->next) {
++			if (qp->s_last == qp->s_acked &&
++			    qp->s_acked == qp->s_cur &&
++			    qp->s_cur == qp->s_tail &&
++			    qp->s_tail == qp->s_head)
++				continue;
++			if (len + 128 >= PAGE_SIZE)
++				break;
+ 			len += sprintf(buf + len,
+-				"QP%u %x PSN %x %x %x %x %x (%u %u %u %u %u)\n",
++			    "QP%u %x %u PSN %x %x %x %x %x (%u %u %u %u %u)\n",
+ 				qp->ibqp.qp_num,
+ 				qp->s_flags,
++				atomic_read(&qp->s_dma_busy),
+ 				qp->s_last_psn,
+ 				qp->s_psn,
+ 				qp->s_next_psn,

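The show_stats() changes skip QPs whose send indices are all equal (idle queues) and stop once fewer than 128 bytes of the page remain, since a sysfs/class-device show method must not write beyond the single PAGE_SIZE buffer it is handed. A common alternative shape for the same bound, shown only as a sketch (scnprintf() truncates rather than overflowing):

    len += scnprintf(buf + len, PAGE_SIZE - len,
                     "QP%u %x %u PSN %x\n",
                     qp->ibqp.qp_num, qp->s_flags,
                     atomic_read(&qp->s_dma_busy), qp->s_last_psn);

The patch keeps sprintf() and enforces the bound with the explicit length check instead.
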
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0310_rdma_read_mr.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0310_rdma_read_mr.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0310_rdma_read_mr.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,512 @@
+diff -up a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c
+--- a/drivers/infiniband/hw/ipath/ipath_keys.c	2009-03-05 17:37:11.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_keys.c	2009-03-06 16:04:19.000000000 -0800
+@@ -217,20 +217,17 @@ bail:
+  *
+  * Return 1 if successful, otherwise 0.
+  */
+-int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
++int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge *sge,
+ 		  u32 len, u64 vaddr, u32 rkey, int acc)
+ {
+ 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+ 	struct ipath_lkey_table *rkt = &dev->lk_table;
+-	struct ipath_sge *sge = &ss->sge;
+ 	struct ipath_mregion *mr;
+ 	unsigned n, m;
+ 	size_t off;
+ 	int ret = 0;
+ 	unsigned long flags;
+ 
+-	ss->num_sge = 0;
+-
+ 	/*
+ 	 * We use RKEY == zero for kernel virtual addresses
+ 	 * (see ipath_get_dma_mr and ipath_dma.c).
+@@ -251,8 +248,6 @@ int ipath_rkey_ok(struct ipath_qp *qp, s
+ 		sge->sge_length = len;
+ 		sge->m = 0;
+ 		sge->n = 0;
+-		ss->sg_list = NULL;
+-		ss->num_sge = 1;
+ 		goto ok;
+ 	}
+ 
+@@ -284,8 +279,6 @@ int ipath_rkey_ok(struct ipath_qp *qp, s
+ 	sge->sge_length = len;
+ 	sge->m = m;
+ 	sge->n = n;
+-	ss->sg_list = NULL;
+-	ss->num_sge = 1;
+ ok:
+ 	ret = 1;
+ bail:
+diff -up a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
+--- a/drivers/infiniband/hw/ipath/ipath_qp.c	2009-03-05 17:37:22.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_qp.c	2009-03-09 15:41:46.000000000 -0700
+@@ -390,26 +390,23 @@ static void clear_mr_refs(struct ipath_q
+ 			if (++qp->s_last >= qp->s_size)
+ 				qp->s_last = 0;
+ 		}
++		if (qp->s_rdma_mr) {
++			atomic_dec(&qp->s_rdma_mr->refcount);
++			qp->s_rdma_mr = NULL;
++		}
+ 	}
+ 
+ 	if (qp->ibqp.qp_type != IB_QPT_RC)
+ 		return;
+ 
+-	/* XXX Need to be sure that none of these are actively being sent */
+ 	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
+ 		struct ipath_ack_entry *e = &qp->s_ack_queue[n];
+-		unsigned i;
+-
+-		if (e->opcode != IB_OPCODE_RC_RDMA_READ_REQUEST)
+-			continue;
+-		for (i = 0; i < e->rdma_sge.num_sge; i++) {
+-			struct ipath_sge *sge = i ?
+-				&e->rdma_sge.sg_list[i - 1] : &e->rdma_sge.sge;
+ 
+-			atomic_dec(&sge->mr->refcount);
++		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
++		    e->rdma_sge.mr) {
++			atomic_dec(&e->rdma_sge.mr->refcount);
++			e->rdma_sge.mr = NULL;
+ 		}
+-		e->opcode = 0;
+-		e->rdma_sge.num_sge = 0;
+ 	}
+ }
+ 
+@@ -855,7 +852,7 @@ struct ib_qp *ipath_create_qp(struct ib_
+ 		} else if (init_attr->cap.max_recv_sge > 1)
+ 			sg_list_sz = sizeof(*qp->r_sg_list) *
+ 				(init_attr->cap.max_recv_sge - 1);
+-		qp = kmalloc(sz + sg_list_sz, GFP_KERNEL);
++		qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
+ 		if (!qp) {
+ 			ret = ERR_PTR(-ENOMEM);
+ 			goto bail_swq;
+@@ -870,14 +867,9 @@ struct ib_qp *ipath_create_qp(struct ib_
+ 			}
+ 		} else
+ 			qp->r_ud_sg_list = NULL;
+-		if (init_attr->srq) {
++		if (init_attr->srq)
+ 			sz = 0;
+-			qp->r_rq.size = 0;
+-			qp->r_rq.max_sge = 0;
+-			qp->r_rq.wq = NULL;
+-			init_attr->cap.max_recv_wr = 0;
+-			init_attr->cap.max_recv_sge = 0;
+-		} else {
++		else {
+ 			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
+ 			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
+ 			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
+@@ -910,8 +902,6 @@ struct ib_qp *ipath_create_qp(struct ib_
+ 		qp->s_max_sge = init_attr->cap.max_send_sge;
+ 		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
+ 			qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
+-		else
+-			qp->s_flags = 0;
+ 		dev = to_idev(ibpd->device);
+ 		err = ipath_alloc_qpn(&dev->qp_table, qp,
+ 				      init_attr->qp_type);
+@@ -920,8 +910,6 @@ struct ib_qp *ipath_create_qp(struct ib_
+ 			vfree(qp->r_rq.wq);
+ 			goto bail_sg_list;
+ 		}
+-		qp->ip = NULL;
+-		qp->s_tx = NULL;
+ 		ipath_reset_qp(qp, init_attr->qp_type);
+ 		break;
+ 
+@@ -1040,6 +1028,10 @@ int ipath_destroy_qp(struct ib_qp *ibqp)
+ 	ipath_free_qp(&dev->qp_table, qp);
+ 
+ 	if (qp->s_tx) {
++		if (qp->s_tx->mr) {
++			atomic_dec(&qp->s_tx->mr->refcount);
++			qp->s_tx->mr = NULL;
++		}
+ 		atomic_dec(&qp->refcount);
+ 		if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
+ 			kfree(qp->s_tx->txreq.map_addr);
+diff -up a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
+--- a/drivers/infiniband/hw/ipath/ipath_rc.c	2009-03-05 17:37:11.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_rc.c	2009-03-09 17:40:39.000000000 -0700
+@@ -124,16 +124,31 @@ static int ipath_make_rc_ack(struct ipat
+ 
+ 		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+ 		if (e->opcode == OP(RDMA_READ_REQUEST)) {
++			/*
++			 * If a RDMA read response is being resent and
++			 * we haven't seen the duplicate request yet,
++			 * then stop sending the remaining responses the
++			 * responder has seen until the requester resends it.
++			 */
++			if (!e->rdma_sge.mr) {
++				qp->s_tail_ack_queue = qp->r_head_ack_queue;
++				qp->s_ack_state = OP(ACKNOWLEDGE);
++				goto bail;
++			}
+ 			/* Copy SGE state in case we need to resend */
+-			qp->s_ack_rdma_sge = e->rdma_sge;
++			qp->s_rdma_mr = e->rdma_sge.mr;
++			qp->s_ack_rdma_sge.sge = e->rdma_sge;
++			qp->s_ack_rdma_sge.num_sge = 1;
+ 			qp->s_cur_sge = &qp->s_ack_rdma_sge;
+-			len = e->rdma_sge.sge.sge_length;
++			len = e->rdma_sge.sge_length;
+ 			if (len > pmtu) {
+ 				len = pmtu;
+ 				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
++				atomic_inc(&qp->s_rdma_mr->refcount);
+ 			} else {
+ 				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
+ 				e->sent = 1;
++				e->rdma_sge.mr = NULL;
+ 			}
+ 			ohdr->u.aeth = ipath_compute_aeth(qp);
+ 			hwords++;
+@@ -160,14 +175,19 @@ static int ipath_make_rc_ack(struct ipat
+ 		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
+ 		/* FALLTHROUGH */
+ 	case OP(RDMA_READ_RESPONSE_MIDDLE):
++		qp->s_cur_sge = &qp->s_ack_rdma_sge;
++		qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
+ 		len = qp->s_ack_rdma_sge.sge.sge_length;
+-		if (len > pmtu)
++		if (len > pmtu) {
+ 			len = pmtu;
+-		else {
++			atomic_inc(&qp->s_rdma_mr->refcount);
++		} else {
+ 			ohdr->u.aeth = ipath_compute_aeth(qp);
+ 			hwords++;
+ 			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
+-			qp->s_ack_queue[qp->s_tail_ack_queue].sent = 1;
++			e = &qp->s_ack_queue[qp->s_tail_ack_queue];
++			e->sent = 1;
++			e->rdma_sge.mr = NULL;
+ 		}
+ 		bth0 = qp->s_ack_state << 24;
+ 		bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
+@@ -915,13 +935,12 @@ void ipath_rc_send_complete(struct ipath
+ 		ohdr = &hdr->u.l.oth;
+ 
+ 	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
++	psn = be32_to_cpu(ohdr->bth[2]);
++
+ 	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
+-	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
+-		/* XXX Need to handle MR refcount similar to requester */
++	    opcode <= OP(ATOMIC_ACKNOWLEDGE))
+ 		return;
+-	}
+ 
+-	psn = be32_to_cpu(ohdr->bth[2]);
+ 	reset_sending_psn(qp, psn);
+ 
+ 	while (qp->s_last != qp->s_acked) {
+@@ -1587,13 +1606,11 @@ static inline int ipath_rc_rcv_error(str
+ 		offset = ((psn - e->psn) & IPATH_PSN_MASK) *
+ 			ib_mtu_enum_to_int(qp->path_mtu);
+ 		len = be32_to_cpu(reth->length);
+-		if (unlikely(offset + len > e->rdma_sge.sge.sge_length))
++		if (unlikely(offset + len > e->rdma_sge.sge_length))
+ 			goto unlock_done;
+-		for (i = 0; i < e->rdma_sge.num_sge; i++) {
+-			struct ipath_sge *sge = i ?
+-				&e->rdma_sge.sg_list[i - 1] : &e->rdma_sge.sge;
+-
+-			atomic_dec(&sge->mr->refcount);
++		if (e->rdma_sge.mr) {
++			atomic_dec(&e->rdma_sge.mr->refcount);
++			e->rdma_sge.mr = NULL;
+ 		}
+ 		if (len != 0) {
+ 			u32 rkey = be32_to_cpu(reth->rkey);
+@@ -1606,12 +1623,9 @@ static inline int ipath_rc_rcv_error(str
+ 			if (unlikely(!ok))
+ 				goto unlock_done;
+ 		} else {
+-			e->rdma_sge.sg_list = NULL;
+-			e->rdma_sge.num_sge = 0;
+-			e->rdma_sge.sge.mr = NULL;
+-			e->rdma_sge.sge.vaddr = NULL;
+-			e->rdma_sge.sge.length = 0;
+-			e->rdma_sge.sge.sge_length = 0;
++			e->rdma_sge.vaddr = NULL;
++			e->rdma_sge.length = 0;
++			e->rdma_sge.sge_length = 0;
+ 		}
+ 		e->psn = psn;
+ 		qp->s_ack_state = OP(ACKNOWLEDGE);
+@@ -1705,10 +1719,8 @@ static inline void ipath_update_ack_queu
+ 	next = n + 1;
+ 	if (next > IPATH_MAX_RDMA_ATOMIC)
+ 		next = 0;
+-	if (n == qp->s_tail_ack_queue) {
+-		qp->s_tail_ack_queue = next;
+-		qp->s_ack_state = OP(ACKNOWLEDGE);
+-	}
++	qp->s_tail_ack_queue = next;
++	qp->s_ack_state = OP(ACKNOWLEDGE);
+ }
+ 
+ /**
+@@ -1925,19 +1937,20 @@ void ipath_rc_rcv(struct ipath_ibdev *de
+ 		hdrsize += sizeof(*reth);
+ 		qp->r_len = be32_to_cpu(reth->length);
+ 		qp->r_rcv_len = 0;
++		qp->r_sge.sg_list = NULL;
+ 		if (qp->r_len != 0) {
+ 			u32 rkey = be32_to_cpu(reth->rkey);
+ 			u64 vaddr = be64_to_cpu(reth->vaddr);
+ 			int ok;
+ 
+ 			/* Check rkey & NAK */
+-			ok = ipath_rkey_ok(qp, &qp->r_sge,
++			ok = ipath_rkey_ok(qp, &qp->r_sge.sge,
+ 					   qp->r_len, vaddr, rkey,
+ 					   IB_ACCESS_REMOTE_WRITE);
+ 			if (unlikely(!ok))
+ 				goto nack_acc;
++			qp->r_sge.num_sge = 1;
+ 		} else {
+-			qp->r_sge.sg_list = NULL;
+ 			qp->r_sge.num_sge = 0;
+ 			qp->r_sge.sge.mr = NULL;
+ 			qp->r_sge.sge.vaddr = NULL;
+@@ -1973,17 +1986,9 @@ void ipath_rc_rcv(struct ipath_ibdev *de
+ 			ipath_update_ack_queue(qp, next);
+ 		}
+ 		e = &qp->s_ack_queue[qp->r_head_ack_queue];
+-		if (e->opcode == OP(RDMA_READ_REQUEST)) {
+-			unsigned i;
+-
+-			for (i = 0; i < e->rdma_sge.num_sge; i++) {
+-				struct ipath_sge *sge = i ?
+-					&e->rdma_sge.sg_list[i - 1] :
+-					&e->rdma_sge.sge;
+-
+-				atomic_dec(&sge->mr->refcount);
+-			}
+-			e->opcode = 0;
++		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
++			atomic_dec(&e->rdma_sge.mr->refcount);
++			e->rdma_sge.mr = NULL;
+ 		}
+ 		/* RETH comes after BTH */
+ 		if (!header_in_data)
+@@ -2010,12 +2015,10 @@ void ipath_rc_rcv(struct ipath_ibdev *de
+ 			if (len > pmtu)
+ 				qp->r_psn += (len - 1) / pmtu;
+ 		} else {
+-			e->rdma_sge.sg_list = NULL;
+-			e->rdma_sge.num_sge = 0;
+-			e->rdma_sge.sge.mr = NULL;
+-			e->rdma_sge.sge.vaddr = NULL;
+-			e->rdma_sge.sge.length = 0;
+-			e->rdma_sge.sge.sge_length = 0;
++			e->rdma_sge.mr = NULL;
++			e->rdma_sge.vaddr = NULL;
++			e->rdma_sge.length = 0;
++			e->rdma_sge.sge_length = 0;
+ 		}
+ 		e->opcode = opcode;
+ 		e->sent = 0;
+@@ -2063,17 +2066,9 @@ void ipath_rc_rcv(struct ipath_ibdev *de
+ 			ipath_update_ack_queue(qp, next);
+ 		}
+ 		e = &qp->s_ack_queue[qp->r_head_ack_queue];
+-		if (e->opcode == OP(RDMA_READ_REQUEST)) {
+-			unsigned i;
+-
+-			for (i = 0; i < e->rdma_sge.num_sge; i++) {
+-				struct ipath_sge *sge = i ?
+-					&e->rdma_sge.sg_list[i - 1] :
+-					&e->rdma_sge.sge;
+-
+-				atomic_dec(&sge->mr->refcount);
+-			}
+-			e->opcode = 0;
++		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
++			atomic_dec(&e->rdma_sge.mr->refcount);
++			e->rdma_sge.mr = NULL;
+ 		}
+ 		if (!header_in_data)
+ 			ateth = &ohdr->u.atomic_eth;
+@@ -2085,7 +2080,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
+ 			goto nack_inv_unlck;
+ 		rkey = be32_to_cpu(ateth->rkey);
+ 		/* Check rkey & NAK */
+-		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge,
++		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge.sge,
+ 					    sizeof(u64), vaddr, rkey,
+ 					    IB_ACCESS_REMOTE_ATOMIC)))
+ 			goto nack_acc_unlck;
+diff -up a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
+--- a/drivers/infiniband/hw/ipath/ipath_ruc.c	2009-03-05 17:37:11.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_ruc.c	2009-03-06 16:08:18.000000000 -0800
+@@ -367,23 +367,27 @@ again:
+ 			goto inv_err;
+ 		if (wqe->length == 0)
+ 			break;
+-		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length,
++		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
+ 					    wqe->wr.wr.rdma.remote_addr,
+ 					    wqe->wr.wr.rdma.rkey,
+ 					    IB_ACCESS_REMOTE_WRITE)))
+ 			goto acc_err;
++		qp->r_sge.sg_list = NULL;
++		qp->r_sge.num_sge = 1;
+ 		qp->r_sge.total_len = wqe->length;
+ 		break;
+ 
+ 	case IB_WR_RDMA_READ:
+ 		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
+ 			goto inv_err;
+-		if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length,
++		if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
+ 					    wqe->wr.wr.rdma.remote_addr,
+ 					    wqe->wr.wr.rdma.rkey,
+ 					    IB_ACCESS_REMOTE_READ)))
+ 			goto acc_err;
+ 		release = 0;
++		sqp->s_sge.sg_list = NULL;
++		sqp->s_sge.num_sge = 1;
+ 		qp->r_sge.sge = wqe->sg_list[0];
+ 		qp->r_sge.sg_list = wqe->sg_list + 1;
+ 		qp->r_sge.num_sge = wqe->wr.num_sge;
+@@ -394,7 +398,7 @@ again:
+ 	case IB_WR_ATOMIC_FETCH_AND_ADD:
+ 		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
+ 			goto inv_err;
+-		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64),
++		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
+ 					    wqe->wr.wr.atomic.remote_addr,
+ 					    wqe->wr.wr.atomic.rkey,
+ 					    IB_ACCESS_REMOTE_ATOMIC)))
+diff -up a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
+--- a/drivers/infiniband/hw/ipath/ipath_uc.c	2009-03-05 17:37:09.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_uc.c	2009-03-06 16:09:21.000000000 -0800
+@@ -467,21 +467,22 @@ void ipath_uc_rcv(struct ipath_ibdev *de
+ 		hdrsize += sizeof(*reth);
+ 		qp->r_len = be32_to_cpu(reth->length);
+ 		qp->r_rcv_len = 0;
++		qp->r_sge.sg_list = NULL;
+ 		if (qp->r_len != 0) {
+ 			u32 rkey = be32_to_cpu(reth->rkey);
+ 			u64 vaddr = be64_to_cpu(reth->vaddr);
+ 			int ok;
+ 
+ 			/* Check rkey */
+-			ok = ipath_rkey_ok(qp, &qp->r_sge, qp->r_len,
++			ok = ipath_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
+ 					   vaddr, rkey,
+ 					   IB_ACCESS_REMOTE_WRITE);
+ 			if (unlikely(!ok)) {
+ 				dev->n_pkt_drops++;
+ 				goto done;
+ 			}
++			qp->r_sge.num_sge = 1;
+ 		} else {
+-			qp->r_sge.sg_list = NULL;
+ 			qp->r_sge.num_sge = 0;
+ 			qp->r_sge.sge.mr = NULL;
+ 			qp->r_sge.sge.vaddr = NULL;
+diff -up a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
+--- a/drivers/infiniband/hw/ipath/ipath_verbs.c	2009-03-05 17:37:22.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_verbs.c	2009-03-09 16:18:22.000000000 -0700
+@@ -1077,6 +1077,10 @@ static void sdma_complete(void *cookie, 
+ 		spin_unlock_irqrestore(&qp->s_lock, flags);
+ 	}
+ 
++	if (tx->mr) {
++		atomic_dec(&tx->mr->refcount);
++		tx->mr = NULL;
++	}
+ 	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
+ 		kfree(tx->txreq.map_addr);
+ 	put_txreq(dev, tx);
+@@ -1166,6 +1170,9 @@ static int ipath_verbs_send_dma(struct i
+ 	tx->qp = qp;
+ 	atomic_inc(&qp->refcount);
+ 	tx->wqe = qp->s_wqe;
++	tx->mr = qp->s_rdma_mr;
++	if (qp->s_rdma_mr)
++		qp->s_rdma_mr = NULL;
+ 	tx->txreq.callback = sdma_complete;
+ 	tx->txreq.callback_cookie = tx;
+ 	tx->txreq.flags = IPATH_SDMA_TXREQ_F_HEADTOHOST |
+@@ -1242,6 +1249,10 @@ static int ipath_verbs_send_dma(struct i
+ 	goto bail;
+ 
+ err_tx:
++	if (tx->mr) {
++		atomic_dec(&tx->mr->refcount);
++		tx->mr = NULL;
++	}
+ 	if (atomic_dec_and_test(&qp->refcount))
+ 		wake_up(&qp->wait);
+ 	put_txreq(dev, tx);
+@@ -1331,6 +1342,10 @@ static int ipath_verbs_send_pio(struct i
+ 	}
+ 	copy_io(piobuf, ss, len, flush_wc);
+ done:
++	if (qp->s_rdma_mr) {
++		atomic_dec(&qp->s_rdma_mr->refcount);
++		qp->s_rdma_mr = NULL;
++	}
+ 	if (qp->s_wqe) {
+ 		spin_lock_irqsave(&qp->s_lock, flags);
+ 		ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
+diff -up a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
+--- a/drivers/infiniband/hw/ipath/ipath_verbs.h	2009-03-05 17:37:09.000000000 -0800
++++ b/drivers/infiniband/hw/ipath/ipath_verbs.h	2009-03-09 17:46:15.000000000 -0700
+@@ -331,7 +331,6 @@ struct ipath_sge_state {
+ 	struct ipath_sge sge;   /* progress state for the current SGE */
+ 	u32 total_len;
+ 	u8 num_sge;
+-	u8 static_rate;
+ };
+ 
+ /*
+@@ -343,7 +342,7 @@ struct ipath_ack_entry {
+ 	u8 sent;
+ 	u32 psn;
+ 	union {
+-		struct ipath_sge_state rdma_sge;
++		struct ipath_sge rdma_sge;
+ 		u64 atomic_data;
+ 	};
+ };
+@@ -372,6 +371,7 @@ struct ipath_qp {
+ 	struct ipath_mmap_info *ip;
+ 	struct ipath_sge_state *s_cur_sge;
+ 	struct ipath_verbs_txreq *s_tx;
++	struct ipath_mregion *s_rdma_mr;
+ 	struct ipath_sge_state s_sge;	/* current send request data */
+ 	struct ipath_ack_entry s_ack_queue[IPATH_MAX_RDMA_ATOMIC + 1];
+ 	struct ipath_sge_state s_ack_rdma_sge;
+@@ -654,6 +654,7 @@ struct ipath_verbs_txreq {
+ 	struct ipath_swqe       *wqe;
+ 	u32                      map_len;
+ 	u32                      len;
++	struct ipath_mregion	*mr;
+ 	struct ipath_sge_state  *ss;
+ 	struct ipath_pio_header  hdr;
+ 	struct ipath_sdma_txreq  txreq;
+@@ -795,7 +796,7 @@ int ipath_free_lkey(struct ipath_ibdev *
+ int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
+ 		  struct ib_sge *sge, int acc);
+ 
+-int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
++int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge *sge,
+ 		  u32 len, u64 vaddr, u32 rkey, int acc);
+ 
+ int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0320_fix_gsi_pkey_index.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0320_fix_gsi_pkey_index.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0320_fix_gsi_pkey_index.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,59 @@
+diff -up a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
+--- a/drivers/infiniband/hw/ipath/ipath_ud.c	2009-03-09 18:09:24.000000000 -0700
++++ b/drivers/infiniband/hw/ipath/ipath_ud.c	2009-03-09 18:51:50.000000000 -0700
+@@ -228,8 +228,8 @@ static void ipath_ud_loopback(struct ipa
+ 	wc.opcode = IB_WC_RECV;
+ 	wc.qp = &qp->ibqp;
+ 	wc.src_qp = sqp->ibqp.qp_num;
+-	/* XXX do we know which pkey matched? Only needed for GSI. */
+-	wc.pkey_index = 0;
++	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
++		swqe->wr.wr.ud.pkey_index : 0;
+ 	wc.slid = dev->dd->ipath_lid |
+ 		(ah_attr->src_path_bits &
+ 		 ((1 << dev->dd->ipath_lmc) - 1));
+@@ -379,7 +379,8 @@ int ipath_make_ud_req(struct ipath_qp *q
+ 		bth0 |= 1 << 23;
+ 	bth0 |= extra_bytes << 20;
+ 	bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY :
+-		ipath_get_pkey(dev->dd, qp->s_pkey_index);
++		ipath_get_pkey(dev->dd, qp->ibqp.qp_type == IB_QPT_GSI ?
++				wqe->wr.wr.ud.pkey_index : qp->s_pkey_index);
+ 	ohdr->bth[0] = cpu_to_be32(bth0);
+ 	/*
+ 	 * Use the multicast QP if the destination LID is a multicast LID.
+@@ -408,6 +409,23 @@ unlock:
+ 	return ret;
+ }
+ 
++static unsigned ipath_lookup_pkey(struct ipath_devdata *dd, u16 pkey)
++{
++	unsigned i;
++
++	pkey &= 0x7fff;	/* remove limited/full membership bit */
++
++	for (i = 0; i < ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys); ++i)
++		if ((dd->ipath_pd[0]->port_pkeys[i] & 0x7fff) == pkey)
++			return i;
++
++	/*
++	 * Should not get here, this means hardware failed to validate pkeys.
++	 * Punt and return index 0.
++	 */
++	return 0;
++}
++
+ /**
+  * ipath_ud_rcv - receive an incoming UD packet
+  * @dev: the device the packet came in on
+@@ -580,8 +598,8 @@ void ipath_ud_rcv(struct ipath_ibdev *de
+ 	wc.vendor_err = 0;
+ 	wc.qp = &qp->ibqp;
+ 	wc.src_qp = src_qp;
+-	/* XXX do we know which pkey matched? Only needed for GSI. */
+-	wc.pkey_index = 0;
++	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
++		ipath_lookup_pkey(dev->dd, be32_to_cpu(ohdr->bth[0])) : 0;
+ 	wc.slid = be16_to_cpu(hdr->lrh[3]);
+ 	wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
+ 	dlid = be16_to_cpu(hdr->lrh[1]);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0330_rc_send_mr.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0330_rc_send_mr.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0330_rc_send_mr.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,47 @@
+diff -up a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
+--- a/drivers/infiniband/hw/ipath/ipath_rc.c	2009-03-09 19:06:05.000000000 -0700
++++ b/drivers/infiniband/hw/ipath/ipath_rc.c	2009-03-11 18:15:27.000000000 -0700
+@@ -297,8 +297,12 @@ int ipath_make_rc_req(struct ipath_qp *q
+ 	 * the same PSN multiple times.
+ 	 */
+ 	if (ipath_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
+-		qp->s_flags |= IPATH_S_WAITING;
+-		goto bail;
++		if (ipath_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
++			qp->s_flags |= IPATH_S_WAITING;
++			goto bail;
++		}
++		qp->s_sending_psn = qp->s_psn;
++		qp->s_sending_hpsn = qp->s_psn - 1;
+ 	}
+ 
+ 	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
+@@ -832,10 +836,6 @@ static void reset_psn(struct ipath_qp *q
+ 	}
+ done:
+ 	qp->s_psn = psn;
+-	if (ipath_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
+-		qp->s_sending_psn = psn;
+-		qp->s_sending_hpsn = psn - 1;
+-	}
+ }
+ 
+ /**
+@@ -971,7 +971,7 @@ void ipath_rc_send_complete(struct ipath
+ 	 * If we were waiting for sends to complete before resending,
+ 	 * and they are now complete, restart sending.
+ 	 */
+-	if (qp->s_acked != qp->s_head &&
++	if (qp->s_cur != qp->s_head &&
+ 	    ipath_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0 &&
+ 	    ipath_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
+ 		qp->s_sending_psn = qp->s_psn;
+@@ -1169,7 +1169,7 @@ static int do_rc_ack(struct ipath_qp *qp
+ 		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
+ 		qp->s_retry = qp->s_retry_cnt;
+ 		update_last_psn(qp, psn);
+-		if (qp->s_acked != qp->s_head)
++		if (qp->s_cur != qp->s_head)
+ 			ipath_schedule_send(qp);
+ 		else
+ 			qp->s_flags &= ~IPATH_S_WAITING;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0340_pkey_change_event.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0340_pkey_change_event.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0340_pkey_change_event.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,23 @@
+diff -up a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
+--- a/drivers/infiniband/hw/ipath/ipath_mad.c	2009-03-16 10:19:00.000000000 -0700
++++ b/drivers/infiniband/hw/ipath/ipath_mad.c	2009-03-16 10:34:35.000000000 -0700
+@@ -760,6 +760,7 @@ static int set_pkeys(struct ipath_devdat
+ 		pd->port_pkeys[i] = key;
+ 	}
+ 	if (changed) {
++		struct ib_event event;
+ 		u64 pkey;
+ 
+ 		pkey = (u64) dd->ipath_pkeys[0] |
+@@ -770,6 +771,11 @@ static int set_pkeys(struct ipath_devdat
+ 			   (unsigned long long) pkey);
+ 		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
+ 				 pkey);
++
++		event.event = IB_EVENT_PKEY_CHANGE;
++		event.device = &dd->verbs_dev->ibdev;
++		event.element.port_num = 1;
++		ib_dispatch_event(&event);
+ 	}
+ 	return 0;
+ }
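
For context, a ULP that wants to react to the event this patch starts
dispatching would normally register an IB event handler.  A minimal sketch
(hypothetical "my_ulp" names; illustrative only, not part of the patch):

#include <rdma/ib_verbs.h>

static struct ib_event_handler my_ulp_handler;

/* Called by the IB core for asynchronous device events. */
static void my_ulp_event(struct ib_event_handler *handler,
			 struct ib_event *event)
{
	if (event->event == IB_EVENT_PKEY_CHANGE)
		printk(KERN_INFO "P_Key table changed on port %d\n",
		       event->element.port_num);
}

static int my_ulp_add_device(struct ib_device *device)
{
	/* Register for events such as IB_EVENT_PKEY_CHANGE. */
	INIT_IB_EVENT_HANDLER(&my_ulp_handler, device, my_ulp_event);
	return ib_register_event_handler(&my_ulp_handler);
}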

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0350_iowrite32_copy_x86_64.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0350_iowrite32_copy_x86_64.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0350_iowrite32_copy_x86_64.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,91 @@
+IB/ipath - add optimized __iowrite32_copy routine for x86_64
+
+__iowrite32_copy - copy a memory block using dword multiple writes
+
+This is primarily for writing to the InfiniPath PIO buffers, which only
+support dword multiple writes, and thus can not use memcpy().  For this
+reason, we use nothing smaller than dword writes.  It is also used as
+a fast copy routine in some places that have been measured to win over
+memcpy, and the performance delta matters.
+
+Signed-off-by: John Gregor <john.gregor at qlogic.com>
+---
+ drivers/infiniband/hw/ipath/Makefile                |    1 
+ drivers/infiniband/hw/ipath/iowrite32_copy_x86_64.S |   58 ++++++++++++++++++++
+ 2 files changed, 59 insertions(+)
+
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/Makefile
+===================================================================
+--- ofa_kernel-1.4.orig/drivers/infiniband/hw/ipath/Makefile
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/Makefile
+@@ -38,6 +38,7 @@ ib_ipath-y := \
+ ib_ipath-$(CONFIG_HT_IRQ) += ipath_iba6110.o
+ ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba6120.o
+ 
++ib_ipath-$(CONFIG_X86_64) += iowrite32_copy_x86_64.o
+ ib_ipath-$(CONFIG_X86_64) += memcpy_cachebypass_x86_64.o
+ ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
+ ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o
+Index: ofa_kernel-1.4/drivers/infiniband/hw/ipath/iowrite32_copy_x86_64.S
+===================================================================
+--- /dev/null
++++ ofa_kernel-1.4/drivers/infiniband/hw/ipath/iowrite32_copy_x86_64.S
+@@ -0,0 +1,58 @@
++/*
++ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
++ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
++ *
++ * This software is available to you under a choice of one of two
++ * licenses.  You may choose to be licensed under the terms of the GNU
++ * General Public License (GPL) Version 2, available from the file
++ * COPYING in the main directory of this source tree, or the
++ * OpenIB.org BSD license below:
++ *
++ *     Redistribution and use in source and binary forms, with or
++ *     without modification, are permitted provided that the following
++ *     conditions are met:
++ *
++ *      - Redistributions of source code must retain the above
++ *        copyright notice, this list of conditions and the following
++ *        disclaimer.
++ *
++ *      - Redistributions in binary form must reproduce the above
++ *        copyright notice, this list of conditions and the following
++ *        disclaimer in the documentation and/or other materials
++ *        provided with the distribution.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
++ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
++ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ */
++
++/**
++ * __iowrite32_copy - copy a memory block using dword multiple writes
++ *
++ * This is primarily for writing to the InfiniPath PIO buffers, which
++ * only support dword multiple writes, and thus can not use memcpy().
++ * For this reason, we use nothing smaller than dword writes.
++ * It is also used as a fast copy routine in some places that have been
++ * measured to win over memcpy, and the performance delta matters.
++ *
++ * Count is number of dwords; might not be a qword multiple.
++ */
++
++ 	.globl __iowrite32_copy
++	.p2align 4
++/* rdi	destination, rsi source, rdx count */
++__iowrite32_copy:
++	movl %edx,%ecx
++	shrl $1,%ecx
++	andl $1,%edx
++	rep
++	movsq
++	movl %edx,%ecx
++	rep
++	movsd
++	ret
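
For readers less familiar with x86_64 assembly, the routine above is roughly
equivalent to the following C sketch (illustrative only; the function name is
made up, and the sketch ignores the alignment and aliasing subtleties that the
hand-written assembly avoids):

#include <stdint.h>
#include <stddef.h>

/* Copy 'count' 32-bit words: pairs of dwords move as 64-bit stores
 * (the "rep movsq"), with one trailing 32-bit store when count is odd
 * (the "rep movsd" with a count of 0 or 1). */
static void iowrite32_copy_sketch(uint32_t *dst, const uint32_t *src,
				  size_t count)
{
	size_t qwords = count >> 1;
	size_t i;

	for (i = 0; i < qwords; i++)
		((uint64_t *)dst)[i] = ((const uint64_t *)src)[i];

	if (count & 1)
		dst[count - 1] = src[count - 1];
}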

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0360_rdma_read.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0360_rdma_read.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipath_0360_rdma_read.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,114 @@
+diff -up a/drivers/infiniband/hw/ipath/ipath_rc.c c/drivers/infiniband/hw/ipath/ipath_rc.c
+--- a/drivers/infiniband/hw/ipath/ipath_rc.c	2009-03-17 17:19:42.000000000 -0700
++++ c/drivers/infiniband/hw/ipath/ipath_rc.c	2009-03-20 11:43:35.000000000 -0700
+@@ -103,6 +103,12 @@ static int ipath_make_rc_ack(struct ipat
+ 	switch (qp->s_ack_state) {
+ 	case OP(RDMA_READ_RESPONSE_LAST):
+ 	case OP(RDMA_READ_RESPONSE_ONLY):
++		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
++		if (e->rdma_sge.mr) {
++			atomic_dec(&e->rdma_sge.mr->refcount);
++			e->rdma_sge.mr = NULL;
++		}
++		/* FALLTHROUGH */
+ 	case OP(ATOMIC_ACKNOWLEDGE):
+ 		/*
+ 		 * We can increment the tail pointer now that the last
+@@ -130,13 +136,15 @@ static int ipath_make_rc_ack(struct ipat
+ 			 * then stop sending the remaining responses the
+ 			 * responder has seen until the requester resends it.
+ 			 */
+-			if (!e->rdma_sge.mr) {
++			if (e->rdma_sge.sge_length && !e->rdma_sge.mr) {
+ 				qp->s_tail_ack_queue = qp->r_head_ack_queue;
+ 				qp->s_ack_state = OP(ACKNOWLEDGE);
+ 				goto bail;
+ 			}
+ 			/* Copy SGE state in case we need to resend */
+ 			qp->s_rdma_mr = e->rdma_sge.mr;
++			if (qp->s_rdma_mr)
++				atomic_inc(&qp->s_rdma_mr->refcount);
+ 			qp->s_ack_rdma_sge.sge = e->rdma_sge;
+ 			qp->s_ack_rdma_sge.num_sge = 1;
+ 			qp->s_cur_sge = &qp->s_ack_rdma_sge;
+@@ -144,11 +152,9 @@ static int ipath_make_rc_ack(struct ipat
+ 			if (len > pmtu) {
+ 				len = pmtu;
+ 				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
+-				atomic_inc(&qp->s_rdma_mr->refcount);
+ 			} else {
+ 				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
+ 				e->sent = 1;
+-				e->rdma_sge.mr = NULL;
+ 			}
+ 			ohdr->u.aeth = ipath_compute_aeth(qp);
+ 			hwords++;
+@@ -177,17 +183,17 @@ static int ipath_make_rc_ack(struct ipat
+ 	case OP(RDMA_READ_RESPONSE_MIDDLE):
+ 		qp->s_cur_sge = &qp->s_ack_rdma_sge;
+ 		qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
++		if (qp->s_rdma_mr)
++			atomic_inc(&qp->s_rdma_mr->refcount);
+ 		len = qp->s_ack_rdma_sge.sge.sge_length;
+-		if (len > pmtu) {
++		if (len > pmtu)
+ 			len = pmtu;
+-			atomic_inc(&qp->s_rdma_mr->refcount);
+-		} else {
++		else {
+ 			ohdr->u.aeth = ipath_compute_aeth(qp);
+ 			hwords++;
+ 			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
+ 			e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+ 			e->sent = 1;
+-			e->rdma_sge.mr = NULL;
+ 		}
+ 		bth0 = qp->s_ack_state << 24;
+ 		bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
+@@ -216,6 +222,7 @@ static int ipath_make_rc_ack(struct ipat
+ 		bth0 = OP(ACKNOWLEDGE) << 24;
+ 		bth2 = qp->s_ack_psn & IPATH_PSN_MASK;
+ 	}
++	qp->s_rdma_ack_cnt++;
+ 	qp->s_hdrwords = hwords;
+ 	qp->s_cur_size = len;
+ 	ipath_make_ruc_header(dev, qp, ohdr, bth0, bth2);
+@@ -670,7 +677,8 @@ static void send_rc_ack(struct ipath_qp 
+ 	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
+ 	if (qp->r_head_ack_queue != qp->s_tail_ack_queue ||
+ 	    (qp->s_flags & IPATH_S_ACK_PENDING) ||
+-	    qp->s_ack_state != OP(ACKNOWLEDGE))
++	    qp->s_ack_state != OP(ACKNOWLEDGE) ||
++	    qp->s_rdma_ack_cnt)
+ 		goto queue_ack;
+ 
+ 	spin_unlock_irqrestore(&qp->s_lock, flags);
+@@ -935,12 +943,14 @@ void ipath_rc_send_complete(struct ipath
+ 		ohdr = &hdr->u.l.oth;
+ 
+ 	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
+-	psn = be32_to_cpu(ohdr->bth[2]);
+-
+ 	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
+-	    opcode <= OP(ATOMIC_ACKNOWLEDGE))
++	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
++		WARN_ON(!qp->s_rdma_ack_cnt);
++		qp->s_rdma_ack_cnt--;
+ 		return;
++	}
+ 
++	psn = be32_to_cpu(ohdr->bth[2]);
+ 	reset_sending_psn(qp, psn);
+ 
+ 	while (qp->s_last != qp->s_acked) {
+diff -up a/drivers/infiniband/hw/ipath/ipath_verbs.h c/drivers/infiniband/hw/ipath/ipath_verbs.h
+--- a/drivers/infiniband/hw/ipath/ipath_verbs.h	2009-03-17 17:19:42.000000000 -0700
++++ c/drivers/infiniband/hw/ipath/ipath_verbs.h	2009-03-20 11:38:28.000000000 -0700
+@@ -423,6 +423,7 @@ struct ipath_qp {
+ 	u8 s_dmult;
+ 	u8 s_draining;
+ 	u8 timeout;		/* Timeout for this QP */
++	u16 s_rdma_ack_cnt;
+ 	enum ib_mtu path_mtu;
+ 	u32 remote_qpn;
+ 	u32 qkey;		/* QKEY for this QP (for UD or RD) */

Deleted: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0410_enable_lro.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0410_enable_lro.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0410_enable_lro.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,21 +0,0 @@
-IPoIB: Enable LRO by default.
-
-Signed-off-by: Vladimir Sokolovsky <vlad at mellanox.co.il>
----
----
- drivers/infiniband/ulp/ipoib/ipoib_main.c |    2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
-===================================================================
---- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c
-+++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
-@@ -60,7 +60,7 @@ MODULE_PARM_DESC(send_queue_size, "Numbe
- module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
- MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
- 
--static int lro;
-+static int lro = 1;
- module_param(lro, bool, 0444);
- MODULE_PARM_DESC(lro,  "Enable LRO (Large Receive Offload)");
- 

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0500_do_not_join_broadcast_group_if_interface_is_brought_down.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0500_do_not_join_broadcast_group_if_interface_is_brought_down.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0500_do_not_join_broadcast_group_if_interface_is_brought_down.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,72 +1,37 @@
-commit a573a06d88c753bc531e5dea74d7c60aacbcc28c
+commit 50df48f59d656d58a1734df5cfe00cdc9a74e8b5
 Author: Yossi Etigin <yosefe at Voltaire.COM>
-Date:   Wed Nov 19 21:53:27 2008 +0200
+Date:   Mon Jan 12 19:28:42 2009 -0800
 
-    ipoib: do not join broadcast group if interface is brought down
-
-    Because ipoib_workqueue is not flushed when ipoib interface is brought down,
-    ipoib_mcast_join() may trigger a join to the broadcast group after priv->broadcast
-    was set to NULL (during cleanup). This will cause ipoib to be joined to the
-    broadcast group when interface is down.
-    As a side effect, this breaks the optimization of setting qkey only when joining
-    the broadcast group.
-
+    IPoIB: Do not join broadcast group if interface is brought down
+    
+    Because the ipoib_workqueue is not flushed when ipoib interface is
+    brought down, ipoib_mcast_join() may trigger a join to the broadcast
+    group after priv->broadcast was set to NULL (during cleanup).  This
+    will cause the system to be a member of the broadcast group when
+    interface is down.  As a side effect, this breaks the optimization of
+    setting the Q_key only when joining the broadcast group.
+    
     Signed-off-by: Yossi Etigin <yosefe at voltaire.com>
+    Signed-off-by: Roland Dreier <rolandd at cisco.com>
 
     --
 
     Fix bugzilla 1370.
 
+    V2: Totally different approach from V1.
+
+
 diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
-index aae2862..3fc1ff7 100644
+index a2eb3b9..59d02e0 100644
 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
 +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
-@@ -472,7 +472,7 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
- 		IB_SA_MCMEMBER_REC_PKEY		|
- 		IB_SA_MCMEMBER_REC_JOIN_STATE;
-
--	if (create) {
-+	if (create && priv->broadcast) {
- 		comp_mask |=
- 			IB_SA_MCMEMBER_REC_QKEY			|
- 			IB_SA_MCMEMBER_REC_MTU_SELECTOR		|
-@@ -540,7 +540,8 @@ void ipoib_mcast_join_task(struct work_struct *work)
- 			ipoib_warn(priv, "ib_query_port failed\n");
- 	}
-
--	if (!priv->broadcast) {
-+	rtnl_lock();
-+	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags) && !priv->broadcast) {
+@@ -529,6 +529,9 @@ void ipoib_mcast_join_task(struct work_struct *work)
+ 	if (!priv->broadcast) {
  		struct ipoib_mcast *broadcast;
-
+ 
++		if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
++			return;
++
  		broadcast = ipoib_mcast_alloc(dev, 1);
-@@ -551,6 +552,7 @@ void ipoib_mcast_join_task(struct work_struct *work)
- 				queue_delayed_work(ipoib_workqueue,
- 						   &priv->mcast_task, HZ);
- 			mutex_unlock(&mcast_mutex);
-+			rtnl_unlock();
- 			return;
- 		}
-
-@@ -562,8 +564,10 @@ void ipoib_mcast_join_task(struct work_struct *work)
- 		__ipoib_mcast_add(dev, priv->broadcast);
- 		spin_unlock_irq(&priv->lock);
- 	}
-+	rtnl_unlock();
-
--	if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
-+	if (priv->broadcast &&
-+	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
- 		if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
- 			ipoib_mcast_join(dev, priv->broadcast, 0);
- 		return;
-@@ -592,7 +596,8 @@ void ipoib_mcast_join_task(struct work_struct *work)
- 		return;
- 	}
-
--	priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
-+	if (priv->broadcast)
-+		priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
-
- 	if (!ipoib_cm_admin_enabled(dev)) {
- 		rtnl_lock();
+ 		if (!broadcast) {
+ 			ipoib_warn(priv, "failed to allocate broadcast group\n");

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0520_join_task_tempfix.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0520_join_task_tempfix.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0520_join_task_tempfix.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,45 @@
+ipoib: fix unprotected use of priv->broadcast in ipoib_mcast_join_task.
+
+There is a race whereby the ipoib broadcast pointer may be set to NULL by a flush while the
+join task is being started.  This patch protects the broadcast pointer access with a spinlock.
+If the pointer is indeed NULL, we set the mcast_mtu value to the current admin_mtu value --
+the value does not matter anyway, since the interface is going down.
+
+We also minimize the race by testing that the broadcast pointer is non-NULL before the test_bit() calls.
+The spinlock cannot be taken here because ipoib_mcast_join uses mutexes.
+
+This is a temporary fix for an ipoib architectural bug.
+
+Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
+
+Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-02-01 10:06:44.870307000 +0200
++++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2009-02-01 10:08:22.841779000 +0200
+@@ -564,8 +564,10 @@ void ipoib_mcast_join_task(struct work_s
+ 		spin_unlock_irq(&priv->lock);
+ 	}
+ 
+-	if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
+-		if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
++	if (priv->broadcast &&
++	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
++		if (priv->broadcast &&
++		    !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
+ 			ipoib_mcast_join(dev, priv->broadcast, 0);
+ 		return;
+ 	}
+@@ -593,7 +595,12 @@ void ipoib_mcast_join_task(struct work_s
+ 		return;
+ 	}
+ 
+-	priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
++	spin_lock_irq(&priv->lock);
++	if (priv->broadcast)
++		priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
++	else
++		priv->mcast_mtu = priv->admin_mtu;
++	spin_unlock_irq(&priv->lock);
+ 
+ 	if (!ipoib_cm_admin_enabled(dev)) {
+ 		rtnl_lock();

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0530_unicast_crash.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0530_unicast_crash.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0530_unicast_crash.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,43 @@
+IPoIB: In unicast_arp, do path_free only for newly-created paths.
+
+If path_rec_start() returns an error, call path_free() only if the path
+was newly-created.  If we free an existing path whose valid flag was zero,
+we cause corruption of the path list (of which it is a member), and get a
+kernel crash.
+
+Leaving an existing path with its valid flag cleared in the
+list is the cleanest solution.
+
+Thanks to Yossi Etigin of Voltaire for identifying the problem flow
+which caused the kernel crash.
+
+Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
+Signed-off-by: Moni Shua <monis at voltaire.com>
+
+Index: ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-02-09 09:39:56.340110000 +0200
++++ ofed_kernel/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-02-09 09:40:41.702488000 +0200
+@@ -684,8 +684,11 @@ static void unicast_arp_send(struct sk_b
+ 
+ 	path = __path_find(dev, phdr->hwaddr + 4);
+ 	if (!path || !path->valid) {
+-		if (!path)
++		int new_path = 0;
++		if (!path) {
+ 			path = path_rec_create(dev, phdr->hwaddr + 4);
++			new_path = 1;
++		}
+ 		if (path) {
+ 			/* put pseudoheader back on for next time */
+ 			skb_push(skb, sizeof *phdr);
+@@ -693,7 +696,8 @@ static void unicast_arp_send(struct sk_b
+ 
+ 			if (!path->query && path_rec_start(dev, path)) {
+ 				spin_unlock_irqrestore(&priv->lock, flags);
+-				path_free(dev, path);
++				if (new_path)
++					path_free(dev, path);
+ 				return;
+ 			} else
+ 				__path_add(dev, path);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0540_mcast_attach_ignore_eagain.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0540_mcast_attach_ignore_eagain.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0540_mcast_attach_ignore_eagain.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,21 @@
+When trying to join a multicast group from ipoib and the SM address
+handle is NULL, the join returns with status -EAGAIN.  In
+that case, do not print an error.
+
+Signed-off-by: Yossi Etigin <yosefe at voltaire.com>
+
+--
+
+Index: b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+===================================================================
+--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-10-22 20:28:06.000000000 +0200
++++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-10-27 20:13:59.000000000 +0200
+@@ -443,7 +443,7 @@ static int ipoib_mcast_join_complete(int
+ 	}
+ 
+ 	if (mcast->logcount++ < 20) {
+-		if (status == -ETIMEDOUT) {
++		if (status == -ETIMEDOUT || status == -EAGAIN) {
+ 			ipoib_dbg_mcast(priv, "multicast join failed for " IPOIB_GID_FMT
+ 					", status %d\n",
+ 					IPOIB_GID_ARG(mcast->mcmember.mgid),

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0550_fix_napi_poll_race.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0550_fix_napi_poll_race.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0550_fix_napi_poll_race.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,72 @@
+ipoib: disable napi while cq is being drained
+
+If napi is enabled while the cq is being drained, it creates a race on priv->ibwc
+between ipoib_poll and ipoib_drain_cq, leading to memory corruption.
+The solution is to enable/disable napi in ipoib_ib_dev_open/stop instead of in
+ipoib_open/stop, and to sync napi on the INITIALIZED bit instead of the ADMIN_UP
+bit.  This way napi will be disabled when ipoib_drain_cq is called.
+
+Fix bugzilla #1587.
+
+Signed-off-by: Yossi Etigin <yosefe at voltaire.com>
+
+
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+===================================================================
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2009-04-16 23:43:16.000000000 +0300
+@@ -708,7 +708,8 @@ int ipoib_ib_dev_open(struct net_device 
+ 	priv->poll_timer.function = ipoib_ib_tx_timer_func;
+ 	priv->poll_timer.data = (unsigned long)dev;
+ 
+-	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
++	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
++		napi_enable(&priv->napi);
+ 
+ 	return 0;
+ }
+@@ -827,7 +828,8 @@ int ipoib_ib_dev_stop(struct net_device 
+ 	struct ipoib_tx_buf *tx_req;
+ 	int i;
+ 
+-	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
++	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
++		napi_disable(&priv->napi);
+ 
+ 	ipoib_cm_dev_stop(dev);
+ 
+Index: linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c
+===================================================================
+--- linux-2.6.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:35:04.000000000 +0300
++++ linux-2.6/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-16 23:44:02.000000000 +0300
+@@ -106,20 +106,16 @@ int ipoib_open(struct net_device *dev)
+ 
+ 	ipoib_dbg(priv, "bringing up interface\n");
+ 
+-	if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+-		napi_enable(&priv->napi);
++	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
+ 
+ 	if (ipoib_pkey_dev_delay_open(dev))
+ 		return 0;
+ 
+-	if (ipoib_ib_dev_open(dev)) {
+-		napi_disable(&priv->napi);
++	if (ipoib_ib_dev_open(dev))
+ 		return -EINVAL;
+-	}
+ 
+ 	if (ipoib_ib_dev_up(dev)) {
+ 		ipoib_ib_dev_stop(dev, 1);
+-		napi_disable(&priv->napi);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -152,7 +148,6 @@ static int ipoib_stop(struct net_device 
+ 	ipoib_dbg(priv, "stopping interface\n");
+ 
+ 	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
+-	napi_disable(&priv->napi);
+ 
+ 	netif_stop_queue(dev);
+ 

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0560_clear_admin_up_flag.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0560_clear_admin_up_flag.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/ipoib_0560_clear_admin_up_flag.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,39 @@
+ipoib: clear IPOIB_FLAG_ADMIN_UP if ipoib_open fails
+
+If ipoib_open() fails, it should clear the IPOIB_FLAG_ADMIN_UP bit and not
+leave it on.
+This is already fixed in 2.6.30.
+
+Reported-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
+Signed-off-by: Yossi Etigin <yosefe at voltaire.com>
+
+---
+
+Index: b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+===================================================================
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-22 19:45:11.000000000 +0300
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c	2009-04-22 19:51:51.000000000 +0300
+@@ -112,11 +112,11 @@ int ipoib_open(struct net_device *dev)
+ 		return 0;
+ 
+ 	if (ipoib_ib_dev_open(dev))
+-		return -EINVAL;
++		goto err;
+ 
+ 	if (ipoib_ib_dev_up(dev)) {
+ 		ipoib_ib_dev_stop(dev, 1);
+-		return -EINVAL;
++		goto err;
+ 	}
+ 
+ 	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+@@ -139,6 +139,9 @@ int ipoib_open(struct net_device *dev)
+ 	netif_start_queue(dev);
+ 
+ 	return 0;
++err:
++	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
++	return -EINVAL;
+ }
+ 
+ static int ipoib_stop(struct net_device *dev)

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0200_sgl_pbl_offset-calculation_needs_64_bits.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0200_sgl_pbl_offset-calculation_needs_64_bits.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0200_sgl_pbl_offset-calculation_needs_64_bits.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,43 @@
+commit 900f4c16c338f742b80f3aa500e12ceb017e86af
+Author: Steve Wise <swise at opengridcomputing.com>
+Date:   Tue Feb 10 16:38:22 2009 -0800
+
+    RDMA/cxgb3: sgl/pbl offset calculation needs 64 bits
+    
+    The variable 'offset' in iwch_sgl2pbl_map() needs to be a u64.
+    
+    Signed-off-by: Steve Wise <swise at opengridcomputing.com>
+    Signed-off-by: Roland Dreier <rolandd at cisco.com>
+
+diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
+index 19661b2..c84ac5b 100644
+--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
++++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
+@@ -195,15 +195,12 @@ static int build_inv_stag(union t3_wr *wqe, struct ib_send_wr *wr,
+ 	return 0;
+ }
+ 
+-/*
+- * TBD: this is going to be moved to firmware. Missing pdid/qpid check for now.
+- */
+ static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
+ 			    u32 num_sgle, u32 * pbl_addr, u8 * page_size)
+ {
+ 	int i;
+ 	struct iwch_mr *mhp;
+-	u32 offset;
++	u64 offset;
+ 	for (i = 0; i < num_sgle; i++) {
+ 
+ 		mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
+@@ -235,8 +232,8 @@ static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
+ 			return -EINVAL;
+ 		}
+ 		offset = sg_list[i].addr - mhp->attr.va_fbo;
+-		offset += ((u32) mhp->attr.va_fbo) %
+-		          (1UL << (12 + mhp->attr.page_size));
++		offset += mhp->attr.va_fbo &
++			  ((1UL << (12 + mhp->attr.page_size)) - 1);
+ 		pbl_addr[i] = ((mhp->attr.pbl_addr -
+ 			        rhp->rdev.rnic_info.pbl_base) >> 3) +
+ 			      (offset >> (12 + mhp->attr.page_size));
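
A small stand-alone sketch of the truncation this commit fixes (hypothetical
addresses, not part of the patch): with a 32-bit offset, an SGE that starts
4 GB or more past the start of the memory region maps to the wrong PBL index.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t addr   = 0x200001000ULL;  /* hypothetical sg_list[i].addr  */
	uint64_t va_fbo = 0x100000000ULL;  /* hypothetical mhp->attr.va_fbo */
	unsigned shift  = 12;              /* 4 KB pages                    */

	uint32_t off32 = addr - va_fbo;    /* truncated to 0x1000       */
	uint64_t off64 = addr - va_fbo;    /* correct value 0x100001000 */

	printf("u32 pbl index: %u\n", off32 >> shift);        /* prints 1 */
	printf("u64 pbl index: %llu\n",
	       (unsigned long long)(off64 >> shift));   /* prints 1048577 */
	return 0;
}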

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0300_connection_termination_fixes.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0300_connection_termination_fixes.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0300_connection_termination_fixes.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,112 @@
+commit 42fb61f02f9bdc476c7a76d3cce0400d989f44c5
+Author: Steve Wise <swise at opengridcomputing.com>
+Date:   Tue Feb 10 16:38:57 2009 -0800
+
+    RDMA/cxgb3: Connection termination fixes
+    
+    The poll and flush code needs to handle all send opcodes: SEND,
+    SEND_WITH_SE, SEND_WITH_INV, and SEND_WITH_SE_INV.
+    
+    Ignore TERM indications if the connection is already gone.
+    
+    Ignore HW receive completions if the RQ is empty.
+    
+    Signed-off-by: Steve Wise <swise at opengridcomputing.com>
+    Signed-off-by: Roland Dreier <rolandd at cisco.com>
+
+diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
+index 4dcf08b..c2740e7 100644
+--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
++++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
+@@ -450,7 +450,7 @@ static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
+ 	if ((CQE_OPCODE(*cqe) == T3_READ_RESP) && SQ_TYPE(*cqe))
+ 		return 0;
+ 
+-	if ((CQE_OPCODE(*cqe) == T3_SEND) && RQ_TYPE(*cqe) &&
++	if (CQE_SEND_OPCODE(*cqe) && RQ_TYPE(*cqe) &&
+ 	    Q_EMPTY(wq->rq_rptr, wq->rq_wptr))
+ 		return 0;
+ 
+@@ -1204,11 +1204,12 @@ int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
+ 		}
+ 
+ 		/* incoming SEND with no receive posted failures */
+-		if ((CQE_OPCODE(*hw_cqe) == T3_SEND) && RQ_TYPE(*hw_cqe) &&
++		if (CQE_SEND_OPCODE(*hw_cqe) && RQ_TYPE(*hw_cqe) &&
+ 		    Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
+ 			ret = -1;
+ 			goto skip_cqe;
+ 		}
++		BUG_ON((*cqe_flushed == 0) && !SW_CQE(*hw_cqe));
+ 		goto proc_cqe;
+ 	}
+ 
+@@ -1223,6 +1224,13 @@ int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
+ 		 * then we complete this with TPT_ERR_MSN and mark the wq in
+ 		 * error.
+ 		 */
++
++		if (Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
++			wq->error = 1;
++			ret = -1;
++			goto skip_cqe;
++		}
++
+ 		if (unlikely((CQE_WRID_MSN(*hw_cqe) != (wq->rq_rptr + 1)))) {
+ 			wq->error = 1;
+ 			hw_cqe->header |= htonl(V_CQE_STATUS(TPT_ERR_MSN));
+@@ -1277,6 +1285,7 @@ proc_cqe:
+ 			cxio_hal_pblpool_free(wq->rdev,
+ 				wq->rq[Q_PTR2IDX(wq->rq_rptr,
+ 				wq->rq_size_log2)].pbl_addr, T3_STAG0_PBL_SIZE);
++		BUG_ON(Q_EMPTY(wq->rq_rptr, wq->rq_wptr));
+ 		wq->rq_rptr++;
+ 	}
+ 
+diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
+index 04618f7..ff9be1a 100644
+--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
++++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
+@@ -604,6 +604,12 @@ struct t3_cqe {
+ #define CQE_STATUS(x)     (G_CQE_STATUS(be32_to_cpu((x).header)))
+ #define CQE_OPCODE(x)     (G_CQE_OPCODE(be32_to_cpu((x).header)))
+ 
++#define CQE_SEND_OPCODE(x)( \
++	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND) || \
++	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE) || \
++	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_INV) || \
++	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE_INV))
++
+ #define CQE_LEN(x)        (be32_to_cpu((x).len))
+ 
+ /* used for RQ completion processing */
+diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
+index 44e936e..8699947 100644
+--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
++++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
+@@ -1678,6 +1678,9 @@ static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
+ {
+ 	struct iwch_ep *ep = ctx;
+ 
++	if (state_read(&ep->com) != FPDU_MODE)
++		return CPL_RET_BUF_DONE;
++
+ 	PDBG("%s ep %p\n", __func__, ep);
+ 	skb_pull(skb, sizeof(struct cpl_rdma_terminate));
+ 	PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
+diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c
+index 7b67a67..743c5d8 100644
+--- a/drivers/infiniband/hw/cxgb3/iwch_ev.c
++++ b/drivers/infiniband/hw/cxgb3/iwch_ev.c
+@@ -179,11 +179,6 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
+ 	case TPT_ERR_BOUND:
+ 	case TPT_ERR_INVALIDATE_SHARED_MR:
+ 	case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
+-		printk(KERN_ERR "%s - CQE Err qpid 0x%x opcode %d status 0x%x "
+-		       "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __func__,
+-		       CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
+-		       CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
+-		       CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
+ 		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+ 		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1);
+ 		break;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0400_remove_modulo_math_from_build_rdma_recv.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0400_remove_modulo_math_from_build_rdma_recv.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0400_remove_modulo_math_from_build_rdma_recv.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,27 @@
+commit 1557b4f052cb739a4ae1dd9641249b3e69fb6a0d
+Author: Steve Wise <swise at opengridcomputing.com>
+Date:   Thu Feb 12 21:43:05 2009 -0800
+
+    RDMA/cxgb3: Remove modulo math from build_rdma_recv()
+    
+    Remove modulo usage to avoid a divide in the fast path (not all
+    gcc versions do strength reduction here).
+    
+    Signed-off-by: Steve Wise <swise at opengridcomputing.com>
+    Signed-off-by: Roland Dreier <rolandd at cisco.com>
+
+diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
+index c84ac5b..b520d86 100644
+--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
++++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
+@@ -263,8 +263,8 @@ static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
+ 		wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
+ 
+ 		/* to in the WQE == the offset into the page */
+-		wqe->recv.sgl[i].to = cpu_to_be64(((u32) wr->sg_list[i].addr) %
+-				(1UL << (12 + page_size[i])));
++		wqe->recv.sgl[i].to = cpu_to_be64(((u32)wr->sg_list[i].addr) &
++				((1UL << (12 + page_size[i])) - 1));
+ 
+ 		/* pbl_addr is the adapters address in the PBL */
+ 		wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
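
The rewrite relies on the identity that, for a power-of-two modulus,
x % (1 << n) == x & ((1 << n) - 1), so the divide can be replaced by a mask.
A tiny stand-alone check (not part of the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	unsigned shift = 12 + 2;        /* e.g. page_size[i] == 2 => 16 KB */
	uint64_t mod   = 1ULL << shift;
	uint32_t addr;

	for (addr = 0; addr < (1u << 20); addr += 4093)
		assert((addr % mod) == (addr & (mod - 1)));
	return 0;
}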

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0500_Release_dependent_resources.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0500_Release_dependent_resources.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0500_Release_dependent_resources.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,105 @@
+commit e332d8e1bde9f013c9ee2c45e0708637251c9828
+Author: Steve Wise <swise at opengridcomputing.com>
+Date:   Fri Mar 27 09:34:21 2009 -0500
+
+    RDMA/cxgb3: Release dependent resources only when endpoint memory is freed.
+    
+    The cxgb3 l2t entry, hwtid, and dst entry were being released before
+    all the iwch_ep references were released.  This can cause a crash in
+    t3_l2t_send_slow() and other places where the l2t entry is used.
+    
+    The fix is to defer releasing these resources until all endpoint
+    references are gone.
+    
+    Details:
+    
+    - move flags field to the iwch_ep_common struct.
+    - add a flag indicating resources are to be released.
+    - release resources at endpoint free time instead of close/abort time.
+    
+    Signed-off-by: Steve Wise <swise at opengridcomputing.com>
+
+diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
+index c325c44..3c382f9 100644
+--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
++++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
+@@ -252,18 +252,22 @@ static void *alloc_ep(int size, gfp_t gfp)
+ 
+ void __free_ep(struct kref *kref)
+ {
+-	struct iwch_ep_common *epc;
+-	epc = container_of(kref, struct iwch_ep_common, kref);
+-	PDBG("%s ep %p state %s\n", __func__, epc, states[state_read(epc)]);
+-	kfree(epc);
++	struct iwch_ep *ep;
++	ep = container_of(container_of(kref, struct iwch_ep_common, kref),
++			  struct iwch_ep, com);
++	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
++	if (ep->com.flags & RELEASE_RESOURCES) {
++		cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
++		dst_release(ep->dst);
++		l2t_release(L2DATA(ep->com.tdev), ep->l2t);
++	}
++	kfree(ep);
+ }
+ 
+ static void release_ep_resources(struct iwch_ep *ep)
+ {
+ 	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
+-	cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
+-	dst_release(ep->dst);
+-	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
++	ep->com.flags |= RELEASE_RESOURCES;
+ 	put_ep(&ep->com);
+ }
+ 
+@@ -1127,8 +1131,8 @@ static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
+ 	 * We get 2 abort replies from the HW.  The first one must
+ 	 * be ignored except for scribbling that we need one more.
+ 	 */
+-	if (!(ep->flags & ABORT_REQ_IN_PROGRESS)) {
+-		ep->flags |= ABORT_REQ_IN_PROGRESS;
++	if (!(ep->com.flags & ABORT_REQ_IN_PROGRESS)) {
++		ep->com.flags |= ABORT_REQ_IN_PROGRESS;
+ 		return CPL_RET_BUF_DONE;
+ 	}
+ 
+@@ -1534,8 +1538,8 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
+ 	 * We get 2 peer aborts from the HW.  The first one must
+ 	 * be ignored except for scribbling that we need one more.
+ 	 */
+-	if (!(ep->flags & PEER_ABORT_IN_PROGRESS)) {
+-		ep->flags |= PEER_ABORT_IN_PROGRESS;
++	if (!(ep->com.flags & PEER_ABORT_IN_PROGRESS)) {
++		ep->com.flags |= PEER_ABORT_IN_PROGRESS;
+ 		return CPL_RET_BUF_DONE;
+ 	}
+ 
+diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h
+index d7c7e09..43c0aea 100644
+--- a/drivers/infiniband/hw/cxgb3/iwch_cm.h
++++ b/drivers/infiniband/hw/cxgb3/iwch_cm.h
+@@ -147,6 +147,7 @@ enum iwch_ep_state {
+ enum iwch_ep_flags {
+ 	PEER_ABORT_IN_PROGRESS	= (1 << 0),
+ 	ABORT_REQ_IN_PROGRESS	= (1 << 1),
++	RELEASE_RESOURCES	= (1 << 2),
+ };
+ 
+ struct iwch_ep_common {
+@@ -161,6 +162,7 @@ struct iwch_ep_common {
+ 	wait_queue_head_t waitq;
+ 	int rpl_done;
+ 	int rpl_err;
++	u32 flags;
+ };
+ 
+ struct iwch_listen_ep {
+@@ -188,7 +190,6 @@ struct iwch_ep {
+ 	u16 plen;
+ 	u32 ird;
+ 	u32 ord;
+-	u32 flags;
+ };
+ 
+ static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id)

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0600_Adjust_ordird_if_needed_for_peer2peer_connections.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0600_Adjust_ordird_if_needed_for_peer2peer_connections.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0600_Adjust_ordird_if_needed_for_peer2peer_connections.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,43 @@
+RDMA/cxgb3: Adjust ord/ird if needed for peer2peer connections
+
+From: Steve Wise <swise at opengridcomputing.com>
+
+NFSRDMA currently fails to set up connections if peer2peer is on.  This is
+due to the fact that the NFSRDMA client sets its ord to 0.
+
+If peer2peer is set, make sure the active side ord is >= 1 and the
+passive side ird is >=1.
+
+Signed-off-by: Steve Wise <swise at opengridcomputing.com>
+---
+
+ drivers/infiniband/hw/cxgb3/iwch_cm.c |    8 ++++++++
+ 1 files changed, 8 insertions(+), 0 deletions(-)
+
+
+diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
+index 042cc4d..c1f121e 100644
+--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
++++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
+@@ -1830,6 +1830,10 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 	ep->com.rpl_err = 0;
+ 	ep->ird = conn_param->ird;
+ 	ep->ord = conn_param->ord;
++
++	if (peer2peer && ep->ird == 0)
++		ep->ird = 1;
++
+ 	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
+ 
+ 	get_ep(&ep->com);
+@@ -1915,6 +1919,10 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 		       conn_param->private_data, ep->plen);
+ 	ep->ird = conn_param->ird;
+ 	ep->ord = conn_param->ord;
++
++	if (peer2peer && ep->ord == 0)
++		ep->ord = 1;
++
+ 	ep->com.tdev = h->rdev.t3cdev_p;
+ 
+ 	cm_id->add_ref(cm_id);

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0700_Dont_zero_the_qp_attrs_when_moving_to_IDLE.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0700_Dont_zero_the_qp_attrs_when_moving_to_IDLE.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0700_Dont_zero_the_qp_attrs_when_moving_to_IDLE.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,27 @@
+RDMA/cxgb3: Don't zero the qp attrs when moving to IDLE.
+
+From: Steve Wise <swise at opengridcomputing.com>
+
+QP attributes must stay initialized when moving back to IDLE. Zeroing
+them will crash the system in _flush_qp() if the QP is subsequently
+moved to ERROR and back to IDLE.
+
+Signed-off-by: Steve Wise <swise at opengridcomputing.com>
+---
+
+ drivers/infiniband/hw/cxgb3/iwch_qp.c |    1 -
+ 1 files changed, 0 insertions(+), 1 deletions(-)
+
+
+diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
+index 2f546a6..27bbdc8 100644
+--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
++++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
+@@ -1069,7 +1069,6 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
+ 			goto out;
+ 		}
+ 		qhp->attr.state = IWCH_QP_STATE_IDLE;
+-		memset(&qhp->attr, 0, sizeof(qhp->attr));
+ 		break;
+ 	case IWCH_QP_STATE_TERMINATE:
+ 		if (!internal) {

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0800_flush_sq_fix.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0800_flush_sq_fix.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/iw_cxgb3_0800_flush_sq_fix.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,27 @@
+commit 7c9be46af6e4ede212e1b0a16bcfeb1ac1570601
+Author: Steve Wise <swise at opengridcomputing.com>
+Date:   Wed Apr 29 13:46:10 2009 -0500
+
+    RDMA/cxgb3: flushed sq wr completions get inserted twice into the cqe
+    
+    When the sq is flushed, mark the flushed entries as not signaled so
+    the poll logic doesn't re-insert the cqe thinking it's an out of order
+    completion.
+    
+    The bug can cause the nfsrdma server to crash due to processing the same
+    completed WR twice.
+    
+    Signed-off-by: Steve Wise <swise at opengridcomputing.com>
+
+diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
+index 8d71086..62f9cf2 100644
+--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
++++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
+@@ -410,6 +410,7 @@ int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
+ 	ptr = wq->sq_rptr + count;
+ 	sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
+ 	while (ptr != wq->sq_wptr) {
++		sqp->signaled = 0;
+ 		insert_sq_cqe(wq, cq, sqp);
+ 		ptr++;
+ 		sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0010_add_wc.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0010_add_wc.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0010_add_wc.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -2,7 +2,10 @@
 
 Supported on i386 and x86_64 for now.
 
+V2: Added support for PPC64, cleaned up warnings for unsupported platforms.
+
 Signed-off-by: Michael S. Tsirkin <mst at dev.mellanox.co.il>
+Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
 
 ---
  drivers/infiniband/hw/mlx4/Makefile |    1 
@@ -13,8 +16,8 @@
 
 Index: ofed_kernel/drivers/infiniband/hw/mlx4/Makefile
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/mlx4/Makefile
-+++ ofed_kernel/drivers/infiniband/hw/mlx4/Makefile
+--- ofed_kernel.orig/drivers/infiniband/hw/mlx4/Makefile	2009-04-05 10:02:23.222077000 +0300
++++ ofed_kernel/drivers/infiniband/hw/mlx4/Makefile	2009-04-05 10:10:47.671938000 +0300
 @@ -1,3 +1,4 @@
  obj-$(CONFIG_MLX4_INFINIBAND)	+= mlx4_ib.o
  
@@ -22,8 +25,8 @@
 +mlx4_ib-y +=	wc.o
 Index: ofed_kernel/drivers/infiniband/hw/mlx4/main.c
 ===================================================================
---- ofed_kernel.orig/drivers/infiniband/hw/mlx4/main.c
-+++ ofed_kernel/drivers/infiniband/hw/mlx4/main.c
+--- ofed_kernel.orig/drivers/infiniband/hw/mlx4/main.c	2009-04-05 10:02:23.223082000 +0300
++++ ofed_kernel/drivers/infiniband/hw/mlx4/main.c	2009-04-05 10:11:29.782421000 +0300
 @@ -43,6 +43,7 @@
  
  #include "mlx4_ib.h"
@@ -58,9 +61,9 @@
  module_init(mlx4_ib_init);
 Index: ofed_kernel/drivers/infiniband/hw/mlx4/wc.c
 ===================================================================
---- /dev/null
-+++ ofed_kernel/drivers/infiniband/hw/mlx4/wc.c
-@@ -0,0 +1,206 @@
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ ofed_kernel/drivers/infiniband/hw/mlx4/wc.c	2009-04-05 10:13:42.026670000 +0300
+@@ -0,0 +1,232 @@
 +/*
 + * Copyright (c) 2006-2007 Mellanox Technologies. All rights reserved.
 + *
@@ -96,18 +99,17 @@
 +#include <linux/pci.h>
 +#include "wc.h"
 +
-+static u32 old_pat_lo[NR_CPUS] = {0};
-+static u32 old_pat_hi[NR_CPUS] = {0};
-+static unsigned int wc_enabled = 0;
++#if defined(__i386__) || defined(__x86_64__)
 +
 +#define MLX4_PAT_MASK	(0xFFFFF8FF)
 +#define MLX4_PAT_MOD	(0x00000100)
 +#define MLX4_WC_FLAGS	(_PAGE_PWT)
++#define X86_MSR_PAT_OFFSET  0x277
 +
-+#if defined(__i386__) || defined(__x86_64__)
++static unsigned int wc_enabled = 0;
++static u32 old_pat_lo[NR_CPUS] = {0};
++static u32 old_pat_hi[NR_CPUS] = {0};
 +
-+#define X86_MSR_PAT_OFFSET  0x277
-+
 +/*  Returns non-zero if we have a chipset write-combining problem */
 +static int have_wc_errata(void)
 +{
@@ -250,6 +252,33 @@
 +	return wc_enabled;
 +}
 +
++#elif defined(CONFIG_PPC64)
++
++static unsigned int wc_enabled = 0;
++
++int mlx4_enable_wc(void)
++{
++	wc_enabled = 1;
++	return 0;
++}
++
++void mlx4_disable_wc(void)
++{
++	wc_enabled = 0;
++}
++
++pgprot_t pgprot_wc(pgprot_t _prot)
++{
++	return wc_enabled ? __pgprot((pgprot_val(_prot) | _PAGE_NO_CACHE) &
++				     ~(pgprot_t)_PAGE_GUARDED) :
++			    pgprot_noncached(_prot);
++}
++
++int mlx4_wc_enabled(void)
++{
++	return wc_enabled;
++}
++
 +#else	/* !(defined(__i386__) || defined(__x86_64__)) */
 +
 +int mlx4_enable_wc(void){ return 0; }
@@ -269,8 +298,8 @@
 +
 Index: ofed_kernel/drivers/infiniband/hw/mlx4/wc.h
 ===================================================================
---- /dev/null
-+++ ofed_kernel/drivers/infiniband/hw/mlx4/wc.h
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ ofed_kernel/drivers/infiniband/hw/mlx4/wc.h	2009-04-05 10:10:47.686936000 +0300
 @@ -0,0 +1,43 @@
 +/*
 + * Copyright (c) 2006-2007 Mellanox Technologies. All rights reserved.

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0420_Auto-negotiation-support.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0420_Auto-negotiation-support.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0420_Auto-negotiation-support.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -414,7 +414,7 @@
 +	/*
 +	 * If both sensed nothing, remain in current configuration.
 +	 */
-+        for (i = 0; i < dev->caps.num_ports - 1; i++)
++        for (i = 0; i < dev->caps.num_ports; i++)
 +		stype[i] = stype[i] ? stype[i] : dev->caps.port_type[i+1];
 +
 +	if (mlx4_check_port_params(dev, stype))
@@ -472,7 +472,7 @@
 +void mlx4_sense_cleanup(struct mlx4_dev *dev)
 +{
 +	mlx4_stop_sense(dev);
-+	cancel_delayed_work_sync(&mlx4_priv(dev)->sense.sense_poll);
++	cancel_delayed_work(&mlx4_priv(dev)->sense.sense_poll);
 +	destroy_workqueue(mlx4_priv(dev)->sense.sense_wq);
 +}
 +

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0500_VPI_makefile.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0500_VPI_makefile.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0500_VPI_makefile.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -13,4 +13,4 @@
 +
 +obj-$(CONFIG_MLX4_EN)			+= mlx4_en.o
 +
-+mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o
++mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o en_resources.o en_netdev.o en_frag.o en_lro.o

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0600_sense_from_sysfs_context.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0600_sense_from_sysfs_context.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0600_sense_from_sysfs_context.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,4 +1,4 @@
-From a82c8d6923ce13845e50771ba76b8b9dbccb82ca Mon Sep 17 00:00:00 2001
+From 652a6299b6e3516cec2d40809ea33309da413707 Mon Sep 17 00:00:00 2001
 From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
 Date: Thu, 4 Dec 2008 10:53:12 +0200
 Subject: [PATCH] mlx4: sense from sysfs context
@@ -9,14 +9,28 @@
 
 Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
 ---
- drivers/net/mlx4/main.c |   14 ++++++++++----
- 1 files changed, 10 insertions(+), 4 deletions(-)
+ drivers/net/mlx4/main.c |   20 +++++++++++++-------
+ 1 files changed, 13 insertions(+), 7 deletions(-)
 
 diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
-index 14d6b3c..7df678d 100644
+index 14d6b3c..4ac6896 100644
 --- a/drivers/net/mlx4/main.c
 +++ b/drivers/net/mlx4/main.c
-@@ -427,6 +427,7 @@ static ssize_t set_port_type(struct device *dev,
+@@ -372,11 +372,10 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
+ 	int port;
+ 
+ 	for (port = 0; port <  dev->caps.num_ports; port++) {
+-		if (port_types[port] != dev->caps.port_type[port + 1]) {
++		if (port_types[port] != dev->caps.port_type[port + 1] &&
++		    port_types[port] != MLX4_PORT_TYPE_AUTO) {
+ 			change = 1;
+ 			dev->caps.port_type[port + 1] = port_types[port];
+-			if (dev->caps.possible_type[port + 1] != MLX4_PORT_TYPE_AUTO)
+-				dev->caps.possible_type[port + 1] = port_types[port];
+ 		}
+ 	}
+ 	if (change) {
+@@ -427,6 +426,7 @@ static ssize_t set_port_type(struct device *dev,
  	struct mlx4_dev *mdev = info->dev;
  	struct mlx4_priv *priv = mlx4_priv(mdev);
  	enum mlx4_port_type types[MLX4_MAX_PORTS];
@@ -24,7 +38,7 @@
  	int i;
  	int err = 0;
  
-@@ -442,6 +443,15 @@ static ssize_t set_port_type(struct device *dev,
+@@ -442,6 +442,16 @@ static ssize_t set_port_type(struct device *dev,
  	}
  
  	mutex_lock(&priv->port_mutex);
@@ -34,7 +48,8 @@
 +		if (!err && (tmp_type == MLX4_PORT_TYPE_ETH ||
 +			     tmp_type == MLX4_PORT_TYPE_IB)) {
 +			info->tmp_type = tmp_type;
-+		}
++		} else
++			info->tmp_type = mdev->caps.port_type[info->port];
 +	}
 +
  	for (i = 0; i < mdev->caps.num_ports; i++)

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0640_port_configuration_documentation.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0640_port_configuration_documentation.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0640_port_configuration_documentation.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,91 @@
+From 2211e5f9890a97ebe1b04a43c2c6500becc5326e Mon Sep 17 00:00:00 2001
+From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
+Date: Thu, 25 Dec 2008 17:19:58 +0200
+Subject: [PATCH] mlx4: Added documentation to the port management part.
+
+Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
+---
+ drivers/net/mlx4/main.c  |   18 ++++++++++++++++++
+ drivers/net/mlx4/sense.c |    2 +-
+ 2 files changed, 19 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
+index 4ac6896..74553b9 100644
+--- a/drivers/net/mlx4/main.c
++++ b/drivers/net/mlx4/main.c
+@@ -372,6 +372,8 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
+ 	int port;
+ 
+ 	for (port = 0; port <  dev->caps.num_ports; port++) {
++		/* Change the port type only if the new type is different
++		 * from the current, and not set to Auto */
+ 		if (port_types[port] != dev->caps.port_type[port + 1] &&
+ 		    port_types[port] != MLX4_PORT_TYPE_AUTO) {
+ 			change = 1;
+@@ -442,8 +444,12 @@ static ssize_t set_port_type(struct device *dev,
+ 	}
+ 
+ 	mutex_lock(&priv->port_mutex);
++	/* Possible type is always the one that was delivered */
+ 	mdev->caps.possible_type[info->port] = info->tmp_type;
+ 	if (info->tmp_type == MLX4_PORT_TYPE_AUTO) {
++		/* Sense immediately to find the actual port type
++		 * If no port was sensed, assign the currently 
++		 * configured port type */
+ 		err = mlx4_SENSE_PORT(mdev, info->port, &tmp_type);
+ 		if (!err && (tmp_type == MLX4_PORT_TYPE_ETH ||
+ 			     tmp_type == MLX4_PORT_TYPE_IB)) {
+@@ -452,11 +458,15 @@ static ssize_t set_port_type(struct device *dev,
+ 			info->tmp_type = mdev->caps.port_type[info->port];
+ 	}
+ 
++	/* Collect the required port types from all ports,
++	 * If not specified, use the currently configured */
+ 	for (i = 0; i < mdev->caps.num_ports; i++)
+ 		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
+ 					mdev->caps.possible_type[i+1];
+ 
+ 	if (priv->trig) {
++		/* Wait for the other ports to be modified, or perform
++		 * the change now */
+ 		if (++priv->changed_ports < mdev->caps.num_ports)
+ 			goto out;
+ 		else
+@@ -466,6 +476,9 @@ static ssize_t set_port_type(struct device *dev,
+ 	if (err)
+ 		goto out;
+ 
++	/* We are about to apply the changes after the configuration
++	 * was verified, no need to remember the temporary types
++	 * any more */
+ 	for (i = 0; i < mdev->caps.num_ports; i++) {
+ 		priv->port[i + 1].tmp_type = 0;
+ 	}
+@@ -477,6 +490,11 @@ out:
+ 	return err ? err : count;
+ }
+ 
++/*
++ * This function is invoked if user wants to modify all port types
++ * at once. We will wait for all the ports to be assigned new values,
++ * and only then will perform the change.
++ */
+ static ssize_t trigger_port(struct device *dev,
+ 			    struct device_attribute *attr,
+ 			    const char *buf, size_t count)
+diff --git a/drivers/net/mlx4/sense.c b/drivers/net/mlx4/sense.c
+index c3e556d..44e1a08 100644
+--- a/drivers/net/mlx4/sense.c
++++ b/drivers/net/mlx4/sense.c
+@@ -98,7 +98,7 @@ static void mlx4_sense_port(struct work_struct *work)
+ 	}
+ 
+ 	/*
+-	 * If both sensed nothing, remain in current configuration.
++	 * If sensed nothing, remain in current configuration.
+ 	 */
+         for (i = 0; i < dev->caps.num_ports; i++)
+ 		stype[i] = stype[i] ? stype[i] : dev->caps.port_type[i+1];
+-- 
+1.5.3
+

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0650_fix_taking_SL_field_of_cqe_sl_vid.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0650_fix_taking_SL_field_of_cqe_sl_vid.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0650_fix_taking_SL_field_of_cqe_sl_vid.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,26 @@
+From 04ec21e0690e8af182635c95e85a73842d3adaa5 Mon Sep 17 00:00:00 2001
+From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
+Date: Mon, 5 Jan 2009 09:16:45 +0200
+Subject: [PATCH] mlx4_ib: Fix taking SL field of cqe->sl_vid
+
+Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
+---
+ drivers/infiniband/hw/mlx4/cq.c |    2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
+index d0866a3..683cdfc 100644
+--- a/drivers/infiniband/hw/mlx4/cq.c
++++ b/drivers/infiniband/hw/mlx4/cq.c
+@@ -692,7 +692,7 @@ repoll:
+ 		}
+ 
+ 		wc->slid	   = be16_to_cpu(cqe->rlid);
+-		wc->sl		   = be16_to_cpu(cqe->sl_vid >> 12);
++		wc->sl		   = be16_to_cpu(cqe->sl_vid) >> 12;
+ 		g_mlpath_rqpn	   = be32_to_cpu(cqe->g_mlpath_rqpn);
+ 		wc->src_qp	   = g_mlpath_rqpn & 0xffffff;
+ 		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
+-- 
+1.5.3
+

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0660_no_IB_device_registration_without_ib_ports.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0660_no_IB_device_registration_without_ib_ports.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0660_no_IB_device_registration_without_ib_ports.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,58 @@
+From 8fc65c6a202e16cb3a0d7736f86cad2d9ef05685 Mon Sep 17 00:00:00 2001
+From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
+Date: Sun, 11 Jan 2009 17:10:36 +0200
+Subject: [PATCH] mlx4_ib: Don't register IB device for adapters with no IB ports
+
+If the mlx4_ib driver finds an adapter that has only ethernet ports, the
+current code will register an IB device with 0 ports.  Nothing useful or
+sensible can be done with such a device, so just skip registering it.
+
+Signed-off-by: Roland Dreier <rolandd at cisco.com>
+---
+ drivers/infiniband/hw/mlx4/main.c |   16 ++++++++++------
+ 1 files changed, 10 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
+index 3ef3956..7ecb001 100644
+--- a/drivers/infiniband/hw/mlx4/main.c
++++ b/drivers/infiniband/hw/mlx4/main.c
+@@ -760,14 +760,22 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
+ {
+ 	static int mlx4_ib_version_printed;
+ 	struct mlx4_ib_dev *ibdev;
++	int num_ports = 0;
+ 	int i;
+ 
+-
+ 	if (!mlx4_ib_version_printed) {
+ 		printk(KERN_INFO "%s", mlx4_ib_version);
+ 		++mlx4_ib_version_printed;
+ 	}
+ 
++
++	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
++		num_ports++;
++
++	/* No point in registering device with no ports */
++	if (num_ports == 0)
++		return NULL;
++
+ 	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
+ 	if (!ibdev) {
+ 		dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
+@@ -791,11 +799,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
+ 	ibdev->ib_dev.owner		= THIS_MODULE;
+ 	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
+ 	ibdev->ib_dev.local_dma_lkey	= dev->caps.reserved_lkey;
+-	ibdev->num_ports = 0;
+-	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+-		ibdev->num_ports++;
+-	if (!ibdev->num_ports)
+-		return ibdev;
++	ibdev->num_ports = num_ports;
+ 	ibdev->ib_dev.phys_port_cnt     = ibdev->num_ports;
+ 	ibdev->ib_dev.num_comp_vectors	= dev->caps.num_comp_vectors;
+ 	ibdev->ib_dev.dma_device	= &dev->pdev->dev;
+-- 
+1.5.3
+

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0670_invoke_sense_function_from_sysfs_context.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0670_invoke_sense_function_from_sysfs_context.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_0670_invoke_sense_function_from_sysfs_context.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,201 @@
+From 98f4278dd97e5f7907eb5585c6b7fc4d294c4f78 Mon Sep 17 00:00:00 2001
+From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
+Date: Wed, 21 Jan 2009 15:50:01 +0200
+Subject: [PATCH] mlx4: Always sense from sysfs context.
+
+Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
+---
+ drivers/net/mlx4/main.c  |   43 ++++++++++++++++++++++++++-----------------
+ drivers/net/mlx4/mlx4.h  |    4 +++-
+ drivers/net/mlx4/sense.c |   38 ++++++++++++++++++++++++--------------
+ 3 files changed, 53 insertions(+), 32 deletions(-)
+
+diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
+index 74655f3..50a6ce3 100644
+--- a/drivers/net/mlx4/main.c
++++ b/drivers/net/mlx4/main.c
+@@ -372,8 +372,7 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
+ 	for (port = 0; port <  dev->caps.num_ports; port++) {
+ 		/* Change the port type only if the new type is different
+ 		 * from the current, and not set to Auto */
+-		if (port_types[port] != dev->caps.port_type[port + 1] &&
+-		    port_types[port] != MLX4_PORT_TYPE_AUTO) {
++		if (port_types[port] != dev->caps.port_type[port + 1]) {
+ 			change = 1;
+ 			dev->caps.port_type[port + 1] = port_types[port];
+ 		}
+@@ -426,7 +425,7 @@ static ssize_t set_port_type(struct device *dev,
+ 	struct mlx4_dev *mdev = info->dev;
+ 	struct mlx4_priv *priv = mlx4_priv(mdev);
+ 	enum mlx4_port_type types[MLX4_MAX_PORTS];
+-	enum mlx4_port_type tmp_type;
++	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
+ 	int i;
+ 	int err = 0;
+ 
+@@ -444,23 +443,15 @@ static ssize_t set_port_type(struct device *dev,
+ 	mutex_lock(&priv->port_mutex);
+ 	/* Possible type is always the one that was delivered */
+ 	mdev->caps.possible_type[info->port] = info->tmp_type;
+-	if (info->tmp_type == MLX4_PORT_TYPE_AUTO) {
+-		/* Sense immediately to find the actual port type
+-		 * If no port was sensed, assign the currently 
+-		 * configured port type */
+-		err = mlx4_SENSE_PORT(mdev, info->port, &tmp_type);
+-		if (!err && (tmp_type == MLX4_PORT_TYPE_ETH ||
+-			     tmp_type == MLX4_PORT_TYPE_IB)) {
+-			info->tmp_type = tmp_type;
+-		} else
+-			info->tmp_type = mdev->caps.port_type[info->port];
+-	}
+ 
+ 	/* Collect the required port types from all ports,
+ 	 * If not specified, use the currently configured */
+-	for (i = 0; i < mdev->caps.num_ports; i++)
++	for (i = 0; i < mdev->caps.num_ports; i++) {
+ 		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
+ 					mdev->caps.possible_type[i+1];
++		if (types[i] == MLX4_PORT_TYPE_AUTO)
++			types[i] = mdev->caps.port_type[i+1];
++	}
+ 
+ 	if (priv->trig) {
+ 		/* Wait for the other ports to be modified, or perform
+@@ -470,7 +461,25 @@ static ssize_t set_port_type(struct device *dev,
+ 		else
+ 			priv->trig = priv->changed_ports = 0;
+ 	}
+-	err = mlx4_check_port_params(mdev, types);
++
++	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
++		for (i = 1; i <= mdev->caps.num_ports; i++) {
++			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
++				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
++				err = -EINVAL;
++			}
++		}
++	}
++	if (err) {
++		mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
++			       "Set only 'eth' or 'ib' for both ports "
++			       "(should be the same)\n");
++		goto out;
++	}
++
++	mlx4_do_sense_ports(mdev, new_types, types);
++
++	err = mlx4_check_port_params(mdev, new_types);
+ 	if (err)
+ 		goto out;
+ 
+@@ -481,7 +490,7 @@ static ssize_t set_port_type(struct device *dev,
+ 		priv->port[i + 1].tmp_type = 0;
+ 	}
+ 
+-	err = mlx4_change_port_types(mdev, types);
++	err = mlx4_change_port_types(mdev, new_types);
+ 
+ out:
+ 	mutex_unlock(&priv->port_mutex);
+diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
+index 279afa6..c156a3b 100644
+--- a/drivers/net/mlx4/mlx4.h
++++ b/drivers/net/mlx4/mlx4.h
+@@ -408,11 +408,13 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
+ 
+ void mlx4_handle_catas_err(struct mlx4_dev *dev);
+ 
++void mlx4_do_sense_ports(struct mlx4_dev *dev,
++			 enum mlx4_port_type *stype,
++			 enum mlx4_port_type *defaults);
+ void mlx4_start_sense(struct mlx4_dev *dev);
+ void mlx4_stop_sense(struct mlx4_dev *dev);
+ int mlx4_sense_init(struct mlx4_dev *dev);
+ void mlx4_sense_cleanup(struct mlx4_dev *dev);
+-int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, enum mlx4_port_type *type);
+ int mlx4_check_port_params(struct mlx4_dev *dev,
+ 			   enum mlx4_port_type *port_type);
+ int mlx4_change_port_types(struct mlx4_dev *dev,
+diff --git a/drivers/net/mlx4/sense.c b/drivers/net/mlx4/sense.c
+index 44e1a08..073934c 100644
+--- a/drivers/net/mlx4/sense.c
++++ b/drivers/net/mlx4/sense.c
+@@ -39,7 +39,8 @@
+ #include "mlx4.h"
+ 
+ 
+-int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, enum mlx4_port_type *type)
++static int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
++			   enum mlx4_port_type *type)
+ {
+ 	u64 out_param;
+ 	int err = 0;
+@@ -60,27 +61,23 @@ int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, enum mlx4_port_type *type)
+ 	return 0;
+ }
+ 
+-static void mlx4_sense_port(struct work_struct *work)
++void mlx4_do_sense_ports(struct mlx4_dev *dev,
++			 enum mlx4_port_type *stype,
++			 enum mlx4_port_type *defaults)
+ {
+-	struct delayed_work *delay = container_of(work, struct delayed_work, work);
+-	struct mlx4_sense *sense = container_of(delay, struct mlx4_sense,
+-						sense_poll);
+-	struct mlx4_dev *dev = sense->dev;
+-	struct mlx4_priv *priv = mlx4_priv(dev);
+-	enum mlx4_port_type stype[MLX4_MAX_PORTS];
+-	int err = 0;
++	struct mlx4_sense *sense = &mlx4_priv(dev)->sense;
++	int err;
+ 	int i;
+ 
+-	mutex_lock(&priv->port_mutex);
+ 	for (i = 1; i <= dev->caps.num_ports; i++) {
+ 		stype[i-1] = 0;
+ 		if (sense->do_sense_port[i] && sense->sense_allowed[i] &&
+ 		    dev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
+ 			err = mlx4_SENSE_PORT(dev, i, &stype[i-1]);
+ 			if (err)
+-				goto sense_again;
++				stype[i-1] = defaults[i-1];
+ 		} else
+-			stype[i-1] = dev->caps.port_type[i];
++			stype[i-1] = defaults[i-1];
+ 	}
+ 
+ 	/*
+@@ -101,7 +98,21 @@ static void mlx4_sense_port(struct work_struct *work)
+ 	 * If sensed nothing, remain in current configuration.
+ 	 */
+         for (i = 0; i < dev->caps.num_ports; i++)
+-		stype[i] = stype[i] ? stype[i] : dev->caps.port_type[i+1];
++		stype[i] = stype[i] ? stype[i] : defaults[i];
++
++}
++
++static void mlx4_sense_port(struct work_struct *work)
++{
++	struct delayed_work *delay = container_of(work, struct delayed_work, work);
++	struct mlx4_sense *sense = container_of(delay, struct mlx4_sense,
++						sense_poll);
++	struct mlx4_dev *dev = sense->dev;
++	struct mlx4_priv *priv = mlx4_priv(dev);
++	enum mlx4_port_type stype[MLX4_MAX_PORTS];
++
++	mutex_lock(&priv->port_mutex);
++	mlx4_do_sense_ports(dev, stype, &dev->caps.port_type[1]);
+ 
+ 	if (mlx4_check_port_params(dev, stype))
+ 		goto sense_again;
+@@ -116,7 +127,6 @@ sense_again:
+ 				   round_jiffies(MLX4_SENSE_RANGE));
+ }
+ 
+-
+ void mlx4_start_sense(struct mlx4_dev *dev)
+ {
+ 	struct mlx4_priv *priv = mlx4_priv(dev);
+-- 
+1.5.3
+

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1010_Fibre-Channel-support.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1010_Fibre-Channel-support.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1010_Fibre-Channel-support.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,374 @@
+From a3859749c13cc3fa2234688f5334ea2e57d1eb8f Mon Sep 17 00:00:00 2001
+From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
+Date: Sun, 16 Nov 2008 10:25:59 +0200
+Subject: [PATCH] mlx4: Fibre Channel support
+
+As we did with QPs, some of the MPTs are pre-reserved
+(the MPTs that are mapped for FEXCHs, 2*64K of them).
+So we needed to split the operation of allocating an MPT into two steps:
+        the allocation of a bit from the bitmap, and
+        the actual creation of the entry (and its MTT).
+mr_alloc_reserved() is the second part, used when you already know which MPT number was allocated.
+mr_alloc() is the one that allocates a number from the bitmap.
+Normal users keep using the original mr_alloc().
+For FEXCH, where the MPT entry is pre-reserved, we call mr_alloc_reserved() directly.
+
+The same applies to mr_free() and the corresponding mr_free_reserved().
+The former just puts the bit back; the latter actually
+destroys the entry but leaves the bit set.
+
+map_phys_fmr_fbo() is very much like the original map_phys_fmr(), except that it:
+ allows setting an FBO (First Byte Offset) for the MPT
+ allows setting the data length for the MPT
+ does not increase the higher bits of the key after every map.
+
+Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
+Signed-off-by: Oren Duer <oren at mellanox.co.il>
+---
+ drivers/net/mlx4/main.c     |    4 +-
+ drivers/net/mlx4/mr.c       |  128 +++++++++++++++++++++++++++++++++++++-----
+ include/linux/mlx4/device.h |   22 +++++++-
+ include/linux/mlx4/qp.h     |   11 +++-
+ 4 files changed, 144 insertions(+), 21 deletions(-)
+
+diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
+index f22b7e0..6d9cb1f 100644
+--- a/drivers/net/mlx4/main.c
++++ b/drivers/net/mlx4/main.c
+@@ -80,12 +80,12 @@ static char mlx4_version[] __devinitdata =
+ 	DRV_VERSION " (" DRV_RELDATE ")\n";
+ 
+ static struct mlx4_profile default_profile = {
+-	.num_qp		= 1 << 17,
++	.num_qp		= 1 << 18,
+ 	.num_srq	= 1 << 16,
+ 	.rdmarc_per_qp	= 1 << 4,
+ 	.num_cq		= 1 << 16,
+ 	.num_mcg	= 1 << 13,
+-	.num_mpt	= 1 << 17,
++	.num_mpt	= 1 << 19,
+ 	.num_mtt	= 1 << 20,
+ };
+ 
+diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
+index 0caf74c..0352dd3 100644
+--- a/drivers/net/mlx4/mr.c
++++ b/drivers/net/mlx4/mr.c
+@@ -52,7 +52,9 @@ struct mlx4_mpt_entry {
+ 	__be64 length;
+ 	__be32 lkey;
+ 	__be32 win_cnt;
+-	u8	reserved1[3];
++	u8	reserved1;
++	u8	flags2;
++	u8	reserved2;
+ 	u8	mtt_rep;
+ 	__be64 mtt_seg;
+ 	__be32 mtt_sz;
+@@ -71,6 +73,8 @@ struct mlx4_mpt_entry {
+ #define MLX4_MPT_PD_FLAG_RAE	    (1 << 28)
+ #define MLX4_MPT_PD_FLAG_EN_INV	    (3 << 24)
+ 
++#define MLX4_MPT_FLAG2_FBO_EN	     (1 <<  7)
++
+ #define MLX4_MPT_STATUS_SW		0xF0
+ #define MLX4_MPT_STATUS_HW		0x00
+ 
+@@ -263,6 +267,21 @@ static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
+ 			    !mailbox, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_B);
+ }
+ 
++int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
++			   u64 iova, u64 size, u32 access, int npages,
++			   int page_shift, struct mlx4_mr *mr)
++{
++	mr->iova       = iova;
++	mr->size       = size;
++	mr->pd	       = pd;
++	mr->access     = access;
++	mr->enabled    = 0;
++	mr->key	       = hw_index_to_key(mridx);
++
++	return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
++}
++EXPORT_SYMBOL_GPL(mlx4_mr_alloc_reserved);
++
+ int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
+ 		  int npages, int page_shift, struct mlx4_mr *mr)
+ {
+@@ -274,14 +293,8 @@ int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
+ 	if (index == -1)
+ 		return -ENOMEM;
+ 
+-	mr->iova       = iova;
+-	mr->size       = size;
+-	mr->pd	       = pd;
+-	mr->access     = access;
+-	mr->enabled    = 0;
+-	mr->key	       = hw_index_to_key(index);
+-
+-	err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
++	err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
++				     access, npages, page_shift, mr);
+ 	if (err)
+ 		mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+ 
+@@ -289,9 +302,8 @@ int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
+ }
+ EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
+ 
+-void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
++void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
+ {
+-	struct mlx4_priv *priv = mlx4_priv(dev);
+ 	int err;
+ 
+ 	if (mr->enabled) {
+@@ -303,6 +315,13 @@ void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+ 	}
+ 
+ 	mlx4_mtt_cleanup(dev, &mr->mtt);
++}
++EXPORT_SYMBOL_GPL(mlx4_mr_free_reserved);
++
++void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
++{
++	struct mlx4_priv *priv = mlx4_priv(dev);
++	mlx4_mr_free_reserved(dev, mr);
+ 	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, key_to_hw_index(mr->key));
+ }
+ EXPORT_SYMBOL_GPL(mlx4_mr_free);
+@@ -460,8 +479,16 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
+ 	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+ 	int err;
+ 
++	if (!is_power_of_2(dev->caps.num_mpts))
++		return -EINVAL;
++
++	dev->caps.num_fexch_mpts =
++		2 * dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
++	dev->caps.reserved_fexch_mpts_base = dev->caps.num_mpts -
++		dev->caps.num_fexch_mpts;
+ 	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
+-			       ~0, dev->caps.reserved_mrws, 0);
++			       ~0, dev->caps.reserved_mrws,
++			       dev->caps.reserved_fexch_mpts_base);
+ 	if (err)
+ 		return err;
+ 
+@@ -525,8 +552,9 @@ static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
+ 	return 0;
+ }
+ 
+-int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
+-		      int npages, u64 iova, u32 *lkey, u32 *rkey)
++int mlx4_map_phys_fmr_fbo(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
++			  u64 *page_list, int npages, u64 iova, u32 fbo,
++			  u32 len, u32 *lkey, u32 *rkey, int same_key)
+ {
+ 	u32 key;
+ 	int i, err;
+@@ -538,7 +566,8 @@ int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list
+ 	++fmr->maps;
+ 
+ 	key = key_to_hw_index(fmr->mr.key);
+-	key += dev->caps.num_mpts;
++	if (!same_key)
++		key += dev->caps.num_mpts;
+ 	*lkey = *rkey = fmr->mr.key = hw_index_to_key(key);
+ 
+ 	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;
+@@ -554,8 +583,10 @@ int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list
+ 
+ 	fmr->mpt->key    = cpu_to_be32(key);
+ 	fmr->mpt->lkey   = cpu_to_be32(key);
+-	fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift));
++	fmr->mpt->length = cpu_to_be64(len);
+ 	fmr->mpt->start  = cpu_to_be64(iova);
++	fmr->mpt->first_byte_offset = cpu_to_be32(fbo & 0x001fffff);
++	fmr->mpt->flags2 = (fbo ? MLX4_MPT_FLAG2_FBO_EN : 0);
+ 
+ 	/* Make MTT entries are visible before setting MPT status */
+ 	wmb();
+@@ -567,6 +598,16 @@ int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr_fbo);
++
++int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
++		      int npages, u64 iova, u32 *lkey, u32 *rkey)
++{
++	u32 len = npages * (1ull << fmr->page_shift);
++
++	return mlx4_map_phys_fmr_fbo(dev, fmr, page_list, npages, iova, 0,
++				     len, lkey, rkey, 0);
++}
+ EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);
+ 
+ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
+@@ -611,6 +652,49 @@ err_free:
+ }
+ EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
+ 
++int mlx4_fmr_alloc_reserved(struct mlx4_dev *dev, u32 mridx,
++			    u32 pd, u32 access, int max_pages,
++			    int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
++{
++	struct mlx4_priv *priv = mlx4_priv(dev);
++	u64 mtt_seg;
++	int err = -ENOMEM;
++
++	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
++		return -EINVAL;
++
++	/* All MTTs must fit in the same page */
++	if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
++		return -EINVAL;
++
++	fmr->page_shift = page_shift;
++	fmr->max_pages  = max_pages;
++	fmr->max_maps   = max_maps;
++	fmr->maps = 0;
++
++	err = mlx4_mr_alloc_reserved(dev, mridx, pd, 0, 0, access, max_pages,
++				     page_shift, &fmr->mr);
++	if (err)
++		return err;
++
++	mtt_seg = fmr->mr.mtt.first_seg * dev->caps.mtt_entry_sz;
++
++	fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
++				    fmr->mr.mtt.first_seg,
++				    &fmr->dma_handle);
++	if (!fmr->mtts) {
++		err = -ENOMEM;
++		goto err_free;
++	}
++
++	return 0;
++
++err_free:
++	mlx4_mr_free_reserved(dev, &fmr->mr);
++	return err;
++}
++EXPORT_SYMBOL_GPL(mlx4_fmr_alloc_reserved);
++
+ int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
+ {
+ 	struct mlx4_priv *priv = mlx4_priv(dev);
+@@ -653,6 +737,18 @@ int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
+ }
+ EXPORT_SYMBOL_GPL(mlx4_fmr_free);
+ 
++int mlx4_fmr_free_reserved(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
++{
++	if (fmr->maps)
++		return -EBUSY;
++
++	fmr->mr.enabled = 0;
++	mlx4_mr_free_reserved(dev, &fmr->mr);
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(mlx4_fmr_free_reserved);
++
+ int mlx4_SYNC_TPT(struct mlx4_dev *dev)
+ {
+ 	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000);
+diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
+index edfb789..b859118 100644
+--- a/include/linux/mlx4/device.h
++++ b/include/linux/mlx4/device.h
+@@ -168,9 +168,8 @@ enum mlx4_special_vlan_idx {
+ 	MLX4_VLAN_REGULAR
+ };
+ 
+-
+ enum {
+-	MLX4_NUM_FEXCH          = 64 * 1024,
++	MLX4_NUM_FEXCH          = MLX4_MAX_PORTS * 64 * 1024,
+ };
+ 
+ #define MLX4_LEAST_ATTACHED_VECTOR	0xffffffff
+@@ -246,6 +245,8 @@ struct mlx4_caps {
+ 	u8			supported_type[MLX4_MAX_PORTS + 1];
+ 	u32			port_mask;
+ 	enum mlx4_port_type	possible_type[MLX4_MAX_PORTS + 1];
++	int                     reserved_fexch_mpts_base;
++	int                     num_fexch_mpts;
+ };
+ 
+ struct mlx4_buf_list {
+@@ -415,6 +416,12 @@ static inline void mlx4_query_steer_cap(struct mlx4_dev *dev, int *log_mac,
+ 		if ((type == MLX4_PORT_TYPE_IB ? (dev)->caps.port_mask : \
+ 		     ~(dev)->caps.port_mask) & 1 << ((port)-1))
+ 
++
++static inline int mlx4_get_fexch_mpts_base(struct mlx4_dev *dev)
++{
++	return dev->caps.reserved_fexch_mpts_base;
++}
++
+ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
+ 		   struct mlx4_buf *buf);
+ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
+@@ -441,8 +448,12 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
+ void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
+ u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
+ 
++int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
++			   u64 iova, u64 size, u32 access, int npages,
++			   int page_shift, struct mlx4_mr *mr);
+ int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
+ 		  int npages, int page_shift, struct mlx4_mr *mr);
++void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr);
+ void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr);
+ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr);
+ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+@@ -488,13 +499,20 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index);
+ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
+ void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
+ 
++int mlx4_map_phys_fmr_fbo(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
++			  u64 *page_list, int npages, u64 iova, u32 fbo,
++			  u32 len, u32 *lkey, u32 *rkey, int same_key);
+ int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
+ 		      int npages, u64 iova, u32 *lkey, u32 *rkey);
++int mlx4_fmr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
++			    u32 access, int max_pages, int max_maps,
++			    u8 page_shift, struct mlx4_fmr *fmr);
+ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
+ 		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr);
+ int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
+ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
+ 		    u32 *lkey, u32 *rkey);
++int mlx4_fmr_free_reserved(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
+ int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
+ int mlx4_SYNC_TPT(struct mlx4_dev *dev);
+ int mlx4_query_diag_counters(struct mlx4_dev *mlx4_dev, int array_length,
+diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
+index 98722a6..bb5bc75 100644
+--- a/include/linux/mlx4/qp.h
++++ b/include/linux/mlx4/qp.h
+@@ -152,7 +152,16 @@ struct mlx4_qp_context {
+ 	u8			reserved4[2];
+ 	u8			mtt_base_addr_h;
+ 	__be32			mtt_base_addr_l;
+-	u32			reserved5[10];
++	u8			VE;
++	u8			reserved5;
++	__be16			VFT_id_prio;
++	u8			reserved6;
++	u8			exch_size;
++	__be16			exch_base;
++	u8			VFT_hop_cnt;
++	u8			my_fc_id_idx;
++	__be16			reserved7;
++	u32			reserved8[7];
+ };
+ 
+ /* Which firmware version adds support for NEC (NoErrorCompletion) bit */
+-- 
+1.5.3
+

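For illustration, a minimal sketch of how an FC consumer might use the reserved-MPT variants described above. Only the exported mlx4 helpers (mlx4_get_fexch_mpts_base(), mlx4_fmr_alloc_reserved(), mlx4_map_phys_fmr_fbo()) come from this patch; the FC-side function names and parameters are assumptions, not part of the diff:

#include <linux/mlx4/device.h>

/* Hypothetical FC-side helper: set up the FMR for one pre-reserved FEXCH MPT. */
static int fexch_setup_fmr(struct mlx4_dev *dev, u32 pd, int fexch_idx,
			   struct mlx4_fmr *fmr)
{
	/* The MPT index comes from the pre-reserved FEXCH range, not the bitmap. */
	u32 mridx = mlx4_get_fexch_mpts_base(dev) + fexch_idx;

	return mlx4_fmr_alloc_reserved(dev, mridx, pd,
				       MLX4_PERM_LOCAL_READ | MLX4_PERM_LOCAL_WRITE,
				       16 /* max_pages */, 1 /* max_maps */,
				       PAGE_SHIFT, fmr);
}

/* Map a data buffer with a first-byte offset, reusing the same key on every map. */
static int fexch_map_data(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
			  u64 *pages, int npages, u64 iova, u32 fbo, u32 len,
			  u32 *lkey, u32 *rkey)
{
	return mlx4_map_phys_fmr_fbo(dev, fmr, pages, npages, iova,
				     fbo, len, lkey, rkey, 1 /* same_key */);
}
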
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1020_query_internal_dev.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1020_query_internal_dev.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1020_query_internal_dev.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,105 @@
+mlx4: Add API to query interfaces for a given internal device
+
+Updated the mlx4_en interface to provide a query function for its
+internal net_device structure.
+
+Signed-off-by: Oren Duer <oren at mellanox.co.il>
+
+Index: ofed_kernel-2.6.18-EL5.1.orig/drivers/net/mlx4/en_main.c
+===================================================================
+--- ofed_kernel-2.6.18-EL5.1.orig.orig/drivers/net/mlx4/en_main.c	2008-09-04 14:45:56.000000000 +0300
++++ ofed_kernel-2.6.18-EL5.1.orig/drivers/net/mlx4/en_main.c	2008-09-04 14:46:17.440543000 +0300
+@@ -234,10 +234,24 @@ err_free_res:
+ 	return NULL;
+ }
+ 
++enum mlx4_query_reply mlx4_en_query(void *endev_ptr, void *int_dev)
++{
++	struct mlx4_en_dev *mdev = endev_ptr;
++	struct net_device *netdev = int_dev;
++	int p;
++	
++	for (p = 1; p <= MLX4_MAX_PORTS; ++p)
++		if (mdev->pndev[p] == netdev)
++			return p;
++
++	return MLX4_QUERY_NOT_MINE;
++}
++
+ static struct mlx4_interface mlx4_en_interface = {
+ 	.add	= mlx4_en_add,
+ 	.remove	= mlx4_en_remove,
+-	.event	= mlx4_en_event
++	.event	= mlx4_en_event,
++	.query  = mlx4_en_query
+ };
+ 
+ static int __init mlx4_en_init(void)
+Index: ofed_kernel-2.6.18-EL5.1.orig/drivers/net/mlx4/intf.c
+===================================================================
+--- ofed_kernel-2.6.18-EL5.1.orig.orig/drivers/net/mlx4/intf.c	2008-09-04 14:45:47.000000000 +0300
++++ ofed_kernel-2.6.18-EL5.1.orig/drivers/net/mlx4/intf.c	2008-09-04 14:46:02.196098000 +0300
+@@ -112,6 +112,36 @@ void mlx4_unregister_interface(struct ml
+ }
+ EXPORT_SYMBOL_GPL(mlx4_unregister_interface);
+ 
++struct mlx4_dev *mlx4_query_interface(void *int_dev, int *port)
++{
++	struct mlx4_priv *priv;
++	struct mlx4_device_context *dev_ctx;
++	enum mlx4_query_reply r;
++	unsigned long flags;
++
++	mutex_lock(&intf_mutex);
++
++	list_for_each_entry(priv, &dev_list, dev_list) {
++		spin_lock_irqsave(&priv->ctx_lock, flags);
++		list_for_each_entry(dev_ctx, &priv->ctx_list, list) {
++			if (!dev_ctx->intf->query)
++				continue;
++			r = dev_ctx->intf->query(dev_ctx->context, int_dev);
++			if (r != MLX4_QUERY_NOT_MINE) {
++				*port = r;
++				spin_unlock_irqrestore(&priv->ctx_lock, flags);
++				mutex_unlock(&intf_mutex);
++				return &priv->dev;
++			}
++		}
++		spin_unlock_irqrestore(&priv->ctx_lock, flags);
++	}
++
++	mutex_unlock(&intf_mutex);
++	return NULL;
++}
++EXPORT_SYMBOL_GPL(mlx4_query_interface);
++
+ void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port)
+ {
+ 	struct mlx4_priv *priv = mlx4_priv(dev);
+Index: ofed_kernel-2.6.18-EL5.1.orig/include/linux/mlx4/driver.h
+===================================================================
+--- ofed_kernel-2.6.18-EL5.1.orig.orig/include/linux/mlx4/driver.h	2008-09-04 14:45:47.000000000 +0300
++++ ofed_kernel-2.6.18-EL5.1.orig/include/linux/mlx4/driver.h	2008-09-04 14:46:02.201102000 +0300
+@@ -44,15 +44,22 @@ enum mlx4_dev_event {
+ 	MLX4_DEV_EVENT_PORT_REINIT,
+ };
+ 
++enum mlx4_query_reply {
++	MLX4_QUERY_NOT_MINE	= -1,
++	MLX4_QUERY_MINE_NOPORT 	= 0
++};
++
+ struct mlx4_interface {
+ 	void *			(*add)	 (struct mlx4_dev *dev);
+ 	void			(*remove)(struct mlx4_dev *dev, void *context);
+ 	void			(*event) (struct mlx4_dev *dev, void *context,
+ 					  enum mlx4_dev_event event, int port);
++	enum mlx4_query_reply	(*query) (void *context, void *);
+ 	struct list_head	list;
+ };
+ 
+ int mlx4_register_interface(struct mlx4_interface *intf);
+ void mlx4_unregister_interface(struct mlx4_interface *intf);
++struct mlx4_dev *mlx4_query_interface(void *, int *port);
+ 
+ #endif /* MLX4_DRIVER_H */

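A rough sketch of how the new query mechanism is meant to be used (the caller and interface names here are assumed; only mlx4_query_interface(), the .query callback and MLX4_QUERY_NOT_MINE come from this patch):

#include <linux/netdevice.h>
#include <linux/mlx4/driver.h>

struct example_dev {			/* hypothetical per-HCA context */
	void *child_dev;
};

/* Resolve a net_device back to the mlx4 device and port that own it. */
static struct mlx4_dev *resolve_mlx4_parent(struct net_device *netdev, int *port)
{
	/* Returns NULL when no registered mlx4 interface claims this netdev. */
	return mlx4_query_interface(netdev, port);
}

/* An interface opts in by returning the port number for devices it owns. */
static enum mlx4_query_reply example_query(void *context, void *int_dev)
{
	struct example_dev *edev = context;

	return (int_dev == edev->child_dev) ? 1 : MLX4_QUERY_NOT_MINE;
}
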
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1030_register_vlan_fix.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1030_register_vlan_fix.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1030_register_vlan_fix.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,35 @@
+mlx4_core: Fix VLAN registration
+
+Signed-off-by: Oren Duer <oren at mellanox.co.il>
+
+Index: ofed_kernel-2.6.18-EL5.1.orig/drivers/net/mlx4/en_port.c
+===================================================================
+--- ofed_kernel-2.6.18-EL5.1.orig.orig/drivers/net/mlx4/en_port.c	2008-09-04 15:00:59.497911000 +0300
++++ ofed_kernel-2.6.18-EL5.1.orig/drivers/net/mlx4/en_port.c	2008-09-04 15:01:16.979282000 +0300
+@@ -129,6 +129,10 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_d
+ 	context->base_qpn = cpu_to_be32(base_qpn);
+ 	context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | base_qpn);
+ 	context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_SHIFT | base_qpn);
++	context->intra_no_vlan = 0;
++	context->no_vlan = MLX4_NO_VLAN_IDX;
++	context->intra_vlan_miss = 0;
++	context->vlan_miss = MLX4_VLAN_MISS_IDX;
+ 
+ 	in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
+ 	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+Index: ofed_kernel-2.6.18-EL5.1.orig/drivers/net/mlx4/en_port.h
+===================================================================
+--- ofed_kernel-2.6.18-EL5.1.orig.orig/drivers/net/mlx4/en_port.h	2008-09-04 15:00:59.500913000 +0300
++++ ofed_kernel-2.6.18-EL5.1.orig/drivers/net/mlx4/en_port.h	2008-09-04 15:01:16.984285000 +0300
+@@ -75,9 +75,9 @@ struct mlx4_set_port_rqp_calc_context {
+ 	__be32 flags;
+ 	u8 reserved[3];
+ 	u8 mac_miss;
+-	u8 reserved2;
++	u8 intra_no_vlan;
+ 	u8 no_vlan;
+-	u8 reserved3;
++	u8 intra_vlan_miss;
+ 	u8 vlan_miss;
+ 	u8 reserved4[3];
+ 	u8 no_vlan_prio;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1050_remove_fexch_reservation.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1050_remove_fexch_reservation.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1050_remove_fexch_reservation.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,127 @@
+mlx4_core: removed reservation of FEXCH QPs and MPTs
+
+The mlx4_fc module will reserve them upon loading.
+Added MPT reserve_range and release_range functions.
+
+Signed-off-by: Oren Duer <oren at mellanox.co.il>
+
+Index: ofed-1.4/include/linux/mlx4/device.h
+===================================================================
+--- ofed-1.4.orig/include/linux/mlx4/device.h	2009-01-05 13:16:27.246419000 +0200
++++ ofed-1.4/include/linux/mlx4/device.h	2009-01-05 14:34:21.571300000 +0200
+@@ -152,7 +152,6 @@ enum qp_region {
+ 	MLX4_QP_REGION_FW = 0,
+ 	MLX4_QP_REGION_ETH_ADDR,
+ 	MLX4_QP_REGION_FC_ADDR,
+-	MLX4_QP_REGION_FC_EXCH,
+ 	MLX4_QP_REGION_COUNT
+ };
+ 
+@@ -168,10 +167,6 @@ enum mlx4_special_vlan_idx {
+ 	MLX4_VLAN_REGULAR
+ };
+ 
+-enum {
+-	MLX4_NUM_FEXCH          = MLX4_MAX_PORTS * 64 * 1024,
+-};
+-
+ #define MLX4_LEAST_ATTACHED_VECTOR	0xffffffff
+ 
+ static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
+@@ -245,8 +240,6 @@ struct mlx4_caps {
+ 	u8			supported_type[MLX4_MAX_PORTS + 1];
+ 	u32			port_mask;
+ 	enum mlx4_port_type	possible_type[MLX4_MAX_PORTS + 1];
+-	int                     reserved_fexch_mpts_base;
+-	int                     num_fexch_mpts;
+ };
+ 
+ struct mlx4_buf_list {
+@@ -416,12 +409,6 @@ static inline void mlx4_query_steer_cap(
+ 		if ((type == MLX4_PORT_TYPE_IB ? (dev)->caps.port_mask : \
+ 		     ~(dev)->caps.port_mask) & 1 << ((port)-1))
+ 
+-
+-static inline int mlx4_get_fexch_mpts_base(struct mlx4_dev *dev)
+-{
+-	return dev->caps.reserved_fexch_mpts_base;
+-}
+-
+ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
+ 		   struct mlx4_buf *buf);
+ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
+@@ -448,6 +435,8 @@ int mlx4_mtt_init(struct mlx4_dev *dev, 
+ void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
+ u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
+ 
++int mlx4_mr_reserve_range(struct mlx4_dev *dev, int cnt, int align, u32 *base_mridx);
++void mlx4_mr_release_range(struct mlx4_dev *dev, u32 base_mridx, int cnt);
+ int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
+ 			   u64 iova, u64 size, u32 access, int npages,
+ 			   int page_shift, struct mlx4_mr *mr);
+Index: ofed-1.4/drivers/net/mlx4/main.c
+===================================================================
+--- ofed-1.4.orig/drivers/net/mlx4/main.c	2009-01-05 12:51:17.000000000 +0200
++++ ofed-1.4/drivers/net/mlx4/main.c	2009-01-05 13:21:06.157378000 +0200
+@@ -326,12 +326,10 @@ static int mlx4_dev_cap(struct mlx4_dev 
+ 		(1 << dev->caps.log_num_vlans)*
+ 		(1 << dev->caps.log_num_prios)*
+ 		dev->caps.num_ports;
+-	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
+ 
+ 	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
+ 		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
+-		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
+-		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
++		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR];
+ 
+ 	return 0;
+ }
+Index: ofed-1.4/drivers/net/mlx4/mr.c
+===================================================================
+--- ofed-1.4.orig/drivers/net/mlx4/mr.c	2009-01-05 12:51:17.000000000 +0200
++++ ofed-1.4/drivers/net/mlx4/mr.c	2009-01-05 13:32:06.731617000 +0200
+@@ -267,6 +267,28 @@ static int mlx4_HW2SW_MPT(struct mlx4_de
+ 			    !mailbox, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_B);
+ }
+ 
++int mlx4_mr_reserve_range(struct mlx4_dev *dev, int cnt, int align, u32 *base_mridx)
++{
++	struct mlx4_priv *priv = mlx4_priv(dev);
++	u32 mridx;
++
++	mridx = mlx4_bitmap_alloc_range(&priv->mr_table.mpt_bitmap, cnt, align);
++	if (mridx == -1)
++		return -ENOMEM;
++
++	*base_mridx = mridx;
++	return 0;
++
++}
++EXPORT_SYMBOL_GPL(mlx4_mr_reserve_range);
++
++void mlx4_mr_release_range(struct mlx4_dev *dev, u32 base_mridx, int cnt)
++{
++	struct mlx4_priv *priv = mlx4_priv(dev);
++	mlx4_bitmap_free_range(&priv->mr_table.mpt_bitmap, base_mridx, cnt);
++}
++EXPORT_SYMBOL_GPL(mlx4_mr_release_range);
++
+ int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
+ 			   u64 iova, u64 size, u32 access, int npages,
+ 			   int page_shift, struct mlx4_mr *mr)
+@@ -482,13 +504,8 @@ int mlx4_init_mr_table(struct mlx4_dev *
+ 	if (!is_power_of_2(dev->caps.num_mpts))
+ 		return -EINVAL;
+ 
+-	dev->caps.num_fexch_mpts =
+-		2 * dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
+-	dev->caps.reserved_fexch_mpts_base = dev->caps.num_mpts -
+-		dev->caps.num_fexch_mpts;
+ 	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
+-			       ~0, dev->caps.reserved_mrws,
+-			       dev->caps.reserved_fexch_mpts_base);
++			       ~0, dev->caps.reserved_mrws, 0);
+ 	if (err)
+ 		return err;
+ 

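A sketch of the intended usage (the mlx4_fc side is assumed here and is not part of this diff): the FC module reserves the FEXCH MPT range when it loads and releases it on unload, keeping the 2-MPTs-per-FEXCH sizing that mlx4_core used before:

#include <linux/mlx4/device.h>

static u32 fexch_mpt_base;	/* hypothetical module-level state */

static int reserve_fexch_mpts(struct mlx4_dev *dev, int num_fexch)
{
	/* Two MPTs per FEXCH QP; align the range to its own size. */
	return mlx4_mr_reserve_range(dev, 2 * num_fexch, 2 * num_fexch,
				     &fexch_mpt_base);
}

static void release_fexch_mpts(struct mlx4_dev *dev, int num_fexch)
{
	mlx4_mr_release_range(dev, fexch_mpt_base, 2 * num_fexch);
}
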
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1060_set_4k_mtu.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1060_set_4k_mtu.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1060_set_4k_mtu.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,40 @@
+From e530dc29b3faa89d3119d20ab0b46d23b152919b Mon Sep 17 00:00:00 2001
+From: Vladimir Sokolovsky <vlad at mellanox.co.il>
+Date: Thu, 8 Jan 2009 09:45:50 +0200
+Subject: [PATCH] mlx4/IB: Add set_4k_mtu module parameter.
+
+It controls the InfiniBand link MTU for all IB ports in a host.
+
+Signed-off-by: Vladimir Sokolovsky <vlad at mellanox.co.il>
+---
+ drivers/infiniband/hw/mlx4/main.c |    9 +++++++++
+ 1 files changed, 9 insertions(+), 0 deletions(-)
+
+Index: ofa_1_4_dev_kernel/drivers/net/mlx4/port.c
+===================================================================
+--- ofa_1_4_dev_kernel.orig/drivers/net/mlx4/port.c
++++ ofa_1_4_dev_kernel/drivers/net/mlx4/port.c
+@@ -38,6 +38,10 @@
+ 
+ #include "mlx4.h"
+ 
++int mlx4_ib_set_4k_mtu = 0;
++module_param_named(set_4k_mtu, mlx4_ib_set_4k_mtu, int, 0444);
++MODULE_PARM_DESC(set_4k_mtu, "attempt to set 4K MTU to all ConnectX ports");
++
+ void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
+ {
+ 	int i;
+@@ -305,8 +309,11 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, 
+ 		((u8 *) mailbox->buf)[3] = 6;
+ 		((__be16 *) mailbox->buf)[4] = cpu_to_be16(1 << 15);
+ 		((__be16 *) mailbox->buf)[6] = cpu_to_be16(1 << 15);
+-	} else
++	} else {
++		if (mlx4_ib_set_4k_mtu)
++			((__be32 *) mailbox->buf)[0] |= cpu_to_be32((1 << 22) | (1 << 21) | (5 << 12) | (2 << 4));
+ 		((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
++	}
+ 	err = mlx4_cmd(dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
+ 		       MLX4_CMD_TIME_CLASS_B);
+ 

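Since the parameter is created in drivers/net/mlx4/port.c, it belongs to the mlx4_core module; a typical (illustrative) way to enable it would be a modprobe option such as:

options mlx4_core set_4k_mtu=1

in the modprobe configuration, or set_4k_mtu=1 on the modprobe command line.
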
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1070-optimize-huge_tlb.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1070-optimize-huge_tlb.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1070-optimize-huge_tlb.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,118 @@
+Since Linux may not merge adjacent pages into a single scatter entry through
+calls to dma_map_sg(), we check for the special case of hugetlb pages, which
+are likely to be mapped to contiguous DMA addresses; if they are, we take
+advantage of this. The result is a significantly lower number of MTT segments
+used for registering hugetlb memory regions.
+
+Signed-off-by: Eli Cohen <eli at mellanox.co.il>
+---
+ drivers/infiniband/hw/mlx4/mr.c |   81 ++++++++++++++++++++++++++++++++++----
+ 1 files changed, 72 insertions(+), 9 deletions(-)
+
+Index: b/drivers/infiniband/hw/mlx4/mr.c
+===================================================================
+--- a/drivers/infiniband/hw/mlx4/mr.c	2008-11-19 21:32:15.000000000 +0200
++++ b/drivers/infiniband/hw/mlx4/mr.c	2009-03-30 18:29:55.000000000 +0300
+@@ -119,6 +119,70 @@ out:
+ 	return err;
+ }
+ 
++static int handle_hugetlb_user_mr(struct ib_pd *pd, struct mlx4_ib_mr *mr,
++				  u64 start, u64 virt_addr, int access_flags)
++{
++#if defined(CONFIG_HUGETLB_PAGE) && !defined(__powerpc__) && !defined(__ia64__)
++	struct mlx4_ib_dev *dev = to_mdev(pd->device);
++	struct ib_umem_chunk *chunk;
++	unsigned dsize;
++	dma_addr_t daddr;
++	unsigned cur_size = 0;
++	dma_addr_t uninitialized_var(cur_addr);
++	int n;
++	struct ib_umem	*umem = mr->umem;
++	u64 *arr;
++	int err = 0;
++	int i;
++	int j = 0;
++	int off = start & (HPAGE_SIZE - 1);
++
++	n = DIV_ROUND_UP(off + umem->length, HPAGE_SIZE);
++	arr = kmalloc(n * sizeof *arr, GFP_KERNEL);
++	if (!arr)
++		return -ENOMEM;
++
++	list_for_each_entry(chunk, &umem->chunk_list, list)
++		for (i = 0; i < chunk->nmap; ++i) {
++			daddr = sg_dma_address(&chunk->page_list[i]);
++			dsize = sg_dma_len(&chunk->page_list[i]);
++			if (!cur_size) {
++				cur_addr = daddr;
++				cur_size = dsize;
++			} else if (cur_addr + cur_size != daddr) {
++				err = -EINVAL;
++				goto out;
++			} else
++				cur_size += dsize;
++
++			if (cur_size > HPAGE_SIZE) {
++				err = -EINVAL;
++				goto out;
++			} else if (cur_size == HPAGE_SIZE) {
++				cur_size = 0;
++				arr[j++] = cur_addr;
++			}
++		}
++
++	if (cur_size) {
++		arr[j++] = cur_addr;
++	}
++
++	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, umem->length,
++			    convert_access(access_flags), n, HPAGE_SHIFT, &mr->mmr);
++	if (err)
++		goto out;
++
++	err = mlx4_write_mtt(dev->dev, &mr->mmr.mtt, 0, n, arr);
++
++out:
++	kfree(arr);
++	return err;
++#else
++	return -ENOSYS;
++#endif
++}
++
+ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ 				  u64 virt_addr, int access_flags,
+ 				  struct ib_udata *udata)
+@@ -140,17 +204,20 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct
+ 		goto err_free;
+ 	}
+ 
+-	n = ib_umem_page_count(mr->umem);
+-	shift = ilog2(mr->umem->page_size);
+-
+-	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
+-			    convert_access(access_flags), n, shift, &mr->mmr);
+-	if (err)
+-		goto err_umem;
+-
+-	err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
+-	if (err)
+-		goto err_mr;
++	if (!mr->umem->hugetlb ||
++	    handle_hugetlb_user_mr(pd, mr, start, virt_addr, access_flags)) {
++		n = ib_umem_page_count(mr->umem);
++		shift = ilog2(mr->umem->page_size);
++
++		err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
++				    convert_access(access_flags), n, shift, &mr->mmr);
++		if (err)
++			goto err_umem;
++
++		err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
++		if (err)
++			goto err_mr;
++	}
+ 
+ 	err = mlx4_mr_enable(dev->dev, &mr->mmr);
+ 	if (err)

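To put a rough number on the saving (illustrative, assuming x86_64 with 2 MB huge pages and a 4 KB base page size): registering a 1 GB hugetlb region needs 1 GB / 4 KB = 262,144 MTT entries when described page by page, but only 1 GB / 2 MB = 512 entries when each huge page is written as a single HPAGE_SHIFT-sized MTT entry, roughly a 512x reduction in MTT (and hence segment) consumption for that registration.
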
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1080_no_bf_without_wc.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1080_no_bf_without_wc.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1080_no_bf_without_wc.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,25 @@
+mlx4_ib: Do not enable blueflame sends if write combining is not available.
+
+Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
+
+Index: ofed_kernel/drivers/infiniband/hw/mlx4/main.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/mlx4/main.c	2009-02-26 13:52:09.000000000 +0200
++++ ofed_kernel/drivers/infiniband/hw/mlx4/main.c	2009-02-26 18:05:29.962092000 +0200
+@@ -356,8 +356,14 @@ static struct ib_ucontext *mlx4_ib_alloc
+ 	int err;
+ 
+ 	resp.qp_tab_size      = dev->dev->caps.num_qps;
+-	resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
+-	resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
++
++	if (mlx4_wc_enabled()) {
++		resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
++		resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
++	} else {
++		resp.bf_reg_size      = 0;
++		resp.bf_regs_per_page = 0;
++	}
+ 
+ 	context = kmalloc(sizeof *context, GFP_KERNEL);
+ 	if (!context)

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1090_do_not_set_ethernet_port.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1090_do_not_set_ethernet_port.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1090_do_not_set_ethernet_port.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,54 @@
+From b76ff148f6a72e510bc5ed87b752bfeb4cd2f73e Mon Sep 17 00:00:00 2001
+From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
+Date: Sun, 1 Mar 2009 16:57:51 +0200
+Subject: [PATCH] mlx4_core: Don't perform SET_PORT for Eth port
+
+Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
+---
+ drivers/net/mlx4/port.c |   22 ++++++++++------------
+ 1 files changed, 10 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
+index 46bd287..b93fb6c 100644
+--- a/drivers/net/mlx4/port.c
++++ b/drivers/net/mlx4/port.c
+@@ -297,26 +297,24 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
+ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
+ {
+ 	struct mlx4_cmd_mailbox *mailbox;
+-	int err;
+-	u8 is_eth = (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) ? 1 : 0;
++	int err = 0;
+ 
+ 	mailbox = mlx4_alloc_cmd_mailbox(dev);
+ 	if (IS_ERR(mailbox))
+ 		return PTR_ERR(mailbox);
+ 
+ 	memset(mailbox->buf, 0, 256);
+-	if (is_eth) {
+-		((u8 *) mailbox->buf)[3] = 6;
+-		((__be16 *) mailbox->buf)[4] = cpu_to_be16(1 << 15);
+-		((__be16 *) mailbox->buf)[6] = cpu_to_be16(1 << 15);
+-	} else {
+-		if (mlx4_ib_set_4k_mtu)
+-			((__be32 *) mailbox->buf)[0] |= cpu_to_be32((1 << 22) | (1 << 21) | (5 << 12) | (2 << 4));
+-		((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
+-	}
+-	err = mlx4_cmd(dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
++	if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
++		goto out;
++
++	if (mlx4_ib_set_4k_mtu)
++		((__be32 *) mailbox->buf)[0] |= cpu_to_be32((1 << 22) | (1 << 21) | (5 << 12) | (2 << 4));
++	((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
++
++	err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
+ 		       MLX4_CMD_TIME_CLASS_B);
+ 
++out:
+ 	mlx4_free_cmd_mailbox(dev, mailbox);
+ 	return err;
+ }
+-- 
+1.5.3
+

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1100-mtt_seg_size_param.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1100-mtt_seg_size_param.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1100-mtt_seg_size_param.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,119 @@
+From 0c1b9ad3d28eb52c18d97aab9a2add48a1313af6 Mon Sep 17 00:00:00 2001
+From: Eli Cohen <eli at mellanox.co.il>
+Date: Mon, 2 Mar 2009 10:20:28 +0200
+Subject: [PATCH] mlx4_core: Use module parameter for number of MTTs per segment
+
+The current MTT allocator uses kmalloc to allocate the buffer for its buddy-
+system implementation and is therefore limited in the number of MTT segments
+it can control. As a result, the size of memory that can be registered is
+limited too. This patch uses a module parameter to control the number of MTT
+entries that each segment represents, allowing more memory to be registered
+with the same number of segments.
+
+Signed-off-by: Eli Cohen <eli at mellanox.co.il>
+---
+ drivers/net/mlx4/main.c     |   14 ++++++++++++--
+ drivers/net/mlx4/mr.c       |    6 +++---
+ drivers/net/mlx4/profile.c  |    2 +-
+ include/linux/mlx4/device.h |    1 +
+ 4 files changed, 17 insertions(+), 6 deletions(-)
+
+Index: ofed_kernel-fixes/drivers/net/mlx4/main.c
+===================================================================
+--- ofed_kernel-fixes.orig/drivers/net/mlx4/main.c	2009-03-02 11:01:36.000000000 +0200
++++ ofed_kernel-fixes/drivers/net/mlx4/main.c	2009-03-02 11:07:05.000000000 +0200
+@@ -114,6 +114,10 @@ module_param_named(log_num_mtt, mod_para
+ MODULE_PARM_DESC(log_num_mtt,
+ 		 "log maximum number of memory translation table segments per HCA");
+ 
++static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
++module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
++MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
++
+ static void process_mod_param_profile(void)
+ {
+ 	default_profile.num_qp = (mod_param_profile.num_qp ?
+@@ -274,12 +278,13 @@ static int mlx4_dev_cap(struct mlx4_dev 
+ 	dev->caps.max_cqes	     = dev_cap->max_cq_sz - 1;
+ 	dev->caps.reserved_cqs	     = dev_cap->reserved_cqs;
+ 	dev->caps.reserved_eqs	     = dev_cap->reserved_eqs;
++	dev->caps.mtts_per_seg	     = 1 << log_mtts_per_seg;
+ 	dev->caps.reserved_mtts	     = DIV_ROUND_UP(dev_cap->reserved_mtts,
+-						    MLX4_MTT_ENTRY_PER_SEG);
++						    dev->caps.mtts_per_seg);
+ 	dev->caps.reserved_mrws	     = dev_cap->reserved_mrws;
+ 	dev->caps.reserved_uars	     = dev_cap->reserved_uars;
+ 	dev->caps.reserved_pds	     = dev_cap->reserved_pds;
+-	dev->caps.mtt_entry_sz	     = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
++	dev->caps.mtt_entry_sz	     = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
+ 	dev->caps.max_msg_sz         = dev_cap->max_msg_sz;
+ 	dev->caps.page_size_cap	     = ~(u32) (dev_cap->min_page_sz - 1);
+ 	dev->caps.flags		     = dev_cap->flags;
+@@ -1451,6 +1456,11 @@ static int __init mlx4_verify_params(voi
+ 		return -1;
+ 	}
+ 
++	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
++		printk(KERN_WARNING "mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
++		return -1;
++	}
++
+ 	return 0;
+ }
+ 
+Index: ofed_kernel-fixes/drivers/net/mlx4/mr.c
+===================================================================
+--- ofed_kernel-fixes.orig/drivers/net/mlx4/mr.c	2009-03-02 11:01:36.000000000 +0200
++++ ofed_kernel-fixes/drivers/net/mlx4/mr.c	2009-03-02 11:06:26.000000000 +0200
+@@ -213,7 +213,7 @@ int mlx4_mtt_init(struct mlx4_dev *dev, 
+ 	} else
+ 		mtt->page_shift = page_shift;
+ 
+-	for (mtt->order = 0, i = MLX4_MTT_ENTRY_PER_SEG; i < npages; i <<= 1)
++	for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
+ 		++mtt->order;
+ 
+ 	mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
+@@ -391,7 +391,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev,
+ 		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
+ 						   MLX4_MPT_PD_FLAG_RAE);
+ 		mpt_entry->mtt_sz    = cpu_to_be32((1 << mr->mtt.order) *
+-						   MLX4_MTT_ENTRY_PER_SEG);
++						   dev->caps.mtts_per_seg);
+ 	} else {
+ 		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
+ 	}
+@@ -432,7 +432,7 @@ static int mlx4_write_mtt_chunk(struct m
+ 	    (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
+ 		return -EINVAL;
+ 
+-	if (start_index & (MLX4_MTT_ENTRY_PER_SEG - 1))
++	if (start_index & (dev->caps.mtts_per_seg - 1))
+ 		return -EINVAL;
+ 
+ 	mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
+Index: ofed_kernel-fixes/drivers/net/mlx4/profile.c
+===================================================================
+--- ofed_kernel-fixes.orig/drivers/net/mlx4/profile.c	2009-03-02 11:01:12.000000000 +0200
++++ ofed_kernel-fixes/drivers/net/mlx4/profile.c	2009-03-02 11:06:26.000000000 +0200
+@@ -98,7 +98,7 @@ u64 mlx4_make_profile(struct mlx4_dev *d
+ 	profile[MLX4_RES_EQ].size     = dev_cap->eqc_entry_sz;
+ 	profile[MLX4_RES_DMPT].size   = dev_cap->dmpt_entry_sz;
+ 	profile[MLX4_RES_CMPT].size   = dev_cap->cmpt_entry_sz;
+-	profile[MLX4_RES_MTT].size    = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
++	profile[MLX4_RES_MTT].size    = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
+ 	profile[MLX4_RES_MCG].size    = MLX4_MGM_ENTRY_SIZE;
+ 
+ 	profile[MLX4_RES_QP].num      = request->num_qp;
+Index: ofed_kernel-fixes/include/linux/mlx4/device.h
+===================================================================
+--- ofed_kernel-fixes.orig/include/linux/mlx4/device.h	2009-03-02 11:01:36.000000000 +0200
++++ ofed_kernel-fixes/include/linux/mlx4/device.h	2009-03-02 11:06:26.000000000 +0200
+@@ -209,6 +209,7 @@ struct mlx4_caps {
+ 	int			num_comp_vectors;
+ 	int			num_mpts;
+ 	int			num_mtt_segs;
++	int			mtts_per_seg;
+ 	int			fmr_reserved_mtts;
+ 	int			reserved_mtts;
+ 	int			reserved_mrws;

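For illustration only, a standalone userspace sketch of the segment-size arithmetic behind the log_mtts_per_seg parameter added above; the constants and names below are examples, not values taken from the driver:

#include <stdio.h>

int main(void)
{
	int log_mtts_per_seg = 3;               /* driver default: ilog2(8) */
	int mtts_per_seg = 1 << log_mtts_per_seg;
	int mtt_entry_sz = 8;                   /* bytes per MTT entry (example) */
	long long num_segs = 1LL << 20;         /* example segment count */

	/* Each MTT entry maps one page, so for a fixed number of segments the
	 * registrable memory grows linearly with mtts_per_seg. */
	long long pages = num_segs * mtts_per_seg;
	printf("segment size: %d bytes, mappable pages: %lld (%lld GiB at 4 KiB)\n",
	       mtts_per_seg * mtt_entry_sz, pages, (pages * 4096) >> 30);
	return 0;
}

Raising log_mtts_per_seg from 3 to its maximum of 5 quadruples the registrable memory while leaving the kmalloc'ed buddy bitmap the same size.
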
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1100_unregister_IB_device_before_CLOSE_PORT.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1100_unregister_IB_device_before_CLOSE_PORT.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1100_unregister_IB_device_before_CLOSE_PORT.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,32 @@
+From be857f3803a7ad20a57d8f283d3d9fdc7fdd6206 Mon Sep 17 00:00:00 2001
+From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
+Date: Tue, 10 Mar 2009 14:58:26 +0200
+Subject: [PATCH] mlx4_ib: Unregister IB device before CLOSE_PORT
+
+Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
+---
+ drivers/infiniband/hw/mlx4/main.c |    5 +++--
+ 1 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
+index 7ecb001..4ef9357 100644
+--- a/drivers/infiniband/hw/mlx4/main.c
++++ b/drivers/infiniband/hw/mlx4/main.c
+@@ -945,11 +945,12 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
+ 
+ 	sysfs_remove_group(&ibdev->ib_dev.dev.kobj, &diag_counters_group);
+ 
++	mlx4_ib_mad_cleanup(ibdev);
++	ib_unregister_device(&ibdev->ib_dev);
++
+ 	for (p = 1; p <= ibdev->num_ports; ++p)
+ 		mlx4_CLOSE_PORT(dev, p);
+ 
+-	mlx4_ib_mad_cleanup(ibdev);
+-	ib_unregister_device(&ibdev->ib_dev);
+ 	iounmap(ibdev->uar_map);
+ 	mlx4_uar_free(dev, &ibdev->priv_uar);
+ 	mlx4_pd_free(dev, ibdev->priv_pdn);
+-- 
+1.5.3.7
+

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1110_added_transceiver_type_to_QUERY_PORT.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1110_added_transceiver_type_to_QUERY_PORT.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1110_added_transceiver_type_to_QUERY_PORT.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,99 @@
+From 8bb1eda76b03de7bf72a11caaa3535e0a294fc5c Mon Sep 17 00:00:00 2001
+From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
+Date: Tue, 17 Mar 2009 15:43:44 +0200
+Subject: [PATCH] Added transceiver type to QUERY_PORT
+
+Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
+---
+ drivers/net/mlx4/fw.c       |   13 +++++++++++++
+ drivers/net/mlx4/fw.h       |    4 ++++
+ drivers/net/mlx4/main.c     |    4 ++++
+ include/linux/mlx4/device.h |    4 ++++
+ 4 files changed, 25 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
+index ca3ea10..147fc04 100644
+--- a/drivers/net/mlx4/fw.c
++++ b/drivers/net/mlx4/fw.c
+@@ -138,6 +138,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
+ 	struct mlx4_cmd_mailbox *mailbox;
+ 	u32 *outbox;
+ 	u8 field;
++	u16 field16;
++	u32 field32;
++	u64 field64;
+ 	u16 size;
+ 	u16 stat_rate;
+ 	int err;
+@@ -367,6 +370,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
+ #define QUERY_PORT_MAX_GID_PKEY_OFFSET		0x07
+ #define QUERY_PORT_MAX_MACVLAN_OFFSET		0x0a
+ #define QUERY_PORT_MAX_VL_OFFSET		0x0b
++#define QUERY_PORT_TRANS_VENDOR_OFFSET		0x18
++#define QUERY_PORT_WAVELENGTH_OFFSET		0x1c
++#define QUERY_PORT_TRANS_CODE_OFFSET		0x20
+ 
+ 		for (i = 1; i <= dev_cap->num_ports; ++i) {
+ 			err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
+@@ -391,6 +397,13 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
+ 			dev_cap->log_max_vlans[i] = field >> 4;
+ 			dev_cap->eth_mtu[i] = be16_to_cpu(((u16 *) outbox)[1]);
+ 			dev_cap->def_mac[i] = be64_to_cpu(((u64 *) outbox)[2]);
++			MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
++			dev_cap->trans_type[i] = field32 >> 24;
++			dev_cap->vendor_oui[i] = field32 & 0xffffff;
++			MLX4_GET(field16, outbox, QUERY_PORT_WAVELENGTH_OFFSET);
++			dev_cap->wavelength[i] = field16;
++			MLX4_GET(field64, outbox, QUERY_PORT_TRANS_CODE_OFFSET);
++			dev_cap->trans_code[i] = field64;
+ 		}
+ 	}
+ 
+diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
+index 300ef6b..39a3608 100644
+--- a/drivers/net/mlx4/fw.h
++++ b/drivers/net/mlx4/fw.h
+@@ -73,6 +73,10 @@ struct mlx4_dev_cap {
+ 	int max_pkeys[MLX4_MAX_PORTS + 1];
+ 	u64 def_mac[MLX4_MAX_PORTS + 1];
+ 	int eth_mtu[MLX4_MAX_PORTS + 1];
++	int trans_type[MLX4_MAX_PORTS + 1];
++	int vendor_oui[MLX4_MAX_PORTS + 1];
++	int wavelength[MLX4_MAX_PORTS + 1];
++	u64 trans_code[MLX4_MAX_PORTS + 1];
+ 	u16 stat_rate_support;
+ 	u32 flags;
+ 	int reserved_uars;
+diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
+index 50a6ce3..bff54f0 100644
+--- a/drivers/net/mlx4/main.c
++++ b/drivers/net/mlx4/main.c
+@@ -250,6 +250,10 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
+ 		dev->caps.eth_mtu_cap[i]    = dev_cap->eth_mtu[i];
+ 		dev->caps.def_mac[i]        = dev_cap->def_mac[i];
+ 		dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
++		dev->caps.trans_type[i]	    = dev_cap->trans_type[i];
++		dev->caps.vendor_oui[i]     = dev_cap->vendor_oui[i];
++		dev->caps.wavelength[i]     = dev_cap->wavelength[i];
++		dev->caps.trans_code[i]     = dev_cap->trans_code[i];
+ 	}
+ 
+ 	dev->caps.num_uars	     = dev_cap->uar_size / PAGE_SIZE;
+diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
+index 577a072..22f3fc2 100644
+--- a/include/linux/mlx4/device.h
++++ b/include/linux/mlx4/device.h
+@@ -184,6 +184,10 @@ struct mlx4_caps {
+ 	int			eth_mtu_cap[MLX4_MAX_PORTS + 1];
+ 	int			gid_table_len[MLX4_MAX_PORTS + 1];
+ 	int			pkey_table_len[MLX4_MAX_PORTS + 1];
++	int			trans_type[MLX4_MAX_PORTS + 1];
++	int			vendor_oui[MLX4_MAX_PORTS + 1];
++	int			wavelength[MLX4_MAX_PORTS + 1];
++	u64			trans_code[MLX4_MAX_PORTS + 1];
+ 	int			local_ca_ack_delay;
+ 	int			num_uars;
+ 	int			bf_reg_size;
+-- 
+1.5.3
+

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1120_fast_reg.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1120_fast_reg.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mlx4_1120_fast_reg.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,79 @@
+mlx4_ib: fix fast registration implementation.
+
+The low-level driver converted the page-list addresses to big-endian for the
+FRWR post send and set a "present" bit. This caused problems later when the
+ULP attempted to unmap the pages in the page list, using list addresses it
+assumed were still in CPU-endian order.
+
+The cause of the crash was found by Vu Pham of Mellanox.
+The fix is along the lines suggested by Steve Wise in comment #21 in Bugzilla 1571.
+
+This patch fixes Bugzilla 1571.
+
+Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
+
+Index: ofed_kernel/drivers/infiniband/hw/mlx4/mlx4_ib.h
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/mlx4/mlx4_ib.h	2009-05-07 13:19:00.340990000 +0300
++++ ofed_kernel/drivers/infiniband/hw/mlx4/mlx4_ib.h	2009-05-07 13:21:55.861691000 +0300
+@@ -119,6 +119,7 @@ struct mlx4_ib_mr {
+ 
+ struct mlx4_ib_fast_reg_page_list {
+ 	struct ib_fast_reg_page_list	ibfrpl;
++	u64				*mapped_page_list;
+ 	dma_addr_t			map;
+ };
+ 
+Index: ofed_kernel/drivers/infiniband/hw/mlx4/mr.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/mlx4/mr.c	2009-05-07 13:19:01.718954000 +0300
++++ ofed_kernel/drivers/infiniband/hw/mlx4/mr.c	2009-05-07 13:19:03.464882000 +0300
+@@ -298,16 +298,22 @@ struct ib_fast_reg_page_list *mlx4_ib_al
+ 	if (!mfrpl)
+ 		return ERR_PTR(-ENOMEM);
+ 
+-	mfrpl->ibfrpl.page_list = dma_alloc_coherent(&dev->dev->pdev->dev,
++	mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
++	if (!mfrpl->ibfrpl.page_list)
++		goto err_free;
++
++	mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->pdev->dev,
+ 						     size, &mfrpl->map,
+ 						     GFP_KERNEL);
+ 	if (!mfrpl->ibfrpl.page_list)
+-		goto err_free;
++		goto err_free_mfrpl;
+ 
+ 	WARN_ON(mfrpl->map & 0x3f);
+ 
+ 	return &mfrpl->ibfrpl;
+ 
++err_free_mfrpl:
++	kfree(mfrpl->ibfrpl.page_list);
+ err_free:
+ 	kfree(mfrpl);
+ 	return ERR_PTR(-ENOMEM);
+@@ -319,8 +325,9 @@ void mlx4_ib_free_fast_reg_page_list(str
+ 	struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
+ 	int size = page_list->max_page_list_len * sizeof (u64);
+ 
+-	dma_free_coherent(&dev->dev->pdev->dev, size, page_list->page_list,
++	dma_free_coherent(&dev->dev->pdev->dev, size, mfrpl->mapped_page_list,
+ 			  mfrpl->map);
++	kfree(mfrpl->ibfrpl.page_list);
+ 	kfree(mfrpl);
+ }
+ 
+Index: ofed_kernel/drivers/infiniband/hw/mlx4/qp.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/mlx4/qp.c	2009-05-07 13:19:01.114958000 +0300
++++ ofed_kernel/drivers/infiniband/hw/mlx4/qp.c	2009-05-07 13:19:03.474882000 +0300
+@@ -1532,7 +1532,7 @@ static void set_fmr_seg(struct mlx4_wqe_
+ 	int i;
+ 
+ 	for (i = 0; i < wr->wr.fast_reg.page_list_len; ++i)
+-		wr->wr.fast_reg.page_list->page_list[i] =
++		mfrpl->mapped_page_list[i] =
+ 			cpu_to_be64(wr->wr.fast_reg.page_list->page_list[i] |
+ 				    MLX4_MTT_FLAG_PRESENT);
+ 

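A minimal sketch of the scheme this fix adopts, with hypothetical names: the ULP-visible page list stays in CPU byte order, and a separate DMA-coherent buffer receives the byte-swapped, present-flagged copy only at post time:

#include <linux/types.h>
#include <asm/byteorder.h>

struct frpl_sketch {
	u64		*page_list;		/* CPU order; the ULP reads and unmaps these */
	__be64		*mapped_page_list;	/* big-endian copy handed to the HCA */
	dma_addr_t	map;			/* DMA address of mapped_page_list */
};

/* Convert at post time only; page_list itself is never byte-swapped. */
static void frpl_sketch_fill(struct frpl_sketch *p, int len, u64 present_flag)
{
	int i;

	for (i = 0; i < len; ++i)
		p->mapped_page_list[i] = cpu_to_be64(p->page_list[i] | present_flag);
}
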
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mthca_0030-mtt_seg_size_param.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mthca_0030-mtt_seg_size_param.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mthca_0030-mtt_seg_size_param.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,190 @@
+From dd51ae0ee1650758363dd7d3726e2cd4317ecd52 Mon Sep 17 00:00:00 2001
+From: Eli Cohen <eli at mellanox.co.il>
+Date: Mon, 2 Mar 2009 10:51:30 +0200
+Subject: [PATCH] ib_mthca: Use module parameter for number of MTTs per segment
+
+The current MTT allocator uses kmalloc to allocate the buffer for its buddy
+system implementation and is therefore limited in the number of MTT segments
+it can control. As a result, the amount of memory that can be registered is
+limited too. This patch uses a module parameter to control the number of MTT
+entries that each segment represents, allowing more memory to be registered
+with the same number of segments.
+
+Signed-off-by: Eli Cohen <eli at mellanox.co.il>
+---
+ drivers/infiniband/hw/mthca/mthca_cmd.c     |    2 +-
+ drivers/infiniband/hw/mthca/mthca_dev.h     |    1 +
+ drivers/infiniband/hw/mthca/mthca_main.c    |   15 ++++++++++++---
+ drivers/infiniband/hw/mthca/mthca_mr.c      |   16 ++++++++--------
+ drivers/infiniband/hw/mthca/mthca_profile.c |    4 ++--
+ 5 files changed, 24 insertions(+), 14 deletions(-)
+
+Index: ofed_kernel-fixes/drivers/infiniband/hw/mthca/mthca_cmd.c
+===================================================================
+--- ofed_kernel-fixes.orig/drivers/infiniband/hw/mthca/mthca_cmd.c	2009-03-02 11:01:12.000000000 +0200
++++ ofed_kernel-fixes/drivers/infiniband/hw/mthca/mthca_cmd.c	2009-03-02 11:09:43.000000000 +0200
+@@ -1057,7 +1057,7 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev
+ 	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MTT_OFFSET);
+ 	if (mthca_is_memfree(dev))
+ 		dev_lim->reserved_mtts = ALIGN((1 << (field >> 4)) * sizeof(u64),
+-					       MTHCA_MTT_SEG_SIZE) / MTHCA_MTT_SEG_SIZE;
++					       dev->limits.mtt_seg_size) / dev->limits.mtt_seg_size;
+ 	else
+ 		dev_lim->reserved_mtts = 1 << (field >> 4);
+ 	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET);
+Index: ofed_kernel-fixes/drivers/infiniband/hw/mthca/mthca_dev.h
+===================================================================
+--- ofed_kernel-fixes.orig/drivers/infiniband/hw/mthca/mthca_dev.h	2009-03-02 11:01:12.000000000 +0200
++++ ofed_kernel-fixes/drivers/infiniband/hw/mthca/mthca_dev.h	2009-03-02 11:09:43.000000000 +0200
+@@ -159,6 +159,7 @@ struct mthca_limits {
+ 	int      reserved_eqs;
+ 	int      num_mpts;
+ 	int      num_mtt_segs;
++	int	 mtt_seg_size;
+ 	int      fmr_reserved_mtts;
+ 	int      reserved_mtts;
+ 	int      reserved_mrws;
+Index: ofed_kernel-fixes/drivers/infiniband/hw/mthca/mthca_main.c
+===================================================================
+--- ofed_kernel-fixes.orig/drivers/infiniband/hw/mthca/mthca_main.c	2009-03-02 11:01:12.000000000 +0200
++++ ofed_kernel-fixes/drivers/infiniband/hw/mthca/mthca_main.c	2009-03-03 07:31:49.000000000 +0200
+@@ -125,6 +125,10 @@ module_param_named(fmr_reserved_mtts, hc
+ MODULE_PARM_DESC(fmr_reserved_mtts,
+ 		 "number of memory translation table segments reserved for FMR");
+ 
++static int log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
++module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
++MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
++
+ static char mthca_version[] __devinitdata =
+ 	DRV_NAME ": Mellanox InfiniBand HCA driver v"
+ 	DRV_VERSION " (" DRV_RELDATE ")\n";
+@@ -162,6 +166,7 @@ static int mthca_dev_lim(struct mthca_de
+ 	int err;
+ 	u8 status;
+ 
++	mdev->limits.mtt_seg_size = (1 << log_mtts_per_seg) * 8;
+ 	err = mthca_QUERY_DEV_LIM(mdev, dev_lim, &status);
+ 	if (err) {
+ 		mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
+@@ -460,11 +465,11 @@ static int mthca_init_icm(struct mthca_d
+ 	}
+ 
+ 	/* CPU writes to non-reserved MTTs, while HCA might DMA to reserved mtts */
+-	mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * MTHCA_MTT_SEG_SIZE,
+-					   dma_get_cache_alignment()) / MTHCA_MTT_SEG_SIZE;
++	mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * mdev->limits.mtt_seg_size,
++					   dma_get_cache_alignment()) / mdev->limits.mtt_seg_size;
+ 
+ 	mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base,
+-							 MTHCA_MTT_SEG_SIZE,
++							 mdev->limits.mtt_seg_size,
+ 							 mdev->limits.num_mtt_segs,
+ 							 mdev->limits.reserved_mtts,
+ 							 1, 0);
+@@ -1368,6 +1373,12 @@ static void __init mthca_validate_profil
+ 		printk(KERN_WARNING PFX "Corrected fmr_reserved_mtts to %d.\n",
+ 		       hca_profile.fmr_reserved_mtts);
+ 	}
++
++	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
++		printk(KERN_WARNING PFX "bad log_mtts_per_seg (%d). Using default - %d\n",
++		       log_mtts_per_seg, ilog2(MTHCA_MTT_SEG_SIZE / 8));
++		log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
++	}
+ }
+ 
+ static int __init mthca_init(void)
+Index: ofed_kernel-fixes/drivers/infiniband/hw/mthca/mthca_mr.c
+===================================================================
+--- ofed_kernel-fixes.orig/drivers/infiniband/hw/mthca/mthca_mr.c	2009-03-02 11:01:12.000000000 +0200
++++ ofed_kernel-fixes/drivers/infiniband/hw/mthca/mthca_mr.c	2009-03-02 11:09:43.000000000 +0200
+@@ -220,7 +220,7 @@ static struct mthca_mtt *__mthca_alloc_m
+ 
+ 	mtt->buddy = buddy;
+ 	mtt->order = 0;
+-	for (i = MTHCA_MTT_SEG_SIZE / 8; i < size; i <<= 1)
++	for (i = dev->limits.mtt_seg_size / 8; i < size; i <<= 1)
+ 		++mtt->order;
+ 
+ 	mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy);
+@@ -267,7 +267,7 @@ static int __mthca_write_mtt(struct mthc
+ 
+ 	while (list_len > 0) {
+ 		mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base +
+-					   mtt->first_seg * MTHCA_MTT_SEG_SIZE +
++					   mtt->first_seg * dev->limits.mtt_seg_size +
+ 					   start_index * 8);
+ 		mtt_entry[1] = 0;
+ 		for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i)
+@@ -326,7 +326,7 @@ static void mthca_tavor_write_mtt_seg(st
+ 	u64 __iomem *mtts;
+ 	int i;
+ 
+-	mtts = dev->mr_table.tavor_fmr.mtt_base + mtt->first_seg * MTHCA_MTT_SEG_SIZE +
++	mtts = dev->mr_table.tavor_fmr.mtt_base + mtt->first_seg * dev->limits.mtt_seg_size +
+ 		start_index * sizeof (u64);
+ 	for (i = 0; i < list_len; ++i)
+ 		mthca_write64_raw(cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT),
+@@ -345,10 +345,10 @@ static void mthca_arbel_write_mtt_seg(st
+ 	/* For Arbel, all MTTs must fit in the same page. */
+ 	BUG_ON(s / PAGE_SIZE != (s + list_len * sizeof(u64) - 1) / PAGE_SIZE);
+ 	/* Require full segments */
+-	BUG_ON(s % MTHCA_MTT_SEG_SIZE);
++	BUG_ON(s % dev->limits.mtt_seg_size);
+ 
+ 	mtts = mthca_table_find(dev->mr_table.mtt_table, mtt->first_seg +
+-				s / MTHCA_MTT_SEG_SIZE, &dma_handle);
++				s / dev->limits.mtt_seg_size, &dma_handle);
+ 
+ 	BUG_ON(!mtts);
+ 
+@@ -479,7 +479,7 @@ int mthca_mr_alloc(struct mthca_dev *dev
+ 	if (mr->mtt)
+ 		mpt_entry->mtt_seg =
+ 			cpu_to_be64(dev->mr_table.mtt_base +
+-				    mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE);
++				    mr->mtt->first_seg * dev->limits.mtt_seg_size);
+ 
+ 	if (0) {
+ 		mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
+@@ -626,7 +626,7 @@ int mthca_fmr_alloc(struct mthca_dev *de
+ 		goto err_out_table;
+ 	}
+ 
+-	mtt_seg = mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE;
++	mtt_seg = mr->mtt->first_seg * dev->limits.mtt_seg_size;
+ 
+ 	if (mthca_is_memfree(dev)) {
+ 		mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
+@@ -908,7 +908,7 @@ int mthca_init_mr_table(struct mthca_dev
+ 			 dev->mr_table.mtt_base);
+ 
+ 		dev->mr_table.tavor_fmr.mtt_base =
+-			ioremap(addr, mtts * MTHCA_MTT_SEG_SIZE);
++			ioremap(addr, mtts * dev->limits.mtt_seg_size);
+ 		if (!dev->mr_table.tavor_fmr.mtt_base) {
+ 			mthca_warn(dev, "MTT ioremap for FMR failed.\n");
+ 			err = -ENOMEM;
+Index: ofed_kernel-fixes/drivers/infiniband/hw/mthca/mthca_profile.c
+===================================================================
+--- ofed_kernel-fixes.orig/drivers/infiniband/hw/mthca/mthca_profile.c	2009-03-02 11:01:12.000000000 +0200
++++ ofed_kernel-fixes/drivers/infiniband/hw/mthca/mthca_profile.c	2009-03-02 11:09:43.000000000 +0200
+@@ -94,7 +94,7 @@ s64 mthca_make_profile(struct mthca_dev 
+ 	profile[MTHCA_RES_RDB].size  = MTHCA_RDB_ENTRY_SIZE;
+ 	profile[MTHCA_RES_MCG].size  = MTHCA_MGM_ENTRY_SIZE;
+ 	profile[MTHCA_RES_MPT].size  = dev_lim->mpt_entry_sz;
+-	profile[MTHCA_RES_MTT].size  = MTHCA_MTT_SEG_SIZE;
++	profile[MTHCA_RES_MTT].size  = dev->limits.mtt_seg_size;
+ 	profile[MTHCA_RES_UAR].size  = dev_lim->uar_scratch_entry_sz;
+ 	profile[MTHCA_RES_UDAV].size = MTHCA_AV_SIZE;
+ 	profile[MTHCA_RES_UARC].size = request->uarc_size;
+@@ -232,7 +232,7 @@ s64 mthca_make_profile(struct mthca_dev 
+ 			dev->limits.num_mtt_segs = profile[i].num;
+ 			dev->mr_table.mtt_base   = profile[i].start;
+ 			init_hca->mtt_base       = profile[i].start;
+-			init_hca->mtt_seg_sz     = ffs(MTHCA_MTT_SEG_SIZE) - 7;
++			init_hca->mtt_seg_sz     = ffs(dev->limits.mtt_seg_size) - 7;
+ 			break;
+ 		case MTHCA_RES_UAR:
+ 			dev->limits.num_uars       = profile[i].num;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mthca_0040_fix_cmd_timeouts.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mthca_0040_fix_cmd_timeouts.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/mthca_0040_fix_cmd_timeouts.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,77 @@
+ib_mthca: bring INIT_HCA and other commands timeout into consistency with PRM
+
+Commands INIT_HCA, CLOSE_HCA, SYS_EN, SYS_DIS, and CLOSE_IB all had 1 second
+timeouts.  For INIT_HCA this caused problems when had more than 2^18 max qp's
+configured.
+
+All other commands have 60-second timeouts.  This patch brings the above commands
+into consistency with the rest of the commands.
+
+This patch is an expansion of the INIT_HCA timeout patch submitted by A. Kepner.
+
+Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
+ 
+Index: ofed_kernel/drivers/infiniband/hw/mthca/mthca_cmd.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/mthca/mthca_cmd.c	2009-04-19 14:54:12.000000000 +0300
++++ ofed_kernel/drivers/infiniband/hw/mthca/mthca_cmd.c	2009-04-19 15:49:13.655998000 +0300
+@@ -157,13 +157,15 @@ enum {
+ enum {
+ 	CMD_TIME_CLASS_A = (HZ + 999) / 1000 + 1,
+ 	CMD_TIME_CLASS_B = (HZ +  99) /  100 + 1,
+-	CMD_TIME_CLASS_C = (HZ +   9) /   10 + 1
++	CMD_TIME_CLASS_C = (HZ +   9) /   10 + 1,
++	CMD_TIME_CLASS_D = 60 * HZ
+ };
+ #else
+ enum {
+ 	CMD_TIME_CLASS_A = 60 * HZ,
+ 	CMD_TIME_CLASS_B = 60 * HZ,
+-	CMD_TIME_CLASS_C = 60 * HZ
++	CMD_TIME_CLASS_C = 60 * HZ,
++	CMD_TIME_CLASS_D = 60 * HZ
+ };
+ #endif
+ 
+@@ -598,7 +600,7 @@ int mthca_SYS_EN(struct mthca_dev *dev, 
+ 	u64 out;
+ 	int ret;
+ 
+-	ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, HZ, status);
++	ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, CMD_TIME_CLASS_D, status);
+ 
+ 	if (*status == MTHCA_CMD_STAT_DDR_MEM_ERR)
+ 		mthca_warn(dev, "SYS_EN DDR error: syn=%x, sock=%d, "
+@@ -611,7 +613,7 @@ int mthca_SYS_EN(struct mthca_dev *dev, 
+ 
+ int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status)
+ {
+-	return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, HZ, status);
++	return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C, status);
+ }
+ 
+ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
+@@ -1390,7 +1392,7 @@ int mthca_INIT_HCA(struct mthca_dev *dev
+ 		MTHCA_PUT(inbox, param->uarc_base,   INIT_HCA_UAR_CTX_BASE_OFFSET);
+ 	}
+ 
+-	err = mthca_cmd(dev, mailbox->dma, 0, 0, CMD_INIT_HCA, HZ, status);
++	err = mthca_cmd(dev, mailbox->dma, 0, 0, CMD_INIT_HCA, CMD_TIME_CLASS_D, status);
+ 
+ 	mthca_free_mailbox(dev, mailbox);
+ 	return err;
+@@ -1450,12 +1452,12 @@ int mthca_INIT_IB(struct mthca_dev *dev,
+ 
+ int mthca_CLOSE_IB(struct mthca_dev *dev, int port, u8 *status)
+ {
+-	return mthca_cmd(dev, 0, port, 0, CMD_CLOSE_IB, HZ, status);
++	return mthca_cmd(dev, 0, port, 0, CMD_CLOSE_IB, CMD_TIME_CLASS_A, status);
+ }
+ 
+ int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status)
+ {
+-	return mthca_cmd(dev, 0, 0, panic, CMD_CLOSE_HCA, HZ, status);
++	return mthca_cmd(dev, 0, 0, panic, CMD_CLOSE_HCA, CMD_TIME_CLASS_C, status);
+ }
+ 
+ int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,

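As a rough userspace illustration of the jiffies arithmetic behind these timeout classes (the tick rate below is an arbitrary example, not a kernel value):

#include <stdio.h>

int main(void)
{
	int hz = 250;                              /* example tick rate */
	int class_a_dbg = (hz + 999) / 1000 + 1;   /* ~1 ms rounded up, debug build */
	int class_d = 60 * hz;                     /* 60 s, now used for INIT_HCA */

	printf("debug CLASS_A = %d jiffies, CLASS_D = %d jiffies (%d s)\n",
	       class_a_dbg, class_d, class_d / hz);
	return 0;
}
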
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0010_cqp_request.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0010_cqp_request.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0010_cqp_request.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,17 +1,3 @@
-From: Faisal Latif <faisal.latif at intel.com>
-
-RDMA/nes: cqp_request list cleanup
-
-Use nes_free_cqp_request() from commit 
-1ff66e8c1faee7c2711b84b9c89e1c5fcd767839.
-
-Change some continue to break in nes_cm_timer_tick.  Send_entry
-was a list processed in a loop, thus continue.  Now it is a single
-item, changing continue to break to be semantically correct.
-
-Signed-off-by: Faisal Latif <faisal.latif at intel.com>
-Signed-off-by: Chien Tung <chien.tin.tung at intel.com>
-
 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
 index 2caf9da..2a1d6c7 100644
 --- a/drivers/infiniband/hw/nes/nes_cm.c
@@ -69,7 +55,7 @@
 index d36c9a0..4fdb724 100644
 --- a/drivers/infiniband/hw/nes/nes_verbs.c
 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
-@@ -1695,13 +1695,8 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
+@@ -1696,13 +1696,8 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
  			/* use 4k pbl */
  			nes_debug(NES_DBG_CQ, "pbl_entries=%u, use a 4k PBL\n", pbl_entries);
  			if (nesadapter->free_4kpbl == 0) {
@@ -85,7 +71,7 @@
  				if (!context)
  					pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
  							nescq->hw_cq.cq_pbase);
-@@ -1717,13 +1712,8 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
+@@ -1718,13 +1713,8 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
  			/* use 256 byte pbl */
  			nes_debug(NES_DBG_CQ, "pbl_entries=%u, use a 256 byte PBL\n", pbl_entries);
  			if (nesadapter->free_256pbl == 0) {
@@ -101,7 +87,7 @@
  				if (!context)
  					pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
  							nescq->hw_cq.cq_pbase);
-@@ -1928,13 +1918,8 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
+@@ -1929,13 +1919,8 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
  			/* Two level PBL */
  			if ((pbl_count+1) > nesadapter->free_4kpbl) {
  				nes_debug(NES_DBG_MR, "Out of 4KB Pbls for two level request.\n");
@@ -117,7 +103,7 @@
  				return -ENOMEM;
  			} else {
  				nesadapter->free_4kpbl -= pbl_count+1;
-@@ -1942,13 +1927,8 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
+@@ -1943,13 +1928,8 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
  		} else if (residual_page_count > 32) {
  			if (pbl_count > nesadapter->free_4kpbl) {
  				nes_debug(NES_DBG_MR, "Out of 4KB Pbls.\n");
@@ -133,7 +119,7 @@
  				return -ENOMEM;
  			} else {
  				nesadapter->free_4kpbl -= pbl_count;
-@@ -1956,13 +1936,8 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
+@@ -1957,13 +1937,8 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
  		} else {
  			if (pbl_count > nesadapter->free_256pbl) {
  				nes_debug(NES_DBG_MR, "Out of 256B Pbls.\n");

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0020_connected_nodes_list.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0020_connected_nodes_list.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0020_connected_nodes_list.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,15 +1,3 @@
-From: Faisal Latif <faisal.latif at intel.com>
-
-RDMA/nes: properly lock down connected_nodes list while processing it
-
-While processing connected_nodes list, we would release the lock when
-we need to send reset to remote partner.  That created a window where
-the list can be modified.  Change this into a two step process.  Place
-nodes that need processing on a local list then process the local list.
-
-Signed-off-by: Faisal Latif <faisal.latif at intel.com>
-Signed-off-by: Chien Tung <chien.tin.tung at intel.com>
-
 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
 index 2a1d6c7..257d994 100644
 --- a/drivers/infiniband/hw/nes/nes_cm.c

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0030_tx-free_list.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0030_tx-free_list.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0030_tx-free_list.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,16 +1,3 @@
-From: Faisal Latif <faisal.latif at intel.com>
-
-RDMA/nes: Remove tx_free_list
-
-There is no lock protecting tx_free_list thus causing a system crash
-when skb_dequeue() is called and the list is empty.  Since it did not give
-any performance boost under heavy load, removing it to simplfy the code.
-Change get_free_pkt call to allocate MAX_CM_BUFFER skb for connection 
-establishment/teardown as well as MPA request/response.
-
-Signed-off-by: Faisal Latif <faisal.latif at intel.com>
-Signed-off-by: Chien Tung <chien.tin.tung at intel.com>
-
 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
 index 257d994..fe07797 100644
 --- a/drivers/infiniband/hw/nes/nes_cm.c

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0040_race_condition.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0040_race_condition.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0040_race_condition.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,16 +1,3 @@
-From: Faisal Latif <faisal.latif at intel.com>
-
-RDMA/nes: Avoid race condition between MPA request and reset event to rdma_cm
-
-In passive open after indicating MPA request to rdma_cm, an incoming RST
-can cause rdma_cm to crash since the current state is not connected.  The 
-solution is to wait for nes_accept() or nes_reject() before firing the 
-reset event.  If nes_accept() or nes_reject() is already done, then the 
-reset event will fire when RST is processed.
-
-Signed-off-by: Faisal Latif <faisal.latif at intel.com>
-Signed-off-by: Chien Tung <chien.tin.tung at intel.com>
-
 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
 index fe07797..01fd309 100644
 --- a/drivers/infiniband/hw/nes/nes_cm.c

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0050_stale_APBVT.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0050_stale_APBVT.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0050_stale_APBVT.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,15 +1,3 @@
-From: Faisal Latif <faisal.latif at intel.com>
-
-RDMA/nes: Do not drop packets for a new connection with stale APBVT entry
-
-Under heavy traffic, there is a small windows when an APBVT entry is not
-yet removed and a new connection is established.  Packets for the new
-connection are dropped until APBVT entry is removed.  This patch will
-forward the packets instead of dropping them.
-
-Signed-off-by: Faisal Latif <faisal.latif at intel.com>
-Signed-off-by: Chien Tung <chien.tin.tung at intel.com>
-
 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
 index 01fd309..fd2dba7 100644
 --- a/drivers/infiniband/hw/nes/nes_cm.c

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0060_TCP_compliance.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0060_TCP_compliance.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0060_TCP_compliance.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,17 +1,3 @@
-From: Faisal Latif <faisal.latif at intel.com>
-
-RDMA/nes: Fix TCP compliance test failures
-
-From ANVL testing, we are not handling all cm_node states during connection
-establishment.  Add missing state handlers.
-
-Fixed sequence number
-
-Send reset in handle_tcp_options()
-
-Signed-off-by: Faisal Latif <faisal.latif at intel.com>
-Signed-off-by: Chien Tung <chien.tin.tung at intel.com>
-
 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
 index fd2dba7..cc10da1 100644
 --- a/drivers/infiniband/hw/nes/nes_cm.c

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0070_cqp_avail_reqs.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0070_cqp_avail_reqs.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0070_cqp_avail_reqs.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,14 +1,3 @@
-From: Faisal Latif <faisal.latif at intel.com>
-
-RDMA/nes: Check cqp_avail_reqs is empty after locking the list
-
-Between the first empty list check and locking the list, the list
-can change.  Check it again after it is locked to make sure
-the list is not empty.
-
-Signed-off-by: Faisal Latif <faisal.latif at intel.com>
-Signed-off-by: Chien Tung <chien.tin.tung at intel.com>
-
 diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
 index fb8cbd7..5611a73 100644
 --- a/drivers/infiniband/hw/nes/nes_utils.c

Deleted: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0080_accept_pend_cnt.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0080_accept_pend_cnt.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0080_accept_pend_cnt.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,84 +0,0 @@
-From: Faisal Latif <faisal.latif at intel.com>
-
-RDMA/nes: Change accept_pend_cnt to atomic
-
-There is a race condition on accept_pend_cnt.  Change it to atomic.
-
-Signed-off-by: Faisal Latif <faisal.latif at intel.com>
-Signed-off-by: Chien Tung <chien.tin.tung at intel.com>
-
-diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
-index cc10da1..0025a7e 100644
---- a/drivers/infiniband/hw/nes/nes_cm.c
-+++ b/drivers/infiniband/hw/nes/nes_cm.c
-@@ -976,7 +976,7 @@ static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
- 	u32 was_timer_set;
- 	cm_node->accelerated = 1;
- 
--	if (cm_node->accept_pend) {
-+	if (atomic_dec_and_test(&cm_node->accept_pend)) {
- 		BUG_ON(!cm_node->listener);
- 		atomic_dec(&cm_node->listener->pend_accepts_cnt);
- 		BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
-@@ -1091,7 +1091,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
- 	atomic_inc(&cm_core->node_cnt);
- 	cm_node->conn_type = cm_info->conn_type;
- 	cm_node->apbvt_set = 0;
--	cm_node->accept_pend = 0;
-+	atomic_set(&cm_node->accept_pend, 0);
- 
- 	cm_node->nesvnic = nesvnic;
- 	/* get some device handles, for arp lookup */
-@@ -1156,7 +1156,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
- 	spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
- 
- 	/* if the node is destroyed before connection was accelerated */
--	if (!cm_node->accelerated && cm_node->accept_pend) {
-+	if (!cm_node->accelerated && atomic_read(&cm_node->accept_pend)) {
- 		BUG_ON(!cm_node->listener);
- 		atomic_dec(&cm_node->listener->pend_accepts_cnt);
- 		BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
-@@ -1477,25 +1477,25 @@ static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
- 		break;
- 	case NES_CM_STATE_LISTENING:
- 		/* Passive OPEN */
--		cm_node->accept_pend = 1;
--		atomic_inc(&cm_node->listener->pend_accepts_cnt);
- 		if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
- 				cm_node->listener->backlog) {
- 			nes_debug(NES_DBG_CM, "drop syn due to backlog "
- 				"pressure \n");
- 			cm_backlog_drops++;
--			passive_open_err(cm_node, skb, 0);
-+			rem_ref_cm_node(cm_node->cm_core, cm_node);
-+			dev_kfree_skb_any(skb);
- 			break;
- 		}
- 		ret = handle_tcp_options(cm_node, tcph, skb, optionsize,
- 			1);
- 		if (ret) {
--			passive_open_err(cm_node, skb, 0);
--			/* drop pkt */
- 			break;
- 		}
- 		cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
- 		BUG_ON(cm_node->send_entry);
-+		atomic_set(&cm_node->accept_pend, 1);
-+		atomic_inc(&cm_node->listener->pend_accepts_cnt);
-+
- 		cm_node->state = NES_CM_STATE_SYN_RCVD;
- 		send_syn(cm_node, 1, skb);
- 		break;
-diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
-index fafa350..7600365 100644
---- a/drivers/infiniband/hw/nes/nes_cm.h
-+++ b/drivers/infiniband/hw/nes/nes_cm.h
-@@ -294,7 +294,7 @@ struct nes_cm_node {
- 	enum nes_cm_conn_type     conn_type;
- 	struct nes_vnic           *nesvnic;
- 	int                       apbvt_set;
--	int                       accept_pend;
-+	atomic_t                  accept_pend;
- 	int			freed;
- 	struct list_head	timer_entry;
- 	struct list_head	reset_entry;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0090_warning_cleanup.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0090_warning_cleanup.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0090_warning_cleanup.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,15 +1,3 @@
-From:  Chien Tung <chien.tin.tung at intel.com>
-
-RDMA/nes: warning cleanup
-
-Wrapped NES_DEBUG and assert macros with do while (0) to avoid ambiguous else.
-
-Take out unused pointer returned from form_cm_frame().
-
-drop_packet() should not increment reset counter on receiving a FIN.
-
-Signed-off-by: Chien Tung <chien.tin.tung at intel.com>
-
 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
 index 1595dc7..13a5bb1 100644
 --- a/drivers/infiniband/hw/nes/nes.h

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0100_make_cm_node_loopback_check.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0100_make_cm_node_loopback_check.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0100_make_cm_node_loopback_check.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,11 +1,3 @@
-From:  Chien Tung <chien.tin.tung at intel.com>
-
-RDMA/nes: Add loopback check to make_cm_node()
-
-Check for loopback connection in make_cm_node()
-
-Signed-off-by: Chien Tung <chien.tin.tung at intel.com>
-
 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
 index 24855ec..9cbea51 100644
 --- a/drivers/infiniband/hw/nes/nes_cm.c

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0110_copyright.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0110_copyright.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0110_copyright.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,155 +1,135 @@
-From:  Chien Tung <chien.tin.tung at intel.com>
-
-RDMA/nes: update copyright to new legal entity
-
-Per lawyer's request, update copyright to the new
-legal entity, Intel-NE, Inc. an Intel company.
-
-Signed-off-by: Chien Tung <chien.tin.tung at intel.com>
-
- drivers/infiniband/hw/nes/nes.c         |    2 +-
- drivers/infiniband/hw/nes/nes.h         |    2 +-
- drivers/infiniband/hw/nes/nes_cm.c      |    2 +-
- drivers/infiniband/hw/nes/nes_cm.h      |    2 +-
- drivers/infiniband/hw/nes/nes_context.h |    2 +-
- drivers/infiniband/hw/nes/nes_hw.c      |    2 +-
- drivers/infiniband/hw/nes/nes_hw.h      |    2 +-
- drivers/infiniband/hw/nes/nes_nic.c     |    2 +-
- drivers/infiniband/hw/nes/nes_user.h    |    2 +-
- drivers/infiniband/hw/nes/nes_utils.c   |    2 +-
- drivers/infiniband/hw/nes/nes_verbs.c   |    2 +-
- drivers/infiniband/hw/nes/nes_verbs.h   |    2 +-
- 12 files changed, 12 insertions(+), 12 deletions(-)
-
 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
-index aa1dc41..0929f4e 100644
+index aa1dc41..0696b4c 100644
 --- a/drivers/infiniband/hw/nes/nes.c
 +++ b/drivers/infiniband/hw/nes/nes.c
 @@ -1,5 +1,5 @@
  /*
 - * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
-+ * Copyright (c) 2006 - 2008 Intel-NE, Inc.  All rights reserved.
++ * Copyright (c) 2006 - 2009 Intel-NE, Inc.  All rights reserved.
   * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
   *
   * This software is available to you under a choice of one of two
 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
-index 7bdacd0..b1621e2 100644
+index bd9be4f..09dab29 100644
 --- a/drivers/infiniband/hw/nes/nes.h
 +++ b/drivers/infiniband/hw/nes/nes.h
 @@ -1,5 +1,5 @@
  /*
 - * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
-+ * Copyright (c) 2006 - 2008 Intel-NE, Inc.  All rights reserved.
++ * Copyright (c) 2006 - 2009 Intel-NE, Inc.  All rights reserved.
   * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
   *
   * This software is available to you under a choice of one of two
 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
-index 2caf9da..69b7798 100644
+index 0343a7d..246619b 100644
 --- a/drivers/infiniband/hw/nes/nes_cm.c
 +++ b/drivers/infiniband/hw/nes/nes_cm.c
 @@ -1,5 +1,5 @@
  /*
 - * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
-+ * Copyright (c) 2006 - 2008 Intel-NE, Inc.  All rights reserved.
++ * Copyright (c) 2006 - 2009 Intel-NE, Inc.  All rights reserved.
   *
   * This software is available to you under a choice of one of two
   * licenses.  You may choose to be licensed under the terms of the GNU
 diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
-index 367b3d2..0def568 100644
+index fafa350..4ab2beb 100644
 --- a/drivers/infiniband/hw/nes/nes_cm.h
 +++ b/drivers/infiniband/hw/nes/nes_cm.h
 @@ -1,5 +1,5 @@
  /*
 - * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
-+ * Copyright (c) 2006 - 2008 Intel-NE, Inc.  All rights reserved.
++ * Copyright (c) 2006 - 2009 Intel-NE, Inc.  All rights reserved.
   *
   * This software is available to you under a choice of one of two
   * licenses.  You may choose to be licensed under the terms of the GNU
 diff --git a/drivers/infiniband/hw/nes/nes_context.h b/drivers/infiniband/hw/nes/nes_context.h
-index da9daba..3861d2e 100644
+index da9daba..0fb8d81 100644
 --- a/drivers/infiniband/hw/nes/nes_context.h
 +++ b/drivers/infiniband/hw/nes/nes_context.h
 @@ -1,5 +1,5 @@
  /*
 - * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
-+ * Copyright (c) 2006 - 2008 Intel-NE, Inc.  All rights reserved.
++ * Copyright (c) 2006 - 2009 Intel-NE, Inc.  All rights reserved.
   *
   * This software is available to you under a choice of one of two
   * licenses.  You may choose to be licensed under the terms of the GNU
 diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
-index eca3520..5e1cbf4 100644
+index b0cc16d..1060008 100644
 --- a/drivers/infiniband/hw/nes/nes_hw.c
 +++ b/drivers/infiniband/hw/nes/nes_hw.c
 @@ -1,5 +1,5 @@
  /*
 - * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
-+ * Copyright (c) 2006 - 2008 Intel-NE, Inc.  All rights reserved.
++ * Copyright (c) 2006 - 2009 Intel-NE, Inc.  All rights reserved.
   *
   * This software is available to you under a choice of one of two
   * licenses.  You may choose to be licensed under the terms of the GNU
 diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
-index bc0b4de..09dc937 100644
+index bc0b4de..6f8712d 100644
 --- a/drivers/infiniband/hw/nes/nes_hw.h
 +++ b/drivers/infiniband/hw/nes/nes_hw.h
 @@ -1,5 +1,5 @@
  /*
 -* Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
-+* Copyright (c) 2006 - 2008 Intel-NE, Inc.  All rights reserved.
++* Copyright (c) 2006 - 2009 Intel-NE, Inc.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
-index 7303586..5c186d8 100644
+index 7303586..9094310 100644
 --- a/drivers/infiniband/hw/nes/nes_nic.c
 +++ b/drivers/infiniband/hw/nes/nes_nic.c
 @@ -1,5 +1,5 @@
  /*
 - * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
-+ * Copyright (c) 2006 - 2008 Intel-NE, Inc.  All rights reserved.
++ * Copyright (c) 2006 - 2009 Intel-NE, Inc.  All rights reserved.
   *
   * This software is available to you under a choice of one of two
   * licenses.  You may choose to be licensed under the terms of the GNU
 diff --git a/drivers/infiniband/hw/nes/nes_user.h b/drivers/infiniband/hw/nes/nes_user.h
-index e64306b..2388c90 100644
+index e64306b..cc90c14 100644
 --- a/drivers/infiniband/hw/nes/nes_user.h
 +++ b/drivers/infiniband/hw/nes/nes_user.h
 @@ -1,5 +1,5 @@
  /*
 - * Copyright (c) 2006 - 2008 NetEffect.  All rights reserved.
-+ * Copyright (c) 2006 - 2008 Intel-NE, Inc.  All rights reserved.
++ * Copyright (c) 2006 - 2009 Intel-NE, Inc.  All rights reserved.
   * Copyright (c) 2005 Topspin Communications.  All rights reserved.
   * Copyright (c) 2005 Cisco Systems.  All rights reserved.
   * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
-index fb8cbd7..c6c8d10 100644
+index 5611a73..9e26898 100644
 --- a/drivers/infiniband/hw/nes/nes_utils.c
 +++ b/drivers/infiniband/hw/nes/nes_utils.c
 @@ -1,5 +1,5 @@
  /*
 - * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
-+ * Copyright (c) 2006 - 2008 Intel-NE, Inc.  All rights reserved.
++ * Copyright (c) 2006 - 2009 Intel-NE, Inc.  All rights reserved.
   *
   * This software is available to you under a choice of one of two
   * licenses.  You may choose to be licensed under the terms of the GNU
 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
-index a954794..511aaf6 100644
+index 9a6f587..76e6421 100644
 --- a/drivers/infiniband/hw/nes/nes_verbs.c
 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
 @@ -1,5 +1,5 @@
  /*
 - * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
-+ * Copyright (c) 2006 - 2008 Intel-NE, Inc.  All rights reserved.
++ * Copyright (c) 2006 - 2009 Intel-NE, Inc.  All rights reserved.
   *
   * This software is available to you under a choice of one of two
   * licenses.  You may choose to be licensed under the terms of the GNU
 diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h
-index 6c6b4da..4d0a03a 100644
+index 6c6b4da..da3c368 100644
 --- a/drivers/infiniband/hw/nes/nes_verbs.h
 +++ b/drivers/infiniband/hw/nes/nes_verbs.h
 @@ -1,5 +1,5 @@
  /*
 - * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
-+ * Copyright (c) 2006 - 2008 Intel-NE, Inc.  All rights reserved.
++ * Copyright (c) 2006 - 2009 Intel-NE, Inc.  All rights reserved.
   * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
   *
   * This software is available to you under a choice of one of two
+-- 
+1.5.3.3
+

Deleted: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0120_free_skb.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0120_free_skb.patch	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0120_free_skb.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,45 +0,0 @@
-From: Faisal Latif <faisal.latif at intel.com>
-
-With heavy 1-byte UDP TX traffic, we hit a soft lockup due to dev_kfree_skb
-in TX path.  Free header-only (1 fragment) skbs in nes_nic_ce_handler()
-instead of nes_nic_send() (now all skbs are freed in nes_nic_ce_handler()).
-
-Signed-off-by: Faisal Latif <faisal.latif at intel.com>
-Signed-off-by: Chien Tung <chien.tin.tung at intel.com>
-
----
- drivers/infiniband/hw/nes/nes_hw.c  |    4 ++--
- drivers/infiniband/hw/nes/nes_nic.c |    3 +--
- 2 files changed, 3 insertions(+), 4 deletions(-)
-
-diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
-index eca3520..e253098 100644
---- a/drivers/infiniband/hw/nes/nes_hw.c
-+++ b/drivers/infiniband/hw/nes/nes_hw.c
-@@ -2622,9 +2622,9 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
- 						} else
- 							break;
- 					}
--					if (skb)
--						dev_kfree_skb_any(skb);
- 				}
-+				if (skb)
-+					dev_kfree_skb_any(skb);
- 				nesnic->sq_tail++;
- 				nesnic->sq_tail &= nesnic->sq_size-1;
- 				if (sq_cqes > 128) {
-diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
-index 7303586..f8dbf14 100644
---- a/drivers/infiniband/hw/nes/nes_nic.c
-+++ b/drivers/infiniband/hw/nes/nes_nic.c
-@@ -401,8 +401,7 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
- 	if (skb_headlen(skb) == skb->len) {
- 		if (skb_headlen(skb) <= NES_FIRST_FRAG_SIZE) {
- 			nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_2_1_IDX] = 0;
--			nesnic->tx_skb[nesnic->sq_head] = NULL;
--			dev_kfree_skb(skb);
-+			nesnic->tx_skb[nesnic->sq_head] = skb;
- 		}
- 	} else {
- 		/* Deal with Fragments */
-

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0130_pbl_accounting.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0130_pbl_accounting.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0130_pbl_accounting.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,124 @@
+diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
+index 76e6421..a165a82 100644
+--- a/drivers/infiniband/hw/nes/nes_verbs.c
++++ b/drivers/infiniband/hw/nes/nes_verbs.c
+@@ -551,6 +551,7 @@ static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
+ 	struct nes_device *nesdev = nesvnic->nesdev;
+ 	struct nes_adapter *nesadapter = nesdev->nesadapter;
+ 	int i = 0;
++	int rc;
+ 
+ 	/* free the resources */
+ 	if (nesfmr->leaf_pbl_cnt == 0) {
+@@ -572,6 +573,8 @@ static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
+ 	nesmr->ibmw.rkey = ibfmr->rkey;
+ 	nesmr->ibmw.uobject = NULL;
+ 
++	rc = nes_dealloc_mw(&nesmr->ibmw);
++
+ 	if (nesfmr->nesmr.pbls_used != 0) {
+ 		spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+ 		if (nesfmr->nesmr.pbl_4k) {
+@@ -584,7 +587,7 @@ static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
+ 		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ 	}
+ 
+-	return nes_dealloc_mw(&nesmr->ibmw);
++	return rc;
+ }
+ 
+ 
+@@ -1994,7 +1997,16 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
+ 			stag, ret, cqp_request->major_code, cqp_request->minor_code);
+ 	major_code = cqp_request->major_code;
+ 	nes_put_cqp_request(nesdev, cqp_request);
+-
++	if ((!ret || major_code) && pbl_count != 0) {
++		spin_lock_irqsave(&nesadapter->pbl_lock, flags);
++		if (pbl_count > 1)
++			nesadapter->free_4kpbl += pbl_count+1;
++		else if (residual_page_count > 32)
++			nesadapter->free_4kpbl += pbl_count;
++		else
++			nesadapter->free_256pbl += pbl_count;
++		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
++	}
+ 	if (!ret)
+ 		return -ETIME;
+ 	else if (major_code)
+@@ -2610,24 +2622,6 @@ static int nes_dereg_mr(struct ib_mr *ib_mr)
+ 	cqp_request->waiting = 1;
+ 	cqp_wqe = &cqp_request->cqp_wqe;
+ 
+-	spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+-	if (nesmr->pbls_used != 0) {
+-		if (nesmr->pbl_4k) {
+-			nesadapter->free_4kpbl += nesmr->pbls_used;
+-			if (nesadapter->free_4kpbl > nesadapter->max_4kpbl) {
+-				printk(KERN_ERR PFX "free 4KB PBLs(%u) has exceeded the max(%u)\n",
+-						nesadapter->free_4kpbl, nesadapter->max_4kpbl);
+-			}
+-		} else {
+-			nesadapter->free_256pbl += nesmr->pbls_used;
+-			if (nesadapter->free_256pbl > nesadapter->max_256pbl) {
+-				printk(KERN_ERR PFX "free 256B PBLs(%u) has exceeded the max(%u)\n",
+-						nesadapter->free_256pbl, nesadapter->max_256pbl);
+-			}
+-		}
+-	}
+-
+-	spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ 	nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ 	set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
+ 			NES_CQP_DEALLOCATE_STAG | NES_CQP_STAG_VA_TO |
+@@ -2645,11 +2639,6 @@ static int nes_dereg_mr(struct ib_mr *ib_mr)
+ 			" CQP Major:Minor codes = 0x%04X:0x%04X\n",
+ 			ib_mr->rkey, ret, cqp_request->major_code, cqp_request->minor_code);
+ 
+-	nes_free_resource(nesadapter, nesadapter->allocated_mrs,
+-			(ib_mr->rkey & 0x0fffff00) >> 8);
+-
+-	kfree(nesmr);
+-
+ 	major_code = cqp_request->major_code;
+ 	minor_code = cqp_request->minor_code;
+ 
+@@ -2665,8 +2654,33 @@ static int nes_dereg_mr(struct ib_mr *ib_mr)
+ 				" to destroy STag, ib_mr=%p, rkey = 0x%08X\n",
+ 				major_code, minor_code, ib_mr, ib_mr->rkey);
+ 		return -EIO;
+-	} else
+-		return 0;
++	}
++
++	if (nesmr->pbls_used != 0) {
++		spin_lock_irqsave(&nesadapter->pbl_lock, flags);
++		if (nesmr->pbl_4k) {
++			nesadapter->free_4kpbl += nesmr->pbls_used;
++			if (nesadapter->free_4kpbl > nesadapter->max_4kpbl)
++				printk(KERN_ERR PFX "free 4KB PBLs(%u) has "
++					"exceeded the max(%u)\n",
++					nesadapter->free_4kpbl,
++					nesadapter->max_4kpbl);
++		} else {
++			nesadapter->free_256pbl += nesmr->pbls_used;
++			if (nesadapter->free_256pbl > nesadapter->max_256pbl)
++				printk(KERN_ERR PFX "free 256B PBLs(%u) has "
++					"exceeded the max(%u)\n",
++					nesadapter->free_256pbl,
++					nesadapter->max_256pbl);
++		}
++		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
++	}
++	nes_free_resource(nesadapter, nesadapter->allocated_mrs,
++			(ib_mr->rkey & 0x0fffff00) >> 8);
++
++	kfree(nesmr);
++
++	return 0;
+ }
+ 
+ 
+-- 
+1.5.3.3
+

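The nes_0130 patch above defers PBL accounting until the CQP register-STag request has actually completed, returning the reserved page-buffer-list entries to the adapter's free pools when the request times out or comes back with a major error code, so a failed registration no longer leaks PBL credits; nes_dereg_mr() likewise only credits the pools back once the destroy-STag request succeeds. A minimal userspace sketch of the rollback rule follows. The struct and function names are invented for illustration and the pbl_lock protection is omitted, so this shows only the arithmetic, not the locking discipline.

#include <stdio.h>

/* illustrative stand-in for the adapter's PBL pools (hypothetical type) */
struct pbl_pools {
	unsigned free_256pbl;
	unsigned free_4kpbl;
};

/*
 * Return previously reserved PBLs to the free pools after a failed
 * registration, mirroring the branch added by the patch: a two-level
 * request had reserved pbl_count + 1 4KB PBLs, a large single-level
 * request pbl_count 4KB PBLs, and a small one pbl_count 256B PBLs.
 */
static void rollback_pbls(struct pbl_pools *p, unsigned pbl_count,
			  unsigned residual_page_count)
{
	if (pbl_count == 0)
		return;
	if (pbl_count > 1)
		p->free_4kpbl += pbl_count + 1;
	else if (residual_page_count > 32)
		p->free_4kpbl += pbl_count;
	else
		p->free_256pbl += pbl_count;
}

int main(void)
{
	struct pbl_pools p = { .free_256pbl = 10, .free_4kpbl = 10 };

	rollback_pbls(&p, 3, 0);	/* two-level: gives back 4 4KB PBLs   */
	rollback_pbls(&p, 1, 40);	/* large single level: 1 4KB PBL      */
	rollback_pbls(&p, 1, 8);	/* small single level: 1 256B PBL     */
	printf("256B free %u, 4KB free %u\n", p.free_256pbl, p.free_4kpbl);
	return 0;
}
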
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0140_new_pbl_scheme.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0140_new_pbl_scheme.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0140_new_pbl_scheme.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,280 @@
+diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
+index a165a82..2f0545d 100644
+--- a/drivers/infiniband/hw/nes/nes_verbs.c
++++ b/drivers/infiniband/hw/nes/nes_verbs.c
+@@ -1888,21 +1888,75 @@ static int nes_destroy_cq(struct ib_cq *ib_cq)
+ 	return ret;
+ }
+ 
++/**
++ * root_256
++ */
++static u32 root_256(struct nes_device *nesdev,
++		    struct nes_root_vpbl *root_vpbl,
++		    struct nes_root_vpbl *new_root,
++		    u16 pbl_count_4k,
++		    u16 pbl_count_256)
++{
++	u64 leaf_pbl;
++	int i, j, k;
++
++	if (pbl_count_4k == 1) {
++		new_root->pbl_vbase = pci_alloc_consistent(nesdev->pcidev,
++						512, &new_root->pbl_pbase);
++
++		if (new_root->pbl_vbase == NULL)
++			return 0;
++
++		leaf_pbl = (u64)root_vpbl->pbl_pbase;
++		for (i = 0; i < 16; i++) {
++			new_root->pbl_vbase[i].pa_low =
++				cpu_to_le32((u32)leaf_pbl);
++			new_root->pbl_vbase[i].pa_high =
++				cpu_to_le32((u32)((((u64)leaf_pbl) >> 32)));
++			leaf_pbl += 256;
++		}
++	} else {
++		for (i = 3; i >= 0; i--) {
++			j = i * 16;
++			root_vpbl->pbl_vbase[j] = root_vpbl->pbl_vbase[i];
++			leaf_pbl = le32_to_cpu(root_vpbl->pbl_vbase[j].pa_low) +
++			    (((u64)le32_to_cpu(root_vpbl->pbl_vbase[j].pa_high))
++				<< 32);
++			for (k = 1; k < 16; k++) {
++				leaf_pbl += 256;
++				root_vpbl->pbl_vbase[j + k].pa_low =
++						cpu_to_le32((u32)leaf_pbl);
++				root_vpbl->pbl_vbase[j + k].pa_high =
++				    cpu_to_le32((u32)((((u64)leaf_pbl) >> 32)));
++			}
++		}
++	}
++
++	return 1;
++}
++
+ 
+ /**
+  * nes_reg_mr
+  */
+ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
+ 		u32 stag, u64 region_length, struct nes_root_vpbl *root_vpbl,
+-		dma_addr_t single_buffer, u16 pbl_count, u16 residual_page_count,
+-		int acc, u64 *iova_start)
++		dma_addr_t single_buffer, u16 pbl_count_4k,
++		u16 residual_page_count_4k, int acc, u64 *iova_start,
++		u16 *actual_pbl_cnt, u8 *used_4k_pbls)
+ {
+ 	struct nes_hw_cqp_wqe *cqp_wqe;
+ 	struct nes_cqp_request *cqp_request;
+ 	unsigned long flags;
+ 	int ret;
+ 	struct nes_adapter *nesadapter = nesdev->nesadapter;
+-	/* int count; */
++	uint pg_cnt = 0;
++	u16 pbl_count_256;
++	u16 pbl_count = 0;
++	u8  use_256_pbls = 0;
++	u8  use_4k_pbls = 0;
++	u16 use_two_level = (pbl_count_4k > 1) ? 1 : 0;
++	struct nes_root_vpbl new_root = {0, 0, 0};
+ 	u32 opcode = 0;
+ 	u16 major_code;
+ 
+@@ -1915,41 +1969,70 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
+ 	cqp_request->waiting = 1;
+ 	cqp_wqe = &cqp_request->cqp_wqe;
+ 
+-	spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+-	/* track PBL resources */
+-	if (pbl_count != 0) {
+-		if (pbl_count > 1) {
+-			/* Two level PBL */
+-			if ((pbl_count+1) > nesadapter->free_4kpbl) {
+-				nes_debug(NES_DBG_MR, "Out of 4KB Pbls for two level request.\n");
+-				spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+-				nes_free_cqp_request(nesdev, cqp_request);
+-				return -ENOMEM;
+-			} else {
+-				nesadapter->free_4kpbl -= pbl_count+1;
+-			}
+-		} else if (residual_page_count > 32) {
+-			if (pbl_count > nesadapter->free_4kpbl) {
+-				nes_debug(NES_DBG_MR, "Out of 4KB Pbls.\n");
+-				spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+-				nes_free_cqp_request(nesdev, cqp_request);
+-				return -ENOMEM;
+-			} else {
+-				nesadapter->free_4kpbl -= pbl_count;
++	if (pbl_count_4k) {
++		spin_lock_irqsave(&nesadapter->pbl_lock, flags);
++
++		pg_cnt = ((pbl_count_4k - 1) * 512) + residual_page_count_4k;
++		pbl_count_256 = (pg_cnt + 31) / 32;
++		if (pg_cnt <= 32) {
++			if (pbl_count_256 <= nesadapter->free_256pbl)
++				use_256_pbls = 1;
++			else if (pbl_count_4k <= nesadapter->free_4kpbl)
++				use_4k_pbls = 1;
++		} else if (pg_cnt <= 2048) {
++			if (((pbl_count_4k + use_two_level) <= nesadapter->free_4kpbl) &&
++			    (nesadapter->free_4kpbl > (nesadapter->max_4kpbl >> 1))) {
++				use_4k_pbls = 1;
++			} else if ((pbl_count_256 + 1) <= nesadapter->free_256pbl) {
++				use_256_pbls = 1;
++				use_two_level = 1;
++			} else if ((pbl_count_4k + use_two_level) <= nesadapter->free_4kpbl) {
++				use_4k_pbls = 1;
+ 			}
+ 		} else {
+-			if (pbl_count > nesadapter->free_256pbl) {
+-				nes_debug(NES_DBG_MR, "Out of 256B Pbls.\n");
+-				spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+-				nes_free_cqp_request(nesdev, cqp_request);
+-				return -ENOMEM;
+-			} else {
+-				nesadapter->free_256pbl -= pbl_count;
+-			}
++			if ((pbl_count_4k + 1) <= nesadapter->free_4kpbl)
++				use_4k_pbls = 1;
++		}
++
++		if (use_256_pbls) {
++			pbl_count = pbl_count_256;
++			nesadapter->free_256pbl -= pbl_count + use_two_level;
++		} else if (use_4k_pbls) {
++			pbl_count =  pbl_count_4k;
++			nesadapter->free_4kpbl -= pbl_count + use_two_level;
++		} else {
++			spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
++			nes_debug(NES_DBG_MR, "Out of Pbls\n");
++			nes_free_cqp_request(nesdev, cqp_request);
++			return -ENOMEM;
+ 		}
++
++		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ 	}
+ 
+-	spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
++	if (use_256_pbls && use_two_level) {
++		if (root_256(nesdev, root_vpbl, &new_root, pbl_count_4k, pbl_count_256) == 1) {
++			if (new_root.pbl_pbase != 0)
++				root_vpbl = &new_root;
++		} else {
++			spin_lock_irqsave(&nesadapter->pbl_lock, flags);
++			nesadapter->free_256pbl += pbl_count_256 + use_two_level;
++			use_256_pbls = 0;
++
++			if (pbl_count_4k == 1)
++				use_two_level = 0;
++			pbl_count = pbl_count_4k;
++
++			if ((pbl_count_4k + use_two_level) <= nesadapter->free_4kpbl) {
++				nesadapter->free_4kpbl -= pbl_count + use_two_level;
++				use_4k_pbls = 1;
++			}
++			spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
++
++			if (use_4k_pbls == 0)
++				return -ENOMEM;
++		}
++	}
+ 
+ 	opcode = NES_CQP_REGISTER_STAG | NES_CQP_STAG_RIGHTS_LOCAL_READ |
+ 					NES_CQP_STAG_VA_TO | NES_CQP_STAG_MR;
+@@ -1978,10 +2061,9 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
+ 	} else {
+ 		set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PA_LOW_IDX, root_vpbl->pbl_pbase);
+ 		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX, pbl_count);
+-		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_LEN_IDX,
+-				(((pbl_count - 1) * 4096) + (residual_page_count*8)));
++		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_LEN_IDX, (pg_cnt * 8));
+ 
+-		if ((pbl_count > 1) || (residual_page_count > 32))
++		if (use_4k_pbls)
+ 			cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_STAG_PBL_BLK_SIZE);
+ 	}
+ 	barrier();
+@@ -1997,23 +2079,26 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
+ 			stag, ret, cqp_request->major_code, cqp_request->minor_code);
+ 	major_code = cqp_request->major_code;
+ 	nes_put_cqp_request(nesdev, cqp_request);
++
+ 	if ((!ret || major_code) && pbl_count != 0) {
+ 		spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+-		if (pbl_count > 1)
+-			nesadapter->free_4kpbl += pbl_count+1;
+-		else if (residual_page_count > 32)
+-			nesadapter->free_4kpbl += pbl_count;
+-		else
+-			nesadapter->free_256pbl += pbl_count;
++		if (use_256_pbls)
++			nesadapter->free_256pbl += pbl_count + use_two_level;
++		else if (use_4k_pbls)
++			nesadapter->free_4kpbl += pbl_count + use_two_level;
+ 		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ 	}
++	if (new_root.pbl_pbase)
++		pci_free_consistent(nesdev->pcidev, 512, new_root.pbl_vbase,
++				    new_root.pbl_pbase);
++
+ 	if (!ret)
+ 		return -ETIME;
+ 	else if (major_code)
+ 		return -EIO;
+-	else
+-		return 0;
+ 
++	*actual_pbl_cnt = pbl_count + use_two_level;
++	*used_4k_pbls = use_4k_pbls;
+ 	return 0;
+ }
+ 
+@@ -2178,18 +2263,14 @@ static struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd,
+ 		pbl_count = root_pbl_index;
+ 	}
+ 	ret = nes_reg_mr(nesdev, nespd, stag, region_length, &root_vpbl,
+-			buffer_list[0].addr, pbl_count, (u16)cur_pbl_index, acc, iova_start);
++			buffer_list[0].addr, pbl_count, (u16)cur_pbl_index, acc, iova_start,
++			&nesmr->pbls_used, &nesmr->pbl_4k);
+ 
+ 	if (ret == 0) {
+ 		nesmr->ibmr.rkey = stag;
+ 		nesmr->ibmr.lkey = stag;
+ 		nesmr->mode = IWNES_MEMREG_TYPE_MEM;
+ 		ibmr = &nesmr->ibmr;
+-		nesmr->pbl_4k = ((pbl_count > 1) || (cur_pbl_index > 32)) ? 1 : 0;
+-		nesmr->pbls_used = pbl_count;
+-		if (pbl_count > 1) {
+-			nesmr->pbls_used++;
+-		}
+ 	} else {
+ 		kfree(nesmr);
+ 		ibmr = ERR_PTR(-ENOMEM);
+@@ -2467,8 +2548,9 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ 					stag, (unsigned int)iova_start,
+ 					(unsigned int)region_length, stag_index,
+ 					(unsigned long long)region->length, pbl_count);
+-			ret = nes_reg_mr( nesdev, nespd, stag, region->length, &root_vpbl,
+-					first_dma_addr, pbl_count, (u16)cur_pbl_index, acc, &iova_start);
++			ret = nes_reg_mr(nesdev, nespd, stag, region->length, &root_vpbl,
++					 first_dma_addr, pbl_count, (u16)cur_pbl_index, acc,
++					 &iova_start, &nesmr->pbls_used, &nesmr->pbl_4k);
+ 
+ 			nes_debug(NES_DBG_MR, "ret=%d\n", ret);
+ 
+@@ -2477,11 +2559,6 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ 				nesmr->ibmr.lkey = stag;
+ 				nesmr->mode = IWNES_MEMREG_TYPE_MEM;
+ 				ibmr = &nesmr->ibmr;
+-				nesmr->pbl_4k = ((pbl_count > 1) || (cur_pbl_index > 32)) ? 1 : 0;
+-				nesmr->pbls_used = pbl_count;
+-				if (pbl_count > 1) {
+-					nesmr->pbls_used++;
+-				}
+ 			} else {
+ 				ib_umem_release(region);
+ 				kfree(nesmr);
+-- 
+1.5.3.3
+

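The replacement scheme above first converts the region's 4KB-PBL shape into a page count (512 page addresses per 4KB PBL plus the residual pages), derives the equivalent number of 256-byte PBLs (32 addresses each), and then prefers whichever pool fits: tiny regions take 256-byte PBLs, mid-sized regions only consume 4KB PBLs while that pool is still more than half full, and very large regions must use 4KB PBLs. The standalone model below follows the same decision tree and use_two_level bookkeeping as the patch, but the types, the missing pbl_lock, and the return convention are simplifications for illustration.

#include <stdio.h>

struct pools { unsigned free_256pbl, free_4kpbl, max_4kpbl; };

/* 0 = no PBLs available, 1 = use 256B PBLs, 2 = use 4KB PBLs */
static int choose_pbls(struct pools *p, unsigned pbl_count_4k,
		       unsigned residual_page_count_4k, unsigned *pbl_count,
		       unsigned *two_level)
{
	unsigned pg_cnt = (pbl_count_4k - 1) * 512 + residual_page_count_4k;
	unsigned pbl_count_256 = (pg_cnt + 31) / 32;
	unsigned use_two_level = (pbl_count_4k > 1) ? 1 : 0;
	int use_256 = 0, use_4k = 0;

	if (pg_cnt <= 32) {
		if (pbl_count_256 <= p->free_256pbl)
			use_256 = 1;
		else if (pbl_count_4k <= p->free_4kpbl)
			use_4k = 1;
	} else if (pg_cnt <= 2048) {
		if (pbl_count_4k + use_two_level <= p->free_4kpbl &&
		    p->free_4kpbl > (p->max_4kpbl >> 1)) {
			use_4k = 1;
		} else if (pbl_count_256 + 1 <= p->free_256pbl) {
			use_256 = 1;
			use_two_level = 1;
		} else if (pbl_count_4k + use_two_level <= p->free_4kpbl) {
			use_4k = 1;
		}
	} else {
		if (pbl_count_4k + 1 <= p->free_4kpbl)
			use_4k = 1;
	}

	if (use_256) {
		*pbl_count = pbl_count_256;
		p->free_256pbl -= pbl_count_256 + use_two_level;
	} else if (use_4k) {
		*pbl_count = pbl_count_4k;
		p->free_4kpbl -= pbl_count_4k + use_two_level;
	} else {
		return 0;		/* the driver returns -ENOMEM here */
	}
	*two_level = use_two_level;
	return use_256 ? 1 : 2;
}

int main(void)
{
	struct pools p = { .free_256pbl = 64, .free_4kpbl = 16, .max_4kpbl = 16 };
	unsigned count, two_level;
	int which = choose_pbls(&p, 2, 100, &count, &two_level);

	printf("pool %d, count %u, two-level %u\n", which, count, two_level);
	return 0;
}
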
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0150_ibv_devinfo.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0150_ibv_devinfo.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0150_ibv_devinfo.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,29 @@
+diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
+index 1060008..8cd76f9 100644
+--- a/drivers/infiniband/hw/nes/nes_hw.c
++++ b/drivers/infiniband/hw/nes/nes_hw.c
+@@ -254,6 +254,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
+ 	u32 adapter_size;
+ 	u32 arp_table_size;
+ 	u16 vendor_id;
++	u16 device_id;
+ 	u8  OneG_Mode;
+ 	u8  func_index;
+ 
+@@ -356,6 +357,13 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
+ 		return NULL;
+ 	}
+ 
++	nesadapter->vendor_id = (((u32) nesadapter->mac_addr_high) << 8) |
++				(nesadapter->mac_addr_low >> 24);
++
++	pci_bus_read_config_word(nesdev->pcidev->bus, nesdev->pcidev->devfn,
++				 PCI_DEVICE_ID, &device_id);
++	nesadapter->vendor_part_id = device_id;
++
+ 	if (nes_init_serdes(nesdev, hw_rev, port_count, nesadapter,
+ 							OneG_Mode)) {
+ 		kfree(nesadapter);
+-- 
+1.5.3.3
+

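This ibv_devinfo fix fills in the vendor_id and vendor_part_id that userspace reports: the vendor ID is the 24-bit OUI assembled from the adapter's MAC address registers (the high 16 bits shifted up a byte, OR'd with the top byte of the low 32 bits), and the part ID is read from PCI config space with pci_bus_read_config_word(). The shift arithmetic is the only subtle part; the small model below uses made-up register contents, with mac_hi/mac_lo standing in for mac_addr_high/mac_addr_low.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical register contents for MAC 00:12:55:aa:bb:cc */
	uint16_t mac_hi = 0x0012;	/* two most significant octets */
	uint32_t mac_lo = 0x55aabbcc;	/* remaining four octets       */

	/* same construction as the patch: OUI = first three MAC octets */
	uint32_t vendor_id = ((uint32_t)mac_hi << 8) | (mac_lo >> 24);

	printf("vendor_id (OUI) = 0x%06x\n", (unsigned)vendor_id);	/* 0x001255 */
	return 0;
}
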
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0160_aeq.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0160_aeq.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0160_aeq.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,28 @@
+diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
+index 8cd76f9..56ef919 100644
+--- a/drivers/infiniband/hw/nes/nes_hw.c
++++ b/drivers/infiniband/hw/nes/nes_hw.c
+@@ -2269,6 +2269,8 @@ static void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq)
+ 
+ 		if (++head >= aeq_size)
+ 			head = 0;
++
++		nes_write32(nesdev->regs+NES_AEQ_ALLOC, 1 << 16);
+ 	}
+ 	while (1);
+ 	aeq->aeq_head = head;
+diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
+index 6f8712d..bf7ecfa 100644
+--- a/drivers/infiniband/hw/nes/nes_hw.h
++++ b/drivers/infiniband/hw/nes/nes_hw.h
+@@ -61,6 +61,7 @@ enum pci_regs {
+ 	NES_CQ_ACK = 0x0034,
+ 	NES_WQE_ALLOC = 0x0040,
+ 	NES_CQE_ALLOC = 0x0044,
++	NES_AEQ_ALLOC = 0x0048
+ };
+ 
+ enum indexed_regs {
+-- 
+1.5.3.3
+

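The AEQ fix writes 1 << 16 to the newly defined NES_AEQ_ALLOC doorbell once per asynchronous event processed, so the hardware's count of free AEQ entries keeps pace with the software head pointer rather than the queue eventually running dry. The toy consumer below shows the same "advance head, wrap, credit one entry back" shape; write_doorbell() is a placeholder for the MMIO write (nes_write32 in the driver), not a real API.

#include <stdio.h>

#define AEQ_SIZE 8

/* placeholder for the MMIO doorbell write (nes_write32 in the driver) */
static void write_doorbell(unsigned credits)
{
	printf("doorbell: return %u entry\n", credits);
}

int main(void)
{
	int valid[AEQ_SIZE] = { 1, 1, 1, 0, 0, 0, 0, 0 };
	unsigned head = 0;

	/* consume events until the next slot is not yet valid */
	while (valid[head]) {
		printf("process AEQE at %u\n", head);
		valid[head] = 0;
		if (++head >= AEQ_SIZE)
			head = 0;
		write_doorbell(1);	/* one credit per processed event */
	}
	return 0;
}
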
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0170_cleanup_tx.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0170_cleanup_tx.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0170_cleanup_tx.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,276 @@
+diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
+index 56ef919..898a03a 100644
+--- a/drivers/infiniband/hw/nes/nes_hw.c
++++ b/drivers/infiniband/hw/nes/nes_hw.c
+@@ -1644,7 +1644,6 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
+ 	nesvnic->post_cqp_request = nes_post_cqp_request;
+ 	nesvnic->mcrq_mcast_filter = NULL;
+ 
+-	spin_lock_init(&nesvnic->nic.sq_lock);
+ 	spin_lock_init(&nesvnic->nic.rq_lock);
+ 
+ 	/* setup the RQ */
+@@ -2632,9 +2631,9 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
+ 						} else
+ 							break;
+ 					}
+-					if (skb)
+-						dev_kfree_skb_any(skb);
+ 				}
++				if (skb)
++					dev_kfree_skb_any(skb);
+ 				nesnic->sq_tail++;
+ 				nesnic->sq_tail &= nesnic->sq_size-1;
+ 				if (sq_cqes > 128) {
+diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
+index bf7ecfa..f41a871 100644
+--- a/drivers/infiniband/hw/nes/nes_hw.h
++++ b/drivers/infiniband/hw/nes/nes_hw.h
+@@ -876,7 +876,6 @@ struct nes_hw_nic {
+ 	u8 replenishing_rq;
+ 	u8 reserved;
+ 
+-	spinlock_t sq_lock;
+ 	spinlock_t rq_lock;
+ };
+ 
+@@ -1148,7 +1147,6 @@ struct nes_ib_device;
+ struct nes_vnic {
+ 	struct nes_ib_device *nesibdev;
+ 	u64 sq_full;
+-	u64 sq_locked;
+ 	u64 tso_requests;
+ 	u64 segmented_tso_requests;
+ 	u64 linearized_skbs;
+diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
+index 9094310..d16e9bc 100644
+--- a/drivers/infiniband/hw/nes/nes_nic.c
++++ b/drivers/infiniband/hw/nes/nes_nic.c
+@@ -401,8 +401,7 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
+ 	if (skb_headlen(skb) == skb->len) {
+ 		if (skb_headlen(skb) <= NES_FIRST_FRAG_SIZE) {
+ 			nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_2_1_IDX] = 0;
+-			nesnic->tx_skb[nesnic->sq_head] = NULL;
+-			dev_kfree_skb(skb);
++			nesnic->tx_skb[nesnic->sq_head] = skb;
+ 		}
+ 	} else {
+ 		/* Deal with Fragments */
+@@ -454,7 +453,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	u32 wqe_count=1;
+ 	u32 send_rc;
+ 	struct iphdr *iph;
+-	unsigned long flags;
+ 	__le16 *wqe_fragment_length;
+ 	u32 nr_frags;
+ 	u32 original_first_length;
+@@ -481,13 +479,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	if (netif_queue_stopped(netdev))
+ 		return NETDEV_TX_BUSY;
+ 
+-	local_irq_save(flags);
+-	if (!spin_trylock(&nesnic->sq_lock)) {
+-		local_irq_restore(flags);
+-		nesvnic->sq_locked++;
+-		return NETDEV_TX_LOCKED;
+-	}
+-
+ 	/* Check if SQ is full */
+ 	if ((((nesnic->sq_tail+(nesnic->sq_size*2))-nesnic->sq_head) & (nesnic->sq_size - 1)) == 1) {
+ 		if (!netif_queue_stopped(netdev)) {
+@@ -499,7 +490,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 			}
+ 		}
+ 		nesvnic->sq_full++;
+-		spin_unlock_irqrestore(&nesnic->sq_lock, flags);
+ 		return NETDEV_TX_BUSY;
+ 	}
+ 
+@@ -532,7 +522,6 @@ sq_no_longer_full:
+ 					}
+ 				}
+ 				nesvnic->sq_full++;
+-				spin_unlock_irqrestore(&nesnic->sq_lock, flags);
+ 				nes_debug(NES_DBG_NIC_TX, "%s: HNIC SQ full- TSO request has too many frags!\n",
+ 						netdev->name);
+ 				return NETDEV_TX_BUSY;
+@@ -657,17 +646,13 @@ tso_sq_no_longer_full:
+ 			skb_set_transport_header(skb, hoffset);
+ 			skb_set_network_header(skb, nhoffset);
+ 			send_rc = nes_nic_send(skb, netdev);
+-			if (send_rc != NETDEV_TX_OK) {
+-				spin_unlock_irqrestore(&nesnic->sq_lock, flags);
++			if (send_rc != NETDEV_TX_OK)
+ 				return NETDEV_TX_OK;
+-			}
+ 		}
+ 	} else {
+ 		send_rc = nes_nic_send(skb, netdev);
+-		if (send_rc != NETDEV_TX_OK) {
+-			spin_unlock_irqrestore(&nesnic->sq_lock, flags);
++		if (send_rc != NETDEV_TX_OK)
+ 			return NETDEV_TX_OK;
+-		}
+ 	}
+ 
+ 	barrier();
+@@ -677,7 +662,6 @@ tso_sq_no_longer_full:
+ 				(wqe_count << 24) | (1 << 23) | nesvnic->nic.qp_id);
+ 
+ 	netdev->trans_start = jiffies;
+-	spin_unlock_irqrestore(&nesnic->sq_lock, flags);
+ 
+ 	return NETDEV_TX_OK;
+ }
+@@ -1015,7 +999,6 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
+ 	"Pause Frames Received",
+ 	"Internal Routing Errors",
+ 	"SQ SW Dropped SKBs",
+-	"SQ Locked",
+ 	"SQ Full",
+ 	"Segmented TSO Requests",
+ 	"Rx Symbol Errors",
+@@ -1132,16 +1115,17 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
+ 	struct nes_device *nesdev = nesvnic->nesdev;
+ 	u32 nic_count;
+ 	u32 u32temp;
++	u32 index = 0;
+ 
+ 	target_ethtool_stats->n_stats = NES_ETHTOOL_STAT_COUNT;
+-	target_stat_values[0] = nesvnic->nesdev->link_status_interrupts;
+-	target_stat_values[1] = nesvnic->linearized_skbs;
+-	target_stat_values[2] = nesvnic->tso_requests;
++	target_stat_values[index] = nesvnic->nesdev->link_status_interrupts;
++	target_stat_values[++index] = nesvnic->linearized_skbs;
++	target_stat_values[++index] = nesvnic->tso_requests;
+ 
+ 	u32temp = nes_read_indexed(nesdev,
+ 			NES_IDX_MAC_TX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ 	nesvnic->nesdev->mac_pause_frames_sent += u32temp;
+-	target_stat_values[3] = nesvnic->nesdev->mac_pause_frames_sent;
++	target_stat_values[++index] = nesvnic->nesdev->mac_pause_frames_sent;
+ 
+ 	u32temp = nes_read_indexed(nesdev,
+ 			NES_IDX_MAC_RX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+@@ -1212,60 +1196,59 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
+ 		nesvnic->endnode_ipv4_tcp_retransmits += u32temp;
+ 	}
+ 
+-	target_stat_values[4] = nesvnic->nesdev->mac_pause_frames_received;
+-	target_stat_values[5] = nesdev->nesadapter->nic_rx_eth_route_err;
+-	target_stat_values[6] = nesvnic->tx_sw_dropped;
+-	target_stat_values[7] = nesvnic->sq_locked;
+-	target_stat_values[8] = nesvnic->sq_full;
+-	target_stat_values[9] = nesvnic->segmented_tso_requests;
+-	target_stat_values[10] = nesvnic->nesdev->mac_rx_symbol_err_frames;
+-	target_stat_values[11] = nesvnic->nesdev->mac_rx_jabber_frames;
+-	target_stat_values[12] = nesvnic->nesdev->mac_rx_oversized_frames;
+-	target_stat_values[13] = nesvnic->nesdev->mac_rx_short_frames;
+-	target_stat_values[14] = nesvnic->endnode_nstat_rx_discard;
+-	target_stat_values[15] = nesvnic->endnode_nstat_rx_octets;
+-	target_stat_values[16] = nesvnic->endnode_nstat_rx_frames;
+-	target_stat_values[17] = nesvnic->endnode_nstat_tx_octets;
+-	target_stat_values[18] = nesvnic->endnode_nstat_tx_frames;
+-	target_stat_values[19] = mh_detected;
+-	target_stat_values[20] = mh_pauses_sent;
+-	target_stat_values[21] = nesvnic->endnode_ipv4_tcp_retransmits;
+-	target_stat_values[22] = atomic_read(&cm_connects);
+-	target_stat_values[23] = atomic_read(&cm_accepts);
+-	target_stat_values[24] = atomic_read(&cm_disconnects);
+-	target_stat_values[25] = atomic_read(&cm_connecteds);
+-	target_stat_values[26] = atomic_read(&cm_connect_reqs);
+-	target_stat_values[27] = atomic_read(&cm_rejects);
+-	target_stat_values[28] = atomic_read(&mod_qp_timouts);
+-	target_stat_values[29] = atomic_read(&qps_created);
+-	target_stat_values[30] = atomic_read(&sw_qps_destroyed);
+-	target_stat_values[31] = atomic_read(&qps_destroyed);
+-	target_stat_values[32] = atomic_read(&cm_closes);
+-	target_stat_values[33] = cm_packets_sent;
+-	target_stat_values[34] = cm_packets_bounced;
+-	target_stat_values[35] = cm_packets_created;
+-	target_stat_values[36] = cm_packets_received;
+-	target_stat_values[37] = cm_packets_dropped;
+-	target_stat_values[38] = cm_packets_retrans;
+-	target_stat_values[39] = cm_listens_created;
+-	target_stat_values[40] = cm_listens_destroyed;
+-	target_stat_values[41] = cm_backlog_drops;
+-	target_stat_values[42] = atomic_read(&cm_loopbacks);
+-	target_stat_values[43] = atomic_read(&cm_nodes_created);
+-	target_stat_values[44] = atomic_read(&cm_nodes_destroyed);
+-	target_stat_values[45] = atomic_read(&cm_accel_dropped_pkts);
+-	target_stat_values[46] = atomic_read(&cm_resets_recvd);
+-	target_stat_values[47] = int_mod_timer_init;
+-	target_stat_values[48] = int_mod_cq_depth_1;
+-	target_stat_values[49] = int_mod_cq_depth_4;
+-	target_stat_values[50] = int_mod_cq_depth_16;
+-	target_stat_values[51] = int_mod_cq_depth_24;
+-	target_stat_values[52] = int_mod_cq_depth_32;
+-	target_stat_values[53] = int_mod_cq_depth_128;
+-	target_stat_values[54] = int_mod_cq_depth_256;
+-	target_stat_values[55] = nesvnic->lro_mgr.stats.aggregated;
+-	target_stat_values[56] = nesvnic->lro_mgr.stats.flushed;
+-	target_stat_values[57] = nesvnic->lro_mgr.stats.no_desc;
++	target_stat_values[++index] = nesvnic->nesdev->mac_pause_frames_received;
++	target_stat_values[++index] = nesdev->nesadapter->nic_rx_eth_route_err;
++	target_stat_values[++index] = nesvnic->tx_sw_dropped;
++	target_stat_values[++index] = nesvnic->sq_full;
++	target_stat_values[++index] = nesvnic->segmented_tso_requests;
++	target_stat_values[++index] = nesvnic->nesdev->mac_rx_symbol_err_frames;
++	target_stat_values[++index] = nesvnic->nesdev->mac_rx_jabber_frames;
++	target_stat_values[++index] = nesvnic->nesdev->mac_rx_oversized_frames;
++	target_stat_values[++index] = nesvnic->nesdev->mac_rx_short_frames;
++	target_stat_values[++index] = nesvnic->endnode_nstat_rx_discard;
++	target_stat_values[++index] = nesvnic->endnode_nstat_rx_octets;
++	target_stat_values[++index] = nesvnic->endnode_nstat_rx_frames;
++	target_stat_values[++index] = nesvnic->endnode_nstat_tx_octets;
++	target_stat_values[++index] = nesvnic->endnode_nstat_tx_frames;
++	target_stat_values[++index] = mh_detected;
++	target_stat_values[++index] = mh_pauses_sent;
++	target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
++	target_stat_values[++index] = atomic_read(&cm_connects);
++	target_stat_values[++index] = atomic_read(&cm_accepts);
++	target_stat_values[++index] = atomic_read(&cm_disconnects);
++	target_stat_values[++index] = atomic_read(&cm_connecteds);
++	target_stat_values[++index] = atomic_read(&cm_connect_reqs);
++	target_stat_values[++index] = atomic_read(&cm_rejects);
++	target_stat_values[++index] = atomic_read(&mod_qp_timouts);
++	target_stat_values[++index] = atomic_read(&qps_created);
++	target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
++	target_stat_values[++index] = atomic_read(&qps_destroyed);
++	target_stat_values[++index] = atomic_read(&cm_closes);
++	target_stat_values[++index] = cm_packets_sent;
++	target_stat_values[++index] = cm_packets_bounced;
++	target_stat_values[++index] = cm_packets_created;
++	target_stat_values[++index] = cm_packets_received;
++	target_stat_values[++index] = cm_packets_dropped;
++	target_stat_values[++index] = cm_packets_retrans;
++	target_stat_values[++index] = cm_listens_created;
++	target_stat_values[++index] = cm_listens_destroyed;
++	target_stat_values[++index] = cm_backlog_drops;
++	target_stat_values[++index] = atomic_read(&cm_loopbacks);
++	target_stat_values[++index] = atomic_read(&cm_nodes_created);
++	target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
++	target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
++	target_stat_values[++index] = atomic_read(&cm_resets_recvd);
++	target_stat_values[++index] = int_mod_timer_init;
++	target_stat_values[++index] = int_mod_cq_depth_1;
++	target_stat_values[++index] = int_mod_cq_depth_4;
++	target_stat_values[++index] = int_mod_cq_depth_16;
++	target_stat_values[++index] = int_mod_cq_depth_24;
++	target_stat_values[++index] = int_mod_cq_depth_32;
++	target_stat_values[++index] = int_mod_cq_depth_128;
++	target_stat_values[++index] = int_mod_cq_depth_256;
++	target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
++	target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
++	target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
+ 
+ }
+ 
+@@ -1619,7 +1602,6 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
+ 	nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
+ 	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ 	netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
+-	netdev->features |= NETIF_F_LLTX;
+ 
+ 	/* Fill in the port structure */
+ 	nesvnic->netdev = netdev;

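Besides dropping the driver-private sq_lock (and the NETIF_F_LLTX flag that justified it) and keeping the transmitted skb in tx_skb[] until completion instead of freeing it at post time, the cleanup above rewrites nes_netdev_get_ethtool_stats() to fill target_stat_values[] through a pre-incremented index, so deleting the "SQ Locked" counter does not force renumbering dozens of hard-coded subscripts. A compressed model of that fill pattern, with hypothetical counters in place of the driver's fields:

#include <stdio.h>

int main(void)
{
	unsigned long long stats[4];
	unsigned index = 0;

	/* hypothetical counters standing in for the driver's fields */
	unsigned long long link_irqs = 3, tso_reqs = 7, sq_full = 1, drops = 0;

	/* first entry uses index directly, the rest pre-increment, exactly
	 * as in the patched nes_netdev_get_ethtool_stats() */
	stats[index]   = link_irqs;
	stats[++index] = tso_reqs;
	stats[++index] = sq_full;
	stats[++index] = drops;

	for (unsigned i = 0; i <= index; i++)
		printf("stat[%u] = %llu\n", i, stats[i]);
	return 0;
}
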
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0180_lsmm.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0180_lsmm.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0180_lsmm.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,117 @@
+diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
+index 246619b..3d94c07 100644
+--- a/drivers/infiniband/hw/nes/nes_cm.c
++++ b/drivers/infiniband/hw/nes/nes_cm.c
+@@ -2499,12 +2499,14 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt)
+ 	int ret = 0;
+ 	struct nes_vnic *nesvnic;
+ 	struct nes_device *nesdev;
++	struct nes_ib_device *nesibdev;
+ 
+ 	nesvnic = to_nesvnic(nesqp->ibqp.device);
+ 	if (!nesvnic)
+ 		return -EINVAL;
+ 
+ 	nesdev = nesvnic->nesdev;
++	nesibdev = nesvnic->nesibdev;
+ 
+ 	nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
+ 			atomic_read(&nesvnic->netdev->refcnt));
+@@ -2516,6 +2518,8 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt)
+ 	} else {
+ 		/* Need to free the Last Streaming Mode Message */
+ 		if (nesqp->ietf_frame) {
++			if (nesqp->lsmm_mr)
++				nesibdev->ibdev.dereg_mr(nesqp->lsmm_mr);
+ 			pci_free_consistent(nesdev->pcidev,
+ 					nesqp->private_data_len+sizeof(struct ietf_mpa_frame),
+ 					nesqp->ietf_frame, nesqp->ietf_frame_pbase);
+@@ -2552,6 +2556,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 	u32 crc_value;
+ 	int ret;
+ 	int passive_state;
++	struct nes_ib_device *nesibdev;
++	struct ib_mr *ibmr = NULL;
++	struct ib_phys_buf ibphysbuf;
++	struct nes_pd *nespd;
++
++
+ 
+ 	ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
+ 	if (!ibqp)
+@@ -2610,6 +2620,26 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 	if (cm_id->remote_addr.sin_addr.s_addr !=
+ 			cm_id->local_addr.sin_addr.s_addr) {
+ 		u64temp = (unsigned long)nesqp;
++		nesibdev = nesvnic->nesibdev;
++		nespd = nesqp->nespd;
++		ibphysbuf.addr = nesqp->ietf_frame_pbase;
++		ibphysbuf.size = conn_param->private_data_len + 
++					sizeof(struct ietf_mpa_frame);
++		ibmr = nesibdev->ibdev.reg_phys_mr((struct ib_pd *)nespd, 
++						&ibphysbuf, 1,
++						IB_ACCESS_LOCAL_WRITE, 
++						(u64 *)&nesqp->ietf_frame);
++		if (!ibmr) {
++			nes_debug(NES_DBG_CM, "Unable to register memory region"
++					"for lSMM for cm_node = %p \n",
++					cm_node);
++			return -ENOMEM;
++		}
++
++		ibmr->pd = &nespd->ibpd;
++		ibmr->device = nespd->ibpd.device;
++		nesqp->lsmm_mr = ibmr;
++
+ 		u64temp |= NES_SW_CONTEXT_ALIGN>>1;
+ 		set_wqe_64bit_value(wqe->wqe_words,
+ 			NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX,
+@@ -2620,14 +2650,13 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 		wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] =
+ 			cpu_to_le32(conn_param->private_data_len +
+ 			sizeof(struct ietf_mpa_frame));
+-		wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] =
+-			cpu_to_le32((u32)nesqp->ietf_frame_pbase);
+-		wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] =
+-			cpu_to_le32((u32)((u64)nesqp->ietf_frame_pbase >> 32));
++		set_wqe_64bit_value(wqe->wqe_words,
++					NES_IWARP_SQ_WQE_FRAG0_LOW_IDX,
++					(u64)nesqp->ietf_frame);
+ 		wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] =
+ 			cpu_to_le32(conn_param->private_data_len +
+ 			sizeof(struct ietf_mpa_frame));
+-		wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0;
++		wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = ibmr->lkey;
+ 
+ 		nesqp->nesqp_context->ird_ord_sizes |=
+ 			cpu_to_le32(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
+diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
+index 2f0545d..a7c0c1f 100644
+--- a/drivers/infiniband/hw/nes/nes_verbs.c
++++ b/drivers/infiniband/hw/nes/nes_verbs.c
+@@ -1363,8 +1363,10 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
+ 					NES_QPCONTEXT_MISC_RQ_SIZE_SHIFT);
+ 			nesqp->nesqp_context->misc |= cpu_to_le32((u32)nesqp->hwqp.sq_encoded_size <<
+ 					NES_QPCONTEXT_MISC_SQ_SIZE_SHIFT);
++			if (!udata) {
+ 				nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_PRIV_EN);
+ 				nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_FAST_REGISTER_EN);
++			}
+ 			nesqp->nesqp_context->cqs = cpu_to_le32(nesqp->nesscq->hw_cq.cq_number +
+ 					((u32)nesqp->nesrcq->hw_cq.cq_number << 16));
+ 			u64temp = (u64)nesqp->hwqp.sq_pbase;
+diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h
+index da3c368..5e48f67 100644
+--- a/drivers/infiniband/hw/nes/nes_verbs.h
++++ b/drivers/infiniband/hw/nes/nes_verbs.h
+@@ -134,6 +134,7 @@ struct nes_qp {
+ 	struct ietf_mpa_frame *ietf_frame;
+ 	dma_addr_t            ietf_frame_pbase;
+ 	wait_queue_head_t     state_waitq;
++	struct ib_mr          *lsmm_mr;
+ 	unsigned long         socket;
+ 	struct nes_hw_qp      hwqp;
+ 	struct work_struct    work;
+-- 
+1.5.3.3
+

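The LSMM patch registers a local-write memory region over the IETF MPA frame before posting the last streaming mode message, so the send WQE carries the MR's lkey and the frame's virtual address rather than a raw bus address, and nes_disconnect() deregisters that MR before freeing the frame buffer. The pairing of the two paths is the part worth keeping straight; below is a minimal model of the "register in accept, stash the handle on the QP, deregister in disconnect" lifecycle, with lsmm_register()/lsmm_deregister() as invented stand-ins for the reg_phys_mr/dereg_mr verbs.

#include <stdio.h>
#include <stdlib.h>

/* invented stand-ins for the verbs the patch calls through nesibdev */
struct lsmm_mr { unsigned lkey; };
struct qp { struct lsmm_mr *lsmm_mr; void *ietf_frame; };

static struct lsmm_mr *lsmm_register(void *buf, size_t len)
{
	struct lsmm_mr *mr = malloc(sizeof(*mr));

	if (mr)
		mr->lkey = 0x1234;	/* pretend key covering buf..buf+len */
	(void)buf; (void)len;
	return mr;
}

static void lsmm_deregister(struct lsmm_mr *mr)
{
	free(mr);
}

static int accept_path(struct qp *qp, void *frame, size_t len)
{
	qp->ietf_frame = frame;
	qp->lsmm_mr = lsmm_register(frame, len);	/* as in nes_accept() */
	if (!qp->lsmm_mr)
		return -1;
	printf("post LSMM with lkey 0x%x\n", qp->lsmm_mr->lkey);
	return 0;
}

static void disconnect_path(struct qp *qp)
{
	if (qp->lsmm_mr) {			/* as in nes_disconnect() */
		lsmm_deregister(qp->lsmm_mr);
		qp->lsmm_mr = NULL;
	}
	qp->ietf_frame = NULL;			/* frame freed after the MR */
}

int main(void)
{
	char frame[128];
	struct qp qp = { 0 };

	if (accept_path(&qp, frame, sizeof(frame)) == 0)
		disconnect_path(&qp);
	return 0;
}
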
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0190_dev_alloc_skb.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0190_dev_alloc_skb.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0190_dev_alloc_skb.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,67 @@
+diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
+index 3d94c07..3a51539 100644
+--- a/drivers/infiniband/hw/nes/nes_cm.c
++++ b/drivers/infiniband/hw/nes/nes_cm.c
+@@ -94,7 +94,6 @@ static int mini_cm_set(struct nes_cm_core *, u32, u32);
+ 
+ static void form_cm_frame(struct sk_buff *, struct nes_cm_node *,
+ 	void *, u32, void *, u32, u8);
+-static struct sk_buff *get_free_pkt(u32);
+ static int add_ref_cm_node(struct nes_cm_node *);
+ static int rem_ref_cm_node(struct nes_cm_core *, struct nes_cm_node *);
+ 
+@@ -687,7 +686,7 @@ static int send_syn(struct nes_cm_node *cm_node, u32 sendack,
+ 	optionssize += 1;
+ 
+ 	if (!skb)
+-		skb = get_free_pkt(MAX_CM_BUFFER);
++		skb = dev_alloc_skb(MAX_CM_BUFFER);
+ 	if (!skb) {
+ 		nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
+ 		return -1;
+@@ -712,7 +711,7 @@ static int send_reset(struct nes_cm_node *cm_node, struct sk_buff *skb)
+ 	int flags = SET_RST | SET_ACK;
+ 
+ 	if (!skb)
+-		skb = get_free_pkt(MAX_CM_BUFFER);
++		skb = dev_alloc_skb(MAX_CM_BUFFER);
+ 	if (!skb) {
+ 		nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
+ 		return -1;
+@@ -733,7 +732,7 @@ static int send_ack(struct nes_cm_node *cm_node, struct sk_buff *skb)
+ 	int ret;
+ 
+ 	if (!skb)
+-		skb = get_free_pkt(MAX_CM_BUFFER);
++		skb = dev_alloc_skb(MAX_CM_BUFFER);
+ 
+ 	if (!skb) {
+ 		nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
+@@ -756,7 +755,7 @@ static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb)
+ 
+ 	/* if we didn't get a frame get one */
+ 	if (!skb)
+-		skb = get_free_pkt(MAX_CM_BUFFER);
++		skb = dev_alloc_skb(MAX_CM_BUFFER);
+ 
+ 	if (!skb) {
+ 		nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
+@@ -771,15 +770,6 @@ static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb)
+ 
+ 
+ /**
+- * get_free_pkt
+- */
+-static struct sk_buff *get_free_pkt(u32 pktsize)
+-{
+-		return dev_alloc_skb(pktsize);
+-}
+-
+-
+-/**
+  * find_node - find a cm node that matches the reference cm node
+  */
+ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
+-- 
+1.5.3.3
+

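The dev_alloc_skb patch removes get_free_pkt(), which had shrunk to a one-line wrapper, and calls dev_alloc_skb(MAX_CM_BUFFER) directly at each "use the caller's skb or allocate one" site. The underlying idiom, allocate-on-demand with a clean failure path, is sketched below with plain malloc standing in for dev_alloc_skb and a hypothetical send_frame() consumer; ownership handling in the real driver differs (the skb is handed off to the send path rather than freed here).

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_CM_BUFFER 512

/* hypothetical consumer of the buffer */
static int send_frame(void *buf)
{
	printf("sending frame at %p\n", buf);
	return 0;
}

/* use the caller-supplied buffer if one was passed in, otherwise
 * allocate one on demand, and fail cleanly when no buffer can be had;
 * a locally allocated buffer is released again in this toy model */
static int send_with_optional_buf(void *buf)
{
	int allocated = 0;
	int ret;

	if (!buf) {
		buf = malloc(MAX_CM_BUFFER);
		allocated = 1;
	}
	if (!buf)
		return -ENOMEM;

	ret = send_frame(buf);
	if (allocated)
		free(buf);
	return ret;
}

int main(void)
{
	char stack_buf[MAX_CM_BUFFER];

	send_with_optional_buf(stack_buf);	/* caller-owned buffer  */
	send_with_optional_buf(NULL);		/* allocate on demand   */
	return 0;
}
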
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0200_dyn_conn.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0200_dyn_conn.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0200_dyn_conn.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,1068 @@
+diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
+index 3a51539..ab10191 100644
+--- a/drivers/infiniband/hw/nes/nes_cm.c
++++ b/drivers/infiniband/hw/nes/nes_cm.c
+@@ -103,6 +103,7 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt);
+ static void nes_disconnect_worker(struct work_struct *work);
+ 
+ static int send_mpa_request(struct nes_cm_node *, struct sk_buff *);
++static int send_mpa_reject(struct nes_cm_node *);
+ static int send_syn(struct nes_cm_node *, u32, struct sk_buff *);
+ static int send_reset(struct nes_cm_node *, struct sk_buff *);
+ static int send_ack(struct nes_cm_node *cm_node, struct sk_buff *skb);
+@@ -113,8 +114,7 @@ static void process_packet(struct nes_cm_node *, struct sk_buff *,
+ static void active_open_err(struct nes_cm_node *, struct sk_buff *, int);
+ static void passive_open_err(struct nes_cm_node *, struct sk_buff *, int);
+ static void cleanup_retrans_entry(struct nes_cm_node *);
+-static void handle_rcv_mpa(struct nes_cm_node *, struct sk_buff *,
+-	enum nes_cm_event_type);
++static void handle_rcv_mpa(struct nes_cm_node *, struct sk_buff *);
+ static void free_retrans_entry(struct nes_cm_node *cm_node);
+ static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph,
+ 	struct sk_buff *skb, int optionsize, int passive);
+@@ -124,6 +124,8 @@ static void cm_event_connected(struct nes_cm_event *);
+ static void cm_event_connect_error(struct nes_cm_event *);
+ static void cm_event_reset(struct nes_cm_event *);
+ static void cm_event_mpa_req(struct nes_cm_event *);
++static void cm_event_mpa_reject(struct nes_cm_event *);
++static void handle_recv_entry(struct nes_cm_node *cm_node, u32 rem_node);
+ 
+ static void print_core(struct nes_cm_core *core);
+ 
+@@ -196,7 +198,6 @@ static struct nes_cm_event *create_event(struct nes_cm_node *cm_node,
+  */
+ static int send_mpa_request(struct nes_cm_node *cm_node, struct sk_buff *skb)
+ {
+-	int ret;
+ 	if (!skb) {
+ 		nes_debug(NES_DBG_CM, "skb set to NULL\n");
+ 		return -1;
+@@ -206,11 +207,27 @@ static int send_mpa_request(struct nes_cm_node *cm_node, struct sk_buff *skb)
+ 	form_cm_frame(skb, cm_node, NULL, 0, &cm_node->mpa_frame,
+ 			cm_node->mpa_frame_size, SET_ACK);
+ 
+-	ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
+-	if (ret < 0)
+-		return ret;
++	return schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
++}
+ 
+-	return 0;
++
++
++static int send_mpa_reject(struct nes_cm_node *cm_node)
++{
++	struct sk_buff  *skb = NULL;
++
++	skb = dev_alloc_skb(MAX_CM_BUFFER);
++	if (!skb) {
++		nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
++		return -ENOMEM;
++	}
++
++	/* send an MPA reject frame */
++	form_cm_frame(skb, cm_node, NULL, 0, &cm_node->mpa_frame,
++			cm_node->mpa_frame_size, SET_ACK | SET_FIN);
++
++	cm_node->state = NES_CM_STATE_FIN_WAIT1;
++	return schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
+ }
+ 
+ 
+@@ -218,14 +235,17 @@ static int send_mpa_request(struct nes_cm_node *cm_node, struct sk_buff *skb)
+  * recv_mpa - process a received TCP pkt, we are expecting an
+  * IETF MPA frame
+  */
+-static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 len)
++static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type,
++		u32 len)
+ {
+ 	struct ietf_mpa_frame *mpa_frame;
+ 
++	*type = NES_MPA_REQUEST_ACCEPT;
++
+ 	/* assume req frame is in tcp data payload */
+ 	if (len < sizeof(struct ietf_mpa_frame)) {
+ 		nes_debug(NES_DBG_CM, "The received ietf buffer was too small (%x)\n", len);
+-		return -1;
++		return -EINVAL;
+ 	}
+ 
+ 	mpa_frame = (struct ietf_mpa_frame *)buffer;
+@@ -234,14 +254,25 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 len)
+ 	if (cm_node->mpa_frame_size + sizeof(struct ietf_mpa_frame) != len) {
+ 		nes_debug(NES_DBG_CM, "The received ietf buffer was not right"
+ 				" complete (%x + %x != %x)\n",
+-				cm_node->mpa_frame_size, (u32)sizeof(struct ietf_mpa_frame), len);
+-		return -1;
++				cm_node->mpa_frame_size,
++				(u32)sizeof(struct ietf_mpa_frame), len);
++		return -EINVAL;
++	}
++	/* make sure it does not exceed the max size */
++	if (len > MAX_CM_BUFFER) {
++		nes_debug(NES_DBG_CM, "The received ietf buffer was too large"
++				" (%x + %x != %x)\n",
++				cm_node->mpa_frame_size,
++				(u32)sizeof(struct ietf_mpa_frame), len);
++		return -EINVAL;
+ 	}
+ 
+ 	/* copy entire MPA frame to our cm_node's frame */
+ 	memcpy(cm_node->mpa_frame_buf, buffer + sizeof(struct ietf_mpa_frame),
+ 			cm_node->mpa_frame_size);
+ 
++	if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT)
++		*type = NES_MPA_REQUEST_REJECT;
+ 	return 0;
+ }
+ 
+@@ -380,7 +411,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ 
+ 	new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
+ 	if (!new_send)
+-		return -1;
++		return -ENOMEM;
+ 
+ 	/* new_send->timetosend = currenttime */
+ 	new_send->retrycount = NES_DEFAULT_RETRYS;
+@@ -394,9 +425,11 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ 
+ 	if (type == NES_TIMER_TYPE_CLOSE) {
+ 		new_send->timetosend += (HZ/10);
+-		spin_lock_irqsave(&cm_node->recv_list_lock, flags);
+-		list_add_tail(&new_send->list, &cm_node->recv_list);
+-		spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
++		if (cm_node->recv_entry) {
++			WARN_ON(1);
++			return -EINVAL;
++		}
++		cm_node->recv_entry = new_send;
+ 	}
+ 
+ 	if (type == NES_TIMER_TYPE_SEND) {
+@@ -435,24 +468,78 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ 	return ret;
+ }
+ 
++static void nes_retrans_expired(struct nes_cm_node *cm_node)
++{
++	switch (cm_node->state) {
++	case NES_CM_STATE_SYN_RCVD:
++	case NES_CM_STATE_CLOSING:
++		rem_ref_cm_node(cm_node->cm_core, cm_node);
++		break;
++	case NES_CM_STATE_LAST_ACK:
++	case NES_CM_STATE_FIN_WAIT1:
++	case NES_CM_STATE_MPAREJ_RCVD:
++		send_reset(cm_node, NULL);
++		break;
++	default:
++		create_event(cm_node, NES_CM_EVENT_ABORTED);
++	}
++}
++
++static void handle_recv_entry(struct nes_cm_node *cm_node, u32 rem_node)
++{
++	struct nes_timer_entry *recv_entry = cm_node->recv_entry;
++	struct iw_cm_id *cm_id = cm_node->cm_id;
++	struct nes_qp *nesqp;
++	unsigned long qplockflags;
++
++	if (!recv_entry)
++		return;
++	nesqp = (struct nes_qp *)recv_entry->skb;
++	if (nesqp) {
++		spin_lock_irqsave(&nesqp->lock, qplockflags);
++		if (nesqp->cm_id) {
++			nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, "
++				"refcount = %d: HIT A "
++				"NES_TIMER_TYPE_CLOSE with something "
++				"to do!!!\n", nesqp->hwqp.qp_id, cm_id,
++				atomic_read(&nesqp->refcount));
++			nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
++			nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
++			nesqp->ibqp_state = IB_QPS_ERR;
++			spin_unlock_irqrestore(&nesqp->lock, qplockflags);
++			nes_cm_disconn(nesqp);
++		} else {
++			spin_unlock_irqrestore(&nesqp->lock, qplockflags);
++			nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, "
++				"refcount = %d: HIT A "
++				"NES_TIMER_TYPE_CLOSE with nothing "
++				"to do!!!\n", nesqp->hwqp.qp_id, cm_id,
++				atomic_read(&nesqp->refcount));
++		}
++	} else if (rem_node) {
++		/* TIME_WAIT state */
++		rem_ref_cm_node(cm_node->cm_core, cm_node);
++	}
++	if (cm_node->cm_id)
++		cm_id->rem_ref(cm_id);
++	kfree(recv_entry);
++	cm_node->recv_entry = NULL;
++}
+ 
+ /**
+  * nes_cm_timer_tick
+  */
+ static void nes_cm_timer_tick(unsigned long pass)
+ {
+-	unsigned long flags, qplockflags;
++	unsigned long flags;
+ 	unsigned long nexttimeout = jiffies + NES_LONG_TIME;
+-	struct iw_cm_id *cm_id;
+ 	struct nes_cm_node *cm_node;
+ 	struct nes_timer_entry *send_entry, *recv_entry;
+-	struct list_head *list_core, *list_core_temp;
+-	struct list_head *list_node, *list_node_temp;
++	struct list_head *list_core_temp;
++	struct list_head *list_node;
+ 	struct nes_cm_core *cm_core = g_cm_core;
+-	struct nes_qp *nesqp;
+ 	u32 settimer = 0;
+ 	int ret = NETDEV_TX_OK;
+-	enum nes_cm_node_state last_state;
+ 
+ 	struct list_head timer_list;
+ 	INIT_LIST_HEAD(&timer_list);
+@@ -461,7 +548,7 @@ static void nes_cm_timer_tick(unsigned long pass)
+ 	list_for_each_safe(list_node, list_core_temp,
+ 				&cm_core->connected_nodes) {
+ 		cm_node = container_of(list_node, struct nes_cm_node, list);
+-		if (!list_empty(&cm_node->recv_list) || (cm_node->send_entry)) {
++		if ((cm_node->recv_entry) || (cm_node->send_entry)) {
+ 			add_ref_cm_node(cm_node);
+ 			list_add(&cm_node->timer_entry, &timer_list);
+ 		}
+@@ -471,54 +558,18 @@ static void nes_cm_timer_tick(unsigned long pass)
+ 	list_for_each_safe(list_node, list_core_temp, &timer_list) {
+ 		cm_node = container_of(list_node, struct nes_cm_node,
+ 					timer_entry);
+-		spin_lock_irqsave(&cm_node->recv_list_lock, flags);
+-		list_for_each_safe(list_core, list_node_temp,
+-			&cm_node->recv_list) {
+-			recv_entry = container_of(list_core,
+-				struct nes_timer_entry, list);
+-			if (!recv_entry)
+-				break;
++		recv_entry = cm_node->recv_entry;
++
++		if (recv_entry) {
+ 			if (time_after(recv_entry->timetosend, jiffies)) {
+ 				if (nexttimeout > recv_entry->timetosend ||
+-					!settimer) {
++						!settimer) {
+ 					nexttimeout = recv_entry->timetosend;
+ 					settimer = 1;
+ 				}
+-				continue;
+-			}
+-			list_del(&recv_entry->list);
+-			cm_id = cm_node->cm_id;
+-			spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
+-			nesqp = (struct nes_qp *)recv_entry->skb;
+-			spin_lock_irqsave(&nesqp->lock, qplockflags);
+-			if (nesqp->cm_id) {
+-				nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, "
+-					"refcount = %d: HIT A "
+-					"NES_TIMER_TYPE_CLOSE with something "
+-					"to do!!!\n", nesqp->hwqp.qp_id, cm_id,
+-					atomic_read(&nesqp->refcount));
+-				nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
+-				nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
+-				nesqp->ibqp_state = IB_QPS_ERR;
+-				spin_unlock_irqrestore(&nesqp->lock,
+-					qplockflags);
+-				nes_cm_disconn(nesqp);
+-			} else {
+-				spin_unlock_irqrestore(&nesqp->lock,
+-					qplockflags);
+-				nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, "
+-					"refcount = %d: HIT A "
+-					"NES_TIMER_TYPE_CLOSE with nothing "
+-					"to do!!!\n", nesqp->hwqp.qp_id, cm_id,
+-					atomic_read(&nesqp->refcount));
+-			}
+-			if (cm_id)
+-				cm_id->rem_ref(cm_id);
+-
+-			kfree(recv_entry);
+-			spin_lock_irqsave(&cm_node->recv_list_lock, flags);
++			} else
++				handle_recv_entry(cm_node, 1);
+ 		}
+-		spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
+ 
+ 		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ 		do {
+@@ -533,12 +584,11 @@ static void nes_cm_timer_tick(unsigned long pass)
+ 						nexttimeout =
+ 							send_entry->timetosend;
+ 						settimer = 1;
+-						break;
+ 					}
+ 				} else {
+ 					free_retrans_entry(cm_node);
+-					break;
+ 				}
++				break;
+ 			}
+ 
+ 			if ((cm_node->state == NES_CM_STATE_TSA) ||
+@@ -550,16 +600,12 @@ static void nes_cm_timer_tick(unsigned long pass)
+ 			if (!send_entry->retranscount ||
+ 				!send_entry->retrycount) {
+ 				cm_packets_dropped++;
+-				last_state = cm_node->state;
+-				cm_node->state = NES_CM_STATE_CLOSED;
+ 				free_retrans_entry(cm_node);
++
+ 				spin_unlock_irqrestore(
+ 					&cm_node->retrans_list_lock, flags);
+-				if (last_state == NES_CM_STATE_SYN_RCVD)
+-					rem_ref_cm_node(cm_core, cm_node);
+-				else
+-					create_event(cm_node,
+-						NES_CM_EVENT_ABORTED);
++				nes_retrans_expired(cm_node);
++				cm_node->state = NES_CM_STATE_CLOSED;
+ 				spin_lock_irqsave(&cm_node->retrans_list_lock,
+ 					flags);
+ 				break;
+@@ -714,7 +760,7 @@ static int send_reset(struct nes_cm_node *cm_node, struct sk_buff *skb)
+ 		skb = dev_alloc_skb(MAX_CM_BUFFER);
+ 	if (!skb) {
+ 		nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
+-		return -1;
++ 		return -ENOMEM;
+ 	}
+ 
+ 	form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, flags);
+@@ -873,7 +919,8 @@ static int add_hte_node(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node
+ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
+ 	struct nes_cm_listener *listener, int free_hanging_nodes)
+ {
+-	int ret = 1;
++	int ret = -EINVAL;
++	int err = 0;
+ 	unsigned long flags;
+ 	struct list_head *list_pos = NULL;
+ 	struct list_head *list_temp = NULL;
+@@ -902,10 +949,60 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
+ 
+ 	list_for_each_safe(list_pos, list_temp, &reset_list) {
+ 		cm_node = container_of(list_pos, struct nes_cm_node,
+-					reset_entry);
+-		cleanup_retrans_entry(cm_node);
+-		send_reset(cm_node, NULL);
+-		rem_ref_cm_node(cm_node->cm_core, cm_node);
++				reset_entry);
++		{
++			struct nes_cm_node *loopback = cm_node->loopbackpartner;
++			if (NES_CM_STATE_FIN_WAIT1 <= cm_node->state) {
++				rem_ref_cm_node(cm_node->cm_core, cm_node);
++			} else {
++				if (!loopback) {
++					cleanup_retrans_entry(cm_node);
++					err = send_reset(cm_node, NULL);
++					if (err) {
++						cm_node->state =
++							 NES_CM_STATE_CLOSED;
++						WARN_ON(1);
++					} else {
++						cm_node->state =
++							NES_CM_STATE_CLOSED;
++						rem_ref_cm_node(
++							cm_node->cm_core,
++							cm_node);
++					}
++				} else {
++					struct nes_cm_event event;
++
++					event.cm_node = loopback;
++					event.cm_info.rem_addr =
++							loopback->rem_addr;
++					event.cm_info.loc_addr =
++							loopback->loc_addr;
++					event.cm_info.rem_port =
++							loopback->rem_port;
++					event.cm_info.loc_port =
++							 loopback->loc_port;
++					event.cm_info.cm_id = loopback->cm_id;
++					cm_event_connect_error(&event);
++					loopback->state = NES_CM_STATE_CLOSED;
++
++					event.cm_node = cm_node;
++					event.cm_info.rem_addr =
++							 cm_node->rem_addr;
++					event.cm_info.loc_addr =
++							 cm_node->loc_addr;
++					event.cm_info.rem_port =
++							 cm_node->rem_port;
++					event.cm_info.loc_port =
++							 cm_node->loc_port;
++					event.cm_info.cm_id = cm_node->cm_id;
++					cm_event_reset(&event);
++
++					rem_ref_cm_node(cm_node->cm_core,
++							 cm_node);
++
++				}
++			}
++		}
+ 	}
+ 
+ 	spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+@@ -966,6 +1063,7 @@ static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
+ 	if (cm_node->accept_pend) {
+ 		BUG_ON(!cm_node->listener);
+ 		atomic_dec(&cm_node->listener->pend_accepts_cnt);
++		cm_node->accept_pend = 0;
+ 		BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
+ 	}
+ 
+@@ -993,7 +1091,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip)
+ 	memset(&fl, 0, sizeof fl);
+ 	fl.nl_u.ip4_u.daddr = htonl(dst_ip);
+ 	if (ip_route_output_key(&init_net, &rt, &fl)) {
+-		printk("%s: ip_route_output_key failed for 0x%08X\n",
++ 		printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n",
+ 				__func__, dst_ip);
+ 		return rc;
+ 	}
+@@ -1058,8 +1156,6 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
+ 			cm_node->cm_id);
+ 
+ 	spin_lock_init(&cm_node->retrans_list_lock);
+-	INIT_LIST_HEAD(&cm_node->recv_list);
+-	spin_lock_init(&cm_node->recv_list_lock);
+ 
+ 	cm_node->loopbackpartner = NULL;
+ 	atomic_set(&cm_node->ref_count, 1);
+@@ -1127,10 +1223,7 @@ static int add_ref_cm_node(struct nes_cm_node *cm_node)
+ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
+ 	struct nes_cm_node *cm_node)
+ {
+-	unsigned long flags, qplockflags;
+-	struct nes_timer_entry *recv_entry;
+-	struct iw_cm_id *cm_id;
+-	struct list_head *list_core, *list_node_temp;
++	unsigned long flags;
+ 	struct nes_qp *nesqp;
+ 
+ 	if (!cm_node)
+@@ -1151,38 +1244,9 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
+ 		atomic_dec(&cm_node->listener->pend_accepts_cnt);
+ 		BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
+ 	}
+-	BUG_ON(cm_node->send_entry);
+-	spin_lock_irqsave(&cm_node->recv_list_lock, flags);
+-	list_for_each_safe(list_core, list_node_temp, &cm_node->recv_list) {
+-		recv_entry = container_of(list_core, struct nes_timer_entry,
+-				list);
+-		list_del(&recv_entry->list);
+-		cm_id = cm_node->cm_id;
+-		spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
+-		nesqp = (struct nes_qp *)recv_entry->skb;
+-		spin_lock_irqsave(&nesqp->lock, qplockflags);
+-		if (nesqp->cm_id) {
+-			nes_debug(NES_DBG_CM, "QP%u: cm_id = %p: HIT A "
+-				"NES_TIMER_TYPE_CLOSE with something to do!\n",
+-				nesqp->hwqp.qp_id, cm_id);
+-			nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
+-			nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
+-			nesqp->ibqp_state = IB_QPS_ERR;
+-			spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+-			nes_cm_disconn(nesqp);
+-		} else {
+-			spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+-			nes_debug(NES_DBG_CM, "QP%u: cm_id = %p: HIT A "
+-				"NES_TIMER_TYPE_CLOSE with nothing to do!\n",
+-				nesqp->hwqp.qp_id, cm_id);
+-		}
+-		cm_id->rem_ref(cm_id);
+-
+-		kfree(recv_entry);
+-		spin_lock_irqsave(&cm_node->recv_list_lock, flags);
+-	}
+-	spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
+-
++	WARN_ON(cm_node->send_entry);
++	if (cm_node->recv_entry)
++		handle_recv_entry(cm_node, 0);
+ 	if (cm_node->listener) {
+ 		mini_cm_dec_refcnt_listen(cm_core, cm_node->listener, 0);
+ 	} else {
+@@ -1267,8 +1331,7 @@ static void drop_packet(struct sk_buff *skb)
+ 	dev_kfree_skb_any(skb);
+ }
+ 
+-static void handle_fin_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
+-	struct tcphdr *tcph)
++static void handle_fin_pkt(struct nes_cm_node *cm_node)
+ {
+ 	nes_debug(NES_DBG_CM, "Received FIN, cm_node = %p, state = %u. "
+ 		"refcnt=%d\n", cm_node, cm_node->state,
+@@ -1280,23 +1343,30 @@ static void handle_fin_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ 	case NES_CM_STATE_SYN_SENT:
+ 	case NES_CM_STATE_ESTABLISHED:
+ 	case NES_CM_STATE_MPAREQ_SENT:
++	case NES_CM_STATE_MPAREJ_RCVD:
+ 		cm_node->state = NES_CM_STATE_LAST_ACK;
+-		send_fin(cm_node, skb);
++		send_fin(cm_node, NULL);
+ 		break;
+ 	case NES_CM_STATE_FIN_WAIT1:
+ 		cm_node->state = NES_CM_STATE_CLOSING;
+-		send_ack(cm_node, skb);
++		send_ack(cm_node, NULL);
++		/* Wait for ACK as this is simultanous close..
++		* After we receive ACK, do not send anything..
++		* Just rm the node.. Done.. */
+ 		break;
+ 	case NES_CM_STATE_FIN_WAIT2:
+ 		cm_node->state = NES_CM_STATE_TIME_WAIT;
+-		send_ack(cm_node, skb);
++		send_ack(cm_node, NULL);
++		schedule_nes_timer(cm_node, NULL,  NES_TIMER_TYPE_CLOSE, 1, 0);
++		break;
++	case NES_CM_STATE_TIME_WAIT:
+ 		cm_node->state = NES_CM_STATE_CLOSED;
++		rem_ref_cm_node(cm_node->cm_core, cm_node);
+ 		break;
+ 	case NES_CM_STATE_TSA:
+ 	default:
+ 		nes_debug(NES_DBG_CM, "Error Rcvd FIN for node-%p state = %d\n",
+ 			cm_node, cm_node->state);
+-		drop_packet(skb);
+ 		break;
+ 	}
+ }
+@@ -1342,23 +1412,35 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ 		cleanup_retrans_entry(cm_node);
+ 		drop_packet(skb);
+ 		break;
++	case NES_CM_STATE_TIME_WAIT:
++		cleanup_retrans_entry(cm_node);
++		cm_node->state = NES_CM_STATE_CLOSED;
++		rem_ref_cm_node(cm_node->cm_core, cm_node);
++		drop_packet(skb);
++		break;
++	case NES_CM_STATE_FIN_WAIT1:
++		cleanup_retrans_entry(cm_node);
++		nes_debug(NES_DBG_CM, "Bad state %s[%u]\n", __func__, __LINE__);
+ 	default:
+ 		drop_packet(skb);
+ 		break;
+ 	}
+ }
+ 
+-static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb,
+-	enum nes_cm_event_type type)
++
++static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb)
+ {
+ 
+-	int	ret;
++	int	ret = 0;
+ 	int datasize = skb->len;
+ 	u8 *dataloc = skb->data;
+-	ret = parse_mpa(cm_node, dataloc, datasize);
+-	if (ret < 0) {
++
++	enum nes_cm_event_type type = NES_CM_EVENT_UNKNOWN;
++	u32     res_type;
++	ret = parse_mpa(cm_node, dataloc, &res_type, datasize);
++	if (ret) {
+ 		nes_debug(NES_DBG_CM, "didn't like MPA Request\n");
+-		if (type == NES_CM_EVENT_CONNECTED) {
++		if (cm_node->state == NES_CM_STATE_MPAREQ_SENT) {
+ 			nes_debug(NES_DBG_CM, "%s[%u] create abort for "
+ 				"cm_node=%p listener=%p state=%d\n", __func__,
+ 				__LINE__, cm_node, cm_node->listener,
+@@ -1367,18 +1449,38 @@ static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ 		} else {
+ 			passive_open_err(cm_node, skb, 1);
+ 		}
+-	} else {
+-		cleanup_retrans_entry(cm_node);
+-		dev_kfree_skb_any(skb);
+-		if (type == NES_CM_EVENT_CONNECTED)
++		return;
++	}
++
++	switch (cm_node->state) {
++	case NES_CM_STATE_ESTABLISHED:
++		if (res_type == NES_MPA_REQUEST_REJECT) {
++			/*BIG problem as we are receiving the MPA.. So should
++			* not be REJECT.. This is Passive Open.. We can
++			* only receive it Reject for Active Open...*/
++			WARN_ON(1);
++		}
++		cm_node->state = NES_CM_STATE_MPAREQ_RCVD;
++		type = NES_CM_EVENT_MPA_REQ;
++		atomic_set(&cm_node->passive_state,
++				NES_PASSIVE_STATE_INDICATED);
++		break;
++	case NES_CM_STATE_MPAREQ_SENT:
++		if (res_type == NES_MPA_REQUEST_REJECT) {
++			type = NES_CM_EVENT_MPA_REJECT;
++			cm_node->state = NES_CM_STATE_MPAREJ_RCVD;
++		} else {
++			type = NES_CM_EVENT_CONNECTED;
+ 			cm_node->state = NES_CM_STATE_TSA;
+-		else
+-			atomic_set(&cm_node->passive_state,
+-					NES_PASSIVE_STATE_INDICATED);
+-		create_event(cm_node, type);
++		}
+ 
++		break;
++	default:
++		WARN_ON(1);
++		break;
+ 	}
+-	return ;
++	dev_kfree_skb_any(skb);
++	create_event(cm_node, type);
+ }
+ 
+ static void indicate_pkt_err(struct nes_cm_node *cm_node, struct sk_buff *skb)
+@@ -1466,8 +1568,6 @@ static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ 		break;
+ 	case NES_CM_STATE_LISTENING:
+ 		/* Passive OPEN */
+-		cm_node->accept_pend = 1;
+-		atomic_inc(&cm_node->listener->pend_accepts_cnt);
+ 		if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
+ 				cm_node->listener->backlog) {
+ 			nes_debug(NES_DBG_CM, "drop syn due to backlog "
+@@ -1485,6 +1585,9 @@ static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ 		}
+ 		cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
+ 		BUG_ON(cm_node->send_entry);
++		cm_node->accept_pend = 1;
++		atomic_inc(&cm_node->listener->pend_accepts_cnt);
++
+ 		cm_node->state = NES_CM_STATE_SYN_RCVD;
+ 		send_syn(cm_node, 1, skb);
+ 		break;
+@@ -1519,6 +1622,7 @@ static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ 	inc_sequence = ntohl(tcph->seq);
+ 	switch (cm_node->state) {
+ 	case NES_CM_STATE_SYN_SENT:
++		cleanup_retrans_entry(cm_node);
+ 		/* active open */
+ 		if (check_syn(cm_node, tcph, skb))
+ 			return;
+@@ -1568,10 +1672,7 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ 	u32 rem_seq;
+ 	int ret;
+ 	int optionsize;
+-	u32 temp_seq = cm_node->tcp_cntxt.loc_seq_num;
+-
+ 	optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
+-	cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
+ 
+ 	if (check_seq(cm_node, tcph, skb))
+ 		return;
+@@ -1581,7 +1682,7 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ 	rem_seq = ntohl(tcph->seq);
+ 	rem_seq_ack =  ntohl(tcph->ack_seq);
+ 	datasize = skb->len;
+-
++	cleanup_retrans_entry(cm_node);
+ 	switch (cm_node->state) {
+ 	case NES_CM_STATE_SYN_RCVD:
+ 		/* Passive OPEN */
+@@ -1589,7 +1690,6 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ 		if (ret)
+ 			break;
+ 		cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
+-		cm_node->tcp_cntxt.loc_seq_num = temp_seq;
+ 		if (cm_node->tcp_cntxt.rem_ack_num !=
+ 		    cm_node->tcp_cntxt.loc_seq_num) {
+ 			nes_debug(NES_DBG_CM, "rem_ack_num != loc_seq_num\n");
+@@ -1598,31 +1698,30 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ 			return;
+ 		}
+ 		cm_node->state = NES_CM_STATE_ESTABLISHED;
++		cleanup_retrans_entry(cm_node);
+ 		if (datasize) {
+ 			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+-			cm_node->state = NES_CM_STATE_MPAREQ_RCVD;
+-			handle_rcv_mpa(cm_node, skb, NES_CM_EVENT_MPA_REQ);
+-		 } else { /* rcvd ACK only */
++			handle_rcv_mpa(cm_node, skb);
++		} else { /* rcvd ACK only */
+ 			dev_kfree_skb_any(skb);
+ 			cleanup_retrans_entry(cm_node);
+ 		 }
+ 		break;
+ 	case NES_CM_STATE_ESTABLISHED:
+ 		/* Passive OPEN */
+-		/* We expect mpa frame to be received only */
++		cleanup_retrans_entry(cm_node);
+ 		if (datasize) {
+ 			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+-			cm_node->state = NES_CM_STATE_MPAREQ_RCVD;
+-			handle_rcv_mpa(cm_node, skb,
+-				NES_CM_EVENT_MPA_REQ);
++			handle_rcv_mpa(cm_node, skb);
+ 		} else
+ 			drop_packet(skb);
+ 		break;
+ 	case NES_CM_STATE_MPAREQ_SENT:
++		cleanup_retrans_entry(cm_node);
+ 		cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
+ 		if (datasize) {
+ 			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+-			handle_rcv_mpa(cm_node, skb, NES_CM_EVENT_CONNECTED);
++			handle_rcv_mpa(cm_node, skb);
+ 		} else { /* Could be just an ack pkt.. */
+ 			cleanup_retrans_entry(cm_node);
+ 			dev_kfree_skb_any(skb);
+@@ -1633,13 +1732,24 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ 		cleanup_retrans_entry(cm_node);
+ 		send_reset(cm_node, skb);
+ 		break;
++	case NES_CM_STATE_LAST_ACK:
++		cleanup_retrans_entry(cm_node);
++		cm_node->state = NES_CM_STATE_CLOSED;
++		cm_node->cm_id->rem_ref(cm_node->cm_id);
++	case NES_CM_STATE_CLOSING:
++		cleanup_retrans_entry(cm_node);
++		rem_ref_cm_node(cm_node->cm_core, cm_node);
++		drop_packet(skb);
++		break;
+ 	case NES_CM_STATE_FIN_WAIT1:
++		cleanup_retrans_entry(cm_node);
++		drop_packet(skb);
++		cm_node->state = NES_CM_STATE_FIN_WAIT2;
++		break;
+ 	case NES_CM_STATE_SYN_SENT:
+ 	case NES_CM_STATE_FIN_WAIT2:
+ 	case NES_CM_STATE_TSA:
+ 	case NES_CM_STATE_MPAREQ_RCVD:
+-	case NES_CM_STATE_LAST_ACK:
+-	case NES_CM_STATE_CLOSING:
+ 	case NES_CM_STATE_UNKNOWN:
+ 	default:
+ 		drop_packet(skb);
+@@ -1749,6 +1859,7 @@ static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ {
+ 	enum nes_tcpip_pkt_type	pkt_type = NES_PKT_TYPE_UNKNOWN;
+ 	struct tcphdr *tcph = tcp_hdr(skb);
++	u32     fin_set = 0;
+ 	skb_pull(skb, ip_hdr(skb)->ihl << 2);
+ 
+ 	nes_debug(NES_DBG_CM, "process_packet: cm_node=%p state =%d syn=%d "
+@@ -1761,10 +1872,10 @@ static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ 		pkt_type = NES_PKT_TYPE_SYN;
+ 		if (tcph->ack)
+ 			pkt_type = NES_PKT_TYPE_SYNACK;
+-	} else if (tcph->fin)
+-		pkt_type = NES_PKT_TYPE_FIN;
+-	else if (tcph->ack)
++	} else if (tcph->ack)
+ 		pkt_type = NES_PKT_TYPE_ACK;
++	if (tcph->fin)
++		fin_set = 1;
+ 
+ 	switch (pkt_type) {
+ 	case NES_PKT_TYPE_SYN:
+@@ -1775,15 +1886,16 @@ static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ 		break;
+ 	case NES_PKT_TYPE_ACK:
+ 		handle_ack_pkt(cm_node, skb, tcph);
++		if (fin_set)
++			handle_fin_pkt(cm_node);
+ 		break;
+ 	case NES_PKT_TYPE_RST:
+ 		handle_rst_pkt(cm_node, skb, tcph);
+ 		break;
+-	case NES_PKT_TYPE_FIN:
+-		handle_fin_pkt(cm_node, skb, tcph);
+-		break;
+ 	default:
+ 		drop_packet(skb);
++		if (fin_set)
++			handle_fin_pkt(cm_node);
+ 		break;
+ 	}
+ }
+@@ -1926,7 +2038,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
+ 				loopbackremotenode->tcp_cntxt.rcv_wscale;
+ 			loopbackremotenode->tcp_cntxt.snd_wscale =
+ 				cm_node->tcp_cntxt.rcv_wscale;
+-
++			loopbackremotenode->state = NES_CM_STATE_MPAREQ_RCVD;
+ 			create_event(loopbackremotenode, NES_CM_EVENT_MPA_REQ);
+ 		}
+ 		return cm_node;
+@@ -1981,7 +2093,11 @@ static int mini_cm_reject(struct nes_cm_core *cm_core,
+ 	struct ietf_mpa_frame *mpa_frame, struct nes_cm_node *cm_node)
+ {
+ 	int ret = 0;
++	int err = 0;
+ 	int passive_state;
++	struct nes_cm_event event;
++	struct iw_cm_id *cm_id = cm_node->cm_id;
++	struct nes_cm_node *loopback = cm_node->loopbackpartner;
+ 
+ 	nes_debug(NES_DBG_CM, "%s cm_node=%p type=%d state=%d\n",
+ 		__func__, cm_node, cm_node->tcp_cntxt.client, cm_node->state);
+@@ -1990,12 +2106,38 @@ static int mini_cm_reject(struct nes_cm_core *cm_core,
+ 		return ret;
+ 	cleanup_retrans_entry(cm_node);
+ 
+-	passive_state = atomic_add_return(1, &cm_node->passive_state);
+-	cm_node->state = NES_CM_STATE_CLOSED;
+-	if (passive_state == NES_SEND_RESET_EVENT)
++	if (!loopback) {
++		passive_state = atomic_add_return(1, &cm_node->passive_state);
++		if (passive_state == NES_SEND_RESET_EVENT) {
++			cm_node->state = NES_CM_STATE_CLOSED;
++			rem_ref_cm_node(cm_core, cm_node);
++		} else {
++			ret = send_mpa_reject(cm_node);
++			if (ret) {
++				cm_node->state = NES_CM_STATE_CLOSED;
++				err = send_reset(cm_node, NULL);
++				if (err)
++					WARN_ON(1);
++			} else
++				cm_id->add_ref(cm_id);
++		}
++	} else {
++		cm_node->cm_id = NULL;
++		event.cm_node = loopback;
++		event.cm_info.rem_addr = loopback->rem_addr;
++		event.cm_info.loc_addr = loopback->loc_addr;
++		event.cm_info.rem_port = loopback->rem_port;
++		event.cm_info.loc_port = loopback->loc_port;
++		event.cm_info.cm_id = loopback->cm_id;
++		cm_event_mpa_reject(&event);
+ 		rem_ref_cm_node(cm_core, cm_node);
+-	else
+-		ret = send_reset(cm_node, NULL);
++		loopback->state = NES_CM_STATE_CLOSING;
++
++		cm_id = loopback->cm_id;
++		rem_ref_cm_node(cm_core, loopback);
++		cm_id->rem_ref(cm_id);
++	}
++
+ 	return ret;
+ }
+ 
+@@ -2032,6 +2174,7 @@ static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_nod
+ 	case NES_CM_STATE_CLOSING:
+ 		ret = -1;
+ 		break;
++	case NES_CM_STATE_MPAREJ_RCVD:
+ 	case NES_CM_STATE_LISTENING:
+ 	case NES_CM_STATE_UNKNOWN:
+ 	case NES_CM_STATE_INITED:
+@@ -2226,15 +2369,15 @@ static int mini_cm_set(struct nes_cm_core *cm_core, u32 type, u32 value)
+ 	int ret = 0;
+ 
+ 	switch (type) {
+-		case NES_CM_SET_PKT_SIZE:
+-			cm_core->mtu = value;
+-			break;
+-		case NES_CM_SET_FREE_PKT_Q_SIZE:
+-			cm_core->free_tx_pkt_max = value;
+-			break;
+-		default:
+-			/* unknown set option */
+-			ret = -EINVAL;
++	case NES_CM_SET_PKT_SIZE:
++		cm_core->mtu = value;
++		break;
++	case NES_CM_SET_FREE_PKT_Q_SIZE:
++		cm_core->free_tx_pkt_max = value;
++		break;
++	default:
++		/* unknown set option */
++		ret = -EINVAL;
+ 	}
+ 
+ 	return ret;
+@@ -2653,9 +2796,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 			NES_QPCONTEXT_ORDIRD_WRPDU);
+ 	} else {
+ 		nesqp->nesqp_context->ird_ord_sizes |=
+-			cpu_to_le32((NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
+-			NES_QPCONTEXT_ORDIRD_WRPDU |
+-			NES_QPCONTEXT_ORDIRD_ALSMM));
++			cpu_to_le32(NES_QPCONTEXT_ORDIRD_WRPDU);
+ 	}
+ 	nesqp->skip_lsmm = 1;
+ 
+@@ -2777,23 +2918,35 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
+ {
+ 	struct nes_cm_node *cm_node;
++	struct nes_cm_node *loopback;
++
+ 	struct nes_cm_core *cm_core;
+ 
+ 	atomic_inc(&cm_rejects);
+ 	cm_node = (struct nes_cm_node *) cm_id->provider_data;
++	loopback = cm_node->loopbackpartner;
+ 	cm_core = cm_node->cm_core;
++	cm_node->cm_id = cm_id;
+ 	cm_node->mpa_frame_size = sizeof(struct ietf_mpa_frame) + pdata_len;
+ 
++	if (cm_node->mpa_frame_size > MAX_CM_BUFFER)
++		return -EINVAL;
++
+ 	strcpy(&cm_node->mpa_frame.key[0], IEFT_MPA_KEY_REP);
+-	memcpy(&cm_node->mpa_frame.priv_data, pdata, pdata_len);
++	if (loopback) {
++		memcpy(&loopback->mpa_frame.priv_data, pdata, pdata_len);
++		loopback->mpa_frame.priv_data_len = pdata_len;
++		loopback->mpa_frame_size = sizeof(struct ietf_mpa_frame) +
++				pdata_len;
++	} else {
++		memcpy(&cm_node->mpa_frame.priv_data, pdata, pdata_len);
++		cm_node->mpa_frame.priv_data_len = cpu_to_be16(pdata_len);
++	}
+ 
+-	cm_node->mpa_frame.priv_data_len = cpu_to_be16(pdata_len);
+ 	cm_node->mpa_frame.rev = mpa_version;
+ 	cm_node->mpa_frame.flags = IETF_MPA_FLAGS_CRC | IETF_MPA_FLAGS_REJECT;
+ 
+-	cm_core->api->reject(cm_core, &cm_node->mpa_frame, cm_node);
+-
+-	return 0;
++	return cm_core->api->reject(cm_core, &cm_node->mpa_frame, cm_node);
+ }
+ 
+ 
+@@ -3302,13 +3455,55 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
+ 	cm_event.remote_addr.sin_family = AF_INET;
+ 	cm_event.remote_addr.sin_port = htons(event->cm_info.rem_port);
+ 	cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr);
+-
+-		cm_event.private_data                = cm_node->mpa_frame_buf;
+-		cm_event.private_data_len            = (u8) cm_node->mpa_frame_size;
++	cm_event.private_data = cm_node->mpa_frame_buf;
++	cm_event.private_data_len  = (u8) cm_node->mpa_frame_size;
+ 
+ 	ret = cm_id->event_handler(cm_id, &cm_event);
+ 	if (ret)
+-		printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
++		printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n",
++				__func__, __LINE__, ret);
++	return;
++}
++
++
++static void cm_event_mpa_reject(struct nes_cm_event *event)
++{
++	struct iw_cm_id   *cm_id;
++	struct iw_cm_event cm_event;
++	struct nes_cm_node *cm_node;
++	int ret;
++
++	cm_node = event->cm_node;
++	if (!cm_node)
++		return;
++	cm_id = cm_node->cm_id;
++
++	atomic_inc(&cm_connect_reqs);
++	nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
++			cm_node, cm_id, jiffies);
++
++	cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
++	cm_event.status = -ECONNREFUSED;
++	cm_event.provider_data = cm_id->provider_data;
++
++	cm_event.local_addr.sin_family = AF_INET;
++	cm_event.local_addr.sin_port = htons(event->cm_info.loc_port);
++	cm_event.local_addr.sin_addr.s_addr = htonl(event->cm_info.loc_addr);
++
++	cm_event.remote_addr.sin_family = AF_INET;
++	cm_event.remote_addr.sin_port = htons(event->cm_info.rem_port);
++	cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr);
++
++	cm_event.private_data = cm_node->mpa_frame_buf;
++	cm_event.private_data_len = (u8) cm_node->mpa_frame_size;
++
++	nes_debug(NES_DBG_CM, "call CM_EVENT_MPA_REJECTED, local_addr=%08x, "
++			"remove_addr=%08x\n",
++			cm_event.local_addr.sin_addr.s_addr,
++			cm_event.remote_addr.sin_addr.s_addr);
++
++	if ((ret = cm_id->event_handler(cm_id, &cm_event)))
++		printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n",
+ 				__func__, __LINE__, ret);
+ 
+ 	return;
+@@ -3373,6 +3567,14 @@ static void nes_cm_event_handler(struct work_struct *work)
+ 		cm_event_connected(event);
+ 		nes_debug(NES_DBG_CM, "CM Event: CONNECTED\n");
+ 		break;
++	case NES_CM_EVENT_MPA_REJECT:
++		if ((!event->cm_node->cm_id) ||
++				(event->cm_node->state == NES_CM_STATE_TSA))
++			break;
++		cm_event_mpa_reject(event);
++		nes_debug(NES_DBG_CM, "CM Event: REJECT\n");
++		break;
++
+ 	case NES_CM_EVENT_ABORTED:
+ 		if ((!event->cm_node->cm_id) ||
+ 			(event->cm_node->state == NES_CM_STATE_TSA))
+diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
+index 4ab2beb..d5f7782 100644
+--- a/drivers/infiniband/hw/nes/nes_cm.h
++++ b/drivers/infiniband/hw/nes/nes_cm.h
+@@ -39,6 +39,9 @@
+ #define NES_MANAGE_APBVT_DEL 0
+ #define NES_MANAGE_APBVT_ADD 1
+ 
++#define NES_MPA_REQUEST_ACCEPT  1
++#define NES_MPA_REQUEST_REJECT  2
++
+ /* IETF MPA -- defines, enums, structs */
+ #define IEFT_MPA_KEY_REQ  "MPA ID Req Frame"
+ #define IEFT_MPA_KEY_REP  "MPA ID Rep Frame"
+@@ -186,6 +189,7 @@ enum nes_cm_node_state {
+ 	NES_CM_STATE_ACCEPTING,
+ 	NES_CM_STATE_MPAREQ_SENT,
+ 	NES_CM_STATE_MPAREQ_RCVD,
++	NES_CM_STATE_MPAREJ_RCVD,
+ 	NES_CM_STATE_TSA,
+ 	NES_CM_STATE_FIN_WAIT1,
+ 	NES_CM_STATE_FIN_WAIT2,
+@@ -278,13 +282,12 @@ struct nes_cm_node {
+ 	struct nes_timer_entry	*send_entry;
+ 
+ 	spinlock_t                retrans_list_lock;
+-	struct list_head          recv_list;
+-	spinlock_t                recv_list_lock;
++	struct nes_timer_entry  *recv_entry;
+ 
+ 	int                       send_write0;
+ 	union {
+ 		struct ietf_mpa_frame mpa_frame;
+-		u8                    mpa_frame_buf[NES_CM_DEFAULT_MTU];
++		u8                    mpa_frame_buf[MAX_CM_BUFFER];
+ 	};
+ 	u16                       mpa_frame_size;
+ 	struct iw_cm_id           *cm_id;
+@@ -326,6 +329,7 @@ enum  nes_cm_event_type {
+ 	NES_CM_EVENT_MPA_REQ,
+ 	NES_CM_EVENT_MPA_CONNECT,
+ 	NES_CM_EVENT_MPA_ACCEPT,
++	NES_CM_EVENT_MPA_REJECT,
+ 	NES_CM_EVENT_MPA_ESTABLISHED,
+ 	NES_CM_EVENT_CONNECTED,
+ 	NES_CM_EVENT_CLOSED,
+-- 
+1.5.3.3
+
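
The hunks above rework the connection-manager state machine so that an MPA reject is classified from the node state and the parsed result code instead of a caller-supplied event type, and the new cm_event_mpa_reject() reports it upward as an IW_CM_EVENT_CONNECT_REPLY with status -ECONNREFUSED. A minimal, self-contained C sketch of that state/result mapping, using stand-in enums rather than the driver's real NES_CM_STATE_*/NES_MPA_REQUEST_* definitions:

    #include <stdio.h>

    enum state  { ESTABLISHED, MPAREQ_SENT };         /* stand-ins for NES_CM_STATE_*   */
    enum result { MPA_ACCEPT = 1, MPA_REJECT = 2 };   /* stand-ins for NES_MPA_REQUEST_* */
    enum event  { EV_MPA_REQ, EV_MPA_REJECT, EV_CONNECTED };

    /* Passive side (ESTABLISHED) always raises an MPA request event;
     * active side (MPAREQ_SENT) raises either a reject or a connected event. */
    static enum event mpa_event(enum state s, enum result r)
    {
            if (s == ESTABLISHED)
                    return EV_MPA_REQ;
            return (r == MPA_REJECT) ? EV_MPA_REJECT : EV_CONNECTED;
    }

    int main(void)
    {
            printf("%d\n", mpa_event(MPAREQ_SENT, MPA_REJECT)); /* 1 -> EV_MPA_REJECT */
            return 0;
    }

The real handle_rcv_mpa() additionally moves the node into NES_CM_STATE_MPAREQ_RCVD, NES_CM_STATE_MPAREJ_RCVD or NES_CM_STATE_TSA before raising the event.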

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0210_no_proc_fin.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0210_no_proc_fin.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0210_no_proc_fin.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,304 @@
+diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
+index 9c47a29..7e39fc4 100644
+--- a/drivers/infiniband/hw/nes/nes_cm.c
++++ b/drivers/infiniband/hw/nes/nes_cm.c
+@@ -56,6 +56,7 @@
+ #include <net/neighbour.h>
+ #include <net/route.h>
+ #include <net/ip_fib.h>
++#include <net/tcp.h>
+ 
+ #include "nes.h"
+ 
+@@ -426,6 +427,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ 	if (type == NES_TIMER_TYPE_CLOSE) {
+ 		new_send->timetosend += (HZ/10);
+ 		if (cm_node->recv_entry) {
++			kfree(new_send);
+ 			WARN_ON(1);
+ 			return -EINVAL;
+ 		}
+@@ -445,8 +447,9 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ 		if (ret != NETDEV_TX_OK) {
+ 			nes_debug(NES_DBG_CM, "Error sending packet %p "
+ 				"(jiffies = %lu)\n", new_send, jiffies);
+-			atomic_dec(&new_send->skb->users);
+ 			new_send->timetosend = jiffies;
++			/* Return OK as we are going to rexmit */
++			ret = NETDEV_TX_OK;
+ 		} else {
+ 			cm_packets_sent++;
+ 			if (!send_retrans) {
+@@ -539,6 +542,7 @@ static void nes_cm_timer_tick(unsigned long pass)
+ 	struct list_head *list_node;
+ 	struct nes_cm_core *cm_core = g_cm_core;
+ 	u32 settimer = 0;
++	unsigned long timetosend;
+ 	int ret = NETDEV_TX_OK;
+ 
+ 	struct list_head timer_list;
+@@ -630,7 +634,6 @@ static void nes_cm_timer_tick(unsigned long pass)
+ 				nes_debug(NES_DBG_CM, "rexmit failed for "
+ 					"node=%p\n", cm_node);
+ 				cm_packets_bounced++;
+-				atomic_dec(&send_entry->skb->users);
+ 				send_entry->retrycount--;
+ 				nexttimeout = jiffies + NES_SHORT_TIME;
+ 				settimer = 1;
+@@ -644,8 +647,10 @@ static void nes_cm_timer_tick(unsigned long pass)
+ 				send_entry->retrycount);
+ 			if (send_entry->send_retrans) {
+ 				send_entry->retranscount--;
++				timetosend = (NES_RETRY_TIMEOUT <<
++					(NES_DEFAULT_RETRANS - send_entry->retranscount));
+ 				send_entry->timetosend = jiffies +
+-					NES_RETRY_TIMEOUT;
++					min(timetosend, NES_MAX_TIMEOUT);
+ 				if (nexttimeout > send_entry->timetosend ||
+ 					!settimer) {
+ 					nexttimeout = send_entry->timetosend;
+@@ -666,11 +671,6 @@ static void nes_cm_timer_tick(unsigned long pass)
+ 
+ 		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ 		rem_ref_cm_node(cm_node->cm_core, cm_node);
+-		if (ret != NETDEV_TX_OK) {
+-			nes_debug(NES_DBG_CM, "rexmit failed for cm_node=%p\n",
+-				cm_node);
+-			break;
+-		}
+ 	}
+ 
+ 	if (settimer) {
+@@ -1267,7 +1267,6 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
+ 		cm_node->nesqp = NULL;
+ 	}
+ 
+-	cm_node->freed = 1;
+ 	kfree(cm_node);
+ 	return 0;
+ }
+@@ -1336,18 +1335,20 @@ static void handle_fin_pkt(struct nes_cm_node *cm_node)
+ 	nes_debug(NES_DBG_CM, "Received FIN, cm_node = %p, state = %u. "
+ 		"refcnt=%d\n", cm_node, cm_node->state,
+ 		atomic_read(&cm_node->ref_count));
+-	cm_node->tcp_cntxt.rcv_nxt++;
+-	cleanup_retrans_entry(cm_node);
+ 	switch (cm_node->state) {
+ 	case NES_CM_STATE_SYN_RCVD:
+ 	case NES_CM_STATE_SYN_SENT:
+ 	case NES_CM_STATE_ESTABLISHED:
+ 	case NES_CM_STATE_MPAREQ_SENT:
+ 	case NES_CM_STATE_MPAREJ_RCVD:
++		cm_node->tcp_cntxt.rcv_nxt++;
++		cleanup_retrans_entry(cm_node);
+ 		cm_node->state = NES_CM_STATE_LAST_ACK;
+ 		send_fin(cm_node, NULL);
+ 		break;
+ 	case NES_CM_STATE_FIN_WAIT1:
++		cm_node->tcp_cntxt.rcv_nxt++;
++		cleanup_retrans_entry(cm_node);
+ 		cm_node->state = NES_CM_STATE_CLOSING;
+ 		send_ack(cm_node, NULL);
+ 		/* Wait for ACK as this is simultanous close..
+@@ -1355,11 +1356,15 @@ static void handle_fin_pkt(struct nes_cm_node *cm_node)
+ 		* Just rm the node.. Done.. */
+ 		break;
+ 	case NES_CM_STATE_FIN_WAIT2:
++		cm_node->tcp_cntxt.rcv_nxt++;
++		cleanup_retrans_entry(cm_node);
+ 		cm_node->state = NES_CM_STATE_TIME_WAIT;
+ 		send_ack(cm_node, NULL);
+ 		schedule_nes_timer(cm_node, NULL,  NES_TIMER_TYPE_CLOSE, 1, 0);
+ 		break;
+ 	case NES_CM_STATE_TIME_WAIT:
++		cm_node->tcp_cntxt.rcv_nxt++;
++		cleanup_retrans_entry(cm_node);
+ 		cm_node->state = NES_CM_STATE_CLOSED;
+ 		rem_ref_cm_node(cm_node->cm_core, cm_node);
+ 		break;
+@@ -1466,6 +1471,7 @@ static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb)
+ 				NES_PASSIVE_STATE_INDICATED);
+ 		break;
+ 	case NES_CM_STATE_MPAREQ_SENT:
++		cleanup_retrans_entry(cm_node);
+ 		if (res_type == NES_MPA_REQUEST_REJECT) {
+ 			type = NES_CM_EVENT_MPA_REJECT;
+ 			cm_node->state = NES_CM_STATE_MPAREJ_RCVD;
+@@ -1529,7 +1535,7 @@ static int check_seq(struct nes_cm_node *cm_node, struct tcphdr *tcph,
+ 	rcv_wnd = cm_node->tcp_cntxt.rcv_wnd;
+ 	if (ack_seq != loc_seq_num)
+ 		err = 1;
+-	else if ((seq + rcv_wnd) < rcv_nxt)
++	else if (!between(seq, rcv_nxt, (rcv_nxt+rcv_wnd)))
+ 		err = 1;
+ 	if (err) {
+ 		nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p "
+@@ -1663,49 +1669,39 @@ static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ 	}
+ }
+ 
+-static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
++static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ 	struct tcphdr *tcph)
+ {
+ 	int datasize = 0;
+ 	u32 inc_sequence;
+ 	u32 rem_seq_ack;
+ 	u32 rem_seq;
+-	int ret;
++	int ret = 0;
+ 	int optionsize;
+ 	optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
+ 
+ 	if (check_seq(cm_node, tcph, skb))
+-		return;
++		return -EINVAL;
+ 
+ 	skb_pull(skb, tcph->doff << 2);
+ 	inc_sequence = ntohl(tcph->seq);
+ 	rem_seq = ntohl(tcph->seq);
+ 	rem_seq_ack =  ntohl(tcph->ack_seq);
+ 	datasize = skb->len;
+-	cleanup_retrans_entry(cm_node);
+ 	switch (cm_node->state) {
+ 	case NES_CM_STATE_SYN_RCVD:
+ 		/* Passive OPEN */
++		cleanup_retrans_entry(cm_node);
+ 		ret = handle_tcp_options(cm_node, tcph, skb, optionsize, 1);
+ 		if (ret)
+ 			break;
+ 		cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
+-		if (cm_node->tcp_cntxt.rem_ack_num !=
+-		    cm_node->tcp_cntxt.loc_seq_num) {
+-			nes_debug(NES_DBG_CM, "rem_ack_num != loc_seq_num\n");
+-			cleanup_retrans_entry(cm_node);
+-			send_reset(cm_node, skb);
+-			return;
+-		}
+ 		cm_node->state = NES_CM_STATE_ESTABLISHED;
+-		cleanup_retrans_entry(cm_node);
+ 		if (datasize) {
+ 			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ 			handle_rcv_mpa(cm_node, skb);
+-		} else { /* rcvd ACK only */
++		} else  /* rcvd ACK only */
+ 			dev_kfree_skb_any(skb);
+-			cleanup_retrans_entry(cm_node);
+-		 }
+ 		break;
+ 	case NES_CM_STATE_ESTABLISHED:
+ 		/* Passive OPEN */
+@@ -1717,15 +1713,12 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ 			drop_packet(skb);
+ 		break;
+ 	case NES_CM_STATE_MPAREQ_SENT:
+-		cleanup_retrans_entry(cm_node);
+ 		cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
+ 		if (datasize) {
+ 			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ 			handle_rcv_mpa(cm_node, skb);
+-		} else { /* Could be just an ack pkt.. */
+-			cleanup_retrans_entry(cm_node);
++		} else  /* Could be just an ack pkt.. */
+ 			dev_kfree_skb_any(skb);
+-		}
+ 		break;
+ 	case NES_CM_STATE_LISTENING:
+ 	case NES_CM_STATE_CLOSED:
+@@ -1733,11 +1726,10 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ 		send_reset(cm_node, skb);
+ 		break;
+ 	case NES_CM_STATE_LAST_ACK:
++	case NES_CM_STATE_CLOSING:
+ 		cleanup_retrans_entry(cm_node);
+ 		cm_node->state = NES_CM_STATE_CLOSED;
+ 		cm_node->cm_id->rem_ref(cm_node->cm_id);
+-	case NES_CM_STATE_CLOSING:
+-		cleanup_retrans_entry(cm_node);
+ 		rem_ref_cm_node(cm_node->cm_core, cm_node);
+ 		drop_packet(skb);
+ 		break;
+@@ -1752,9 +1744,11 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ 	case NES_CM_STATE_MPAREQ_RCVD:
+ 	case NES_CM_STATE_UNKNOWN:
+ 	default:
++		cleanup_retrans_entry(cm_node);
+ 		drop_packet(skb);
+ 		break;
+ 	}
++	return ret;
+ }
+ 
+ 
+@@ -1860,6 +1854,7 @@ static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ 	enum nes_tcpip_pkt_type	pkt_type = NES_PKT_TYPE_UNKNOWN;
+ 	struct tcphdr *tcph = tcp_hdr(skb);
+ 	u32     fin_set = 0;
++	int ret = 0;
+ 	skb_pull(skb, ip_hdr(skb)->ihl << 2);
+ 
+ 	nes_debug(NES_DBG_CM, "process_packet: cm_node=%p state =%d syn=%d "
+@@ -1885,17 +1880,17 @@ static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ 		handle_synack_pkt(cm_node, skb, tcph);
+ 		break;
+ 	case NES_PKT_TYPE_ACK:
+-		handle_ack_pkt(cm_node, skb, tcph);
+-		if (fin_set)
++		ret = handle_ack_pkt(cm_node, skb, tcph);
++		if (fin_set && !ret)
+ 			handle_fin_pkt(cm_node);
+ 		break;
+ 	case NES_PKT_TYPE_RST:
+ 		handle_rst_pkt(cm_node, skb, tcph);
+ 		break;
+ 	default:
+-		drop_packet(skb);
+-		if (fin_set)
++		if ((fin_set) && (!check_seq(cm_node, tcph, skb)))
+ 			handle_fin_pkt(cm_node);
++		drop_packet(skb);
+ 		break;
+ 	}
+ }
+@@ -2004,13 +1999,17 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
+ 		if (loopbackremotelistener == NULL) {
+ 			create_event(cm_node, NES_CM_EVENT_ABORTED);
+ 		} else {
+-			atomic_inc(&cm_loopbacks);
+ 			loopback_cm_info = *cm_info;
+ 			loopback_cm_info.loc_port = cm_info->rem_port;
+ 			loopback_cm_info.rem_port = cm_info->loc_port;
+ 			loopback_cm_info.cm_id = loopbackremotelistener->cm_id;
+ 			loopbackremotenode = make_cm_node(cm_core, nesvnic,
+ 				&loopback_cm_info, loopbackremotelistener);
++			if (!loopbackremotenode) {
++				rem_ref_cm_node(cm_node->cm_core, cm_node);
++				return NULL;
++			}
++			atomic_inc(&cm_loopbacks);
+ 			loopbackremotenode->loopbackpartner = cm_node;
+ 			loopbackremotenode->tcp_cntxt.rcv_wscale =
+ 				NES_CM_DEFAULT_RCV_WND_SCALE;
+diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
+index d5f7782..8b7e7c0 100644
+--- a/drivers/infiniband/hw/nes/nes_cm.h
++++ b/drivers/infiniband/hw/nes/nes_cm.h
+@@ -149,6 +149,7 @@ struct nes_timer_entry {
+ #endif
+ #define NES_SHORT_TIME      (10)
+ #define NES_LONG_TIME       (2000*HZ/1000)
++#define NES_MAX_TIMEOUT     ((unsigned long) (12*HZ))
+ 
+ #define NES_CM_HASHTABLE_SIZE         1024
+ #define NES_CM_TCP_TIMER_INTERVAL     3000
+@@ -298,7 +299,6 @@ struct nes_cm_node {
+ 	struct nes_vnic           *nesvnic;
+ 	int                       apbvt_set;
+ 	int                       accept_pend;
+-	int			freed;
+ 	struct list_head	timer_entry;
+ 	struct list_head	reset_entry;
+ 	struct nes_qp		*nesqp;
+-- 
+1.5.3.3
+
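
nes_0210_no_proc_fin.patch above also changes the retransmit timer from a fixed NES_RETRY_TIMEOUT to an exponentially growing interval capped at the new NES_MAX_TIMEOUT (12*HZ). A small stand-alone sketch of the arithmetic; the base interval and retransmission budget below are assumed placeholder values, since the real NES_RETRY_TIMEOUT and NES_DEFAULT_RETRANS are defined elsewhere in nes_cm.h:

    #include <stdio.h>

    /* Placeholder values; the real constants live in nes_cm.h. */
    #define HZ                  1000
    #define NES_RETRY_TIMEOUT   (HZ / 2)               /* assumed base interval   */
    #define NES_DEFAULT_RETRANS 8                      /* assumed retrans budget  */
    #define NES_MAX_TIMEOUT     ((unsigned long)(12 * HZ))

    static unsigned long next_timeout(int retranscount)
    {
            /* Interval doubles each time a retransmission is consumed, capped at 12 s. */
            unsigned long t = (unsigned long)NES_RETRY_TIMEOUT
                              << (NES_DEFAULT_RETRANS - retranscount);
            return t < NES_MAX_TIMEOUT ? t : NES_MAX_TIMEOUT;
    }

    int main(void)
    {
            for (int i = NES_DEFAULT_RETRANS; i > 0; i--)
                    printf("retranscount=%d -> +%lu ticks\n", i, next_timeout(i));
            return 0;
    }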

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0220_sfp_plus.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0220_sfp_plus.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0220_sfp_plus.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,350 @@
+diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
+index 898a03a..a1dd51c 100644
+--- a/drivers/infiniband/hw/nes/nes_hw.c
++++ b/drivers/infiniband/hw/nes/nes_hw.c
+@@ -757,6 +757,10 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
+ 			((port_count > 2) &&
+ 			(nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G))) {
+ 			/* init serdes 1 */
++			if (nesadapter->phy_type[0] == NES_PHY_TYPE_ARGUS) {
++				nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x00000000);
++				nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x00000000);
++			}
+ 			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000FF);
+ 			if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) {
+ 				serdes_common_control = nes_read_indexed(nesdev,
+@@ -1265,197 +1269,146 @@ int nes_init_phy(struct nes_device *nesdev)
+ 	u16 phy_data;
+ 	u32 temp_phy_data = 0;
+ 	u32 temp_phy_data2 = 0;
+-	u32 i = 0;
++	u8  phy_type = nesadapter->phy_type[mac_index];
++	u8  phy_index = nesadapter->phy_index[mac_index];
+ 
+ 	if ((nesadapter->OneG_Mode) &&
+-	    (nesadapter->phy_type[mac_index] != NES_PHY_TYPE_PUMA_1G)) {
++	    (phy_type != NES_PHY_TYPE_PUMA_1G)) {
+ 		nes_debug(NES_DBG_PHY, "1G PHY, mac_index = %d.\n", mac_index);
+-		if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_1G) {
+-			printk(PFX "%s: Programming mdc config for 1G\n", __func__);
++		if (phy_type == NES_PHY_TYPE_1G) {
+ 			tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
+ 			tx_config &= 0xFFFFFFE3;
+ 			tx_config |= 0x04;
+ 			nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
+ 		}
+ 
+-		nes_read_1G_phy_reg(nesdev, 1, nesadapter->phy_index[mac_index], &phy_data);
+-		nes_debug(NES_DBG_PHY, "Phy data from register 1 phy address %u = 0x%X.\n",
+-				nesadapter->phy_index[mac_index], phy_data);
+-		nes_write_1G_phy_reg(nesdev, 23, nesadapter->phy_index[mac_index], 0xb000);
++		nes_read_1G_phy_reg(nesdev, 1, phy_index, &phy_data);
++		nes_write_1G_phy_reg(nesdev, 23, phy_index, 0xb000);
+ 
+ 		/* Reset the PHY */
+-		nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], 0x8000);
++		nes_write_1G_phy_reg(nesdev, 0, phy_index, 0x8000);
+ 		udelay(100);
+ 		counter = 0;
+ 		do {
+-			nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], &phy_data);
+-			nes_debug(NES_DBG_PHY, "Phy data from register 0 = 0x%X.\n", phy_data);
+-			if (counter++ > 100) break;
++			nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
++			if (counter++ > 100)
++				break;
+ 		} while (phy_data & 0x8000);
+ 
+ 		/* Setting no phy loopback */
+ 		phy_data &= 0xbfff;
+ 		phy_data |= 0x1140;
+-		nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index],  phy_data);
+-		nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], &phy_data);
+-		nes_debug(NES_DBG_PHY, "Phy data from register 0 = 0x%X.\n", phy_data);
+-
+-		nes_read_1G_phy_reg(nesdev, 0x17, nesadapter->phy_index[mac_index], &phy_data);
+-		nes_debug(NES_DBG_PHY, "Phy data from register 0x17 = 0x%X.\n", phy_data);
+-
+-		nes_read_1G_phy_reg(nesdev, 0x1e, nesadapter->phy_index[mac_index], &phy_data);
+-		nes_debug(NES_DBG_PHY, "Phy data from register 0x1e = 0x%X.\n", phy_data);
++		nes_write_1G_phy_reg(nesdev, 0, phy_index,  phy_data);
++		nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
++		nes_read_1G_phy_reg(nesdev, 0x17, phy_index, &phy_data);
++		nes_read_1G_phy_reg(nesdev, 0x1e, phy_index, &phy_data);
+ 
+ 		/* Setting the interrupt mask */
+-		nes_read_1G_phy_reg(nesdev, 0x19, nesadapter->phy_index[mac_index], &phy_data);
+-		nes_debug(NES_DBG_PHY, "Phy data from register 0x19 = 0x%X.\n", phy_data);
+-		nes_write_1G_phy_reg(nesdev, 0x19, nesadapter->phy_index[mac_index], 0xffee);
+-
+-		nes_read_1G_phy_reg(nesdev, 0x19, nesadapter->phy_index[mac_index], &phy_data);
+-		nes_debug(NES_DBG_PHY, "Phy data from register 0x19 = 0x%X.\n", phy_data);
++		nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data);
++		nes_write_1G_phy_reg(nesdev, 0x19, phy_index, 0xffee);
++		nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data);
+ 
+ 		/* turning on flow control */
+-		nes_read_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index], &phy_data);
+-		nes_debug(NES_DBG_PHY, "Phy data from register 0x4 = 0x%X.\n", phy_data);
+-		nes_write_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index],
+-				(phy_data & ~(0x03E0)) | 0xc00);
+-		/* nes_write_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index],
+-				phy_data | 0xc00); */
+-		nes_read_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index], &phy_data);
+-		nes_debug(NES_DBG_PHY, "Phy data from register 0x4 = 0x%X.\n", phy_data);
+-
+-		nes_read_1G_phy_reg(nesdev, 9, nesadapter->phy_index[mac_index], &phy_data);
+-		nes_debug(NES_DBG_PHY, "Phy data from register 0x9 = 0x%X.\n", phy_data);
+-		/* Clear Half duplex */
+-		nes_write_1G_phy_reg(nesdev, 9, nesadapter->phy_index[mac_index],
+-				phy_data & ~(0x0100));
+-		nes_read_1G_phy_reg(nesdev, 9, nesadapter->phy_index[mac_index], &phy_data);
+-		nes_debug(NES_DBG_PHY, "Phy data from register 0x9 = 0x%X.\n", phy_data);
++		nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data);
++		nes_write_1G_phy_reg(nesdev, 4, phy_index, (phy_data & ~(0x03E0)) | 0xc00);
++		nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data);
+ 
+-		nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], &phy_data);
+-		nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], phy_data | 0x0300);
+-	} else {
+-		if ((nesadapter->phy_type[mac_index] == NES_PHY_TYPE_IRIS) ||
+-		    (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_ARGUS)) {
+-			/* setup 10G MDIO operation */
+-			tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
+-			tx_config &= 0xFFFFFFE3;
+-			tx_config |= 0x15;
+-			nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
+-		}
+-		if ((nesadapter->phy_type[mac_index] == NES_PHY_TYPE_ARGUS)) {
+-			nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7ee);
++		/* Clear Half duplex */
++		nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data);
++		nes_write_1G_phy_reg(nesdev, 9, phy_index, phy_data & ~(0x0100));
++		nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data);
+ 
+-			temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+-			mdelay(10);
+-			nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7ee);
+-			temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
++		nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
++		nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data | 0x0300);
+ 
+-			/*
+-			 * if firmware is already running (like from a
+-			 * driver un-load/load, don't do anything.
+-			 */
+-			if (temp_phy_data == temp_phy_data2) {
+-				/* configure QT2505 AMCC PHY */
+-				nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0x0000, 0x8000);
+-				nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc300, 0x0000);
+-				nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc302, 0x0044);
+-				nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc318, 0x0052);
+-				nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc319, 0x0008);
+-				nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc31a, 0x0098);
+-				nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0026, 0x0E00);
+-				nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0027, 0x0001);
+-				nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0028, 0xA528);
++		return 0;
++	}
+ 
+-				/*
+-				 * remove micro from reset; chip boots from ROM,
+-				 * uploads EEPROM f/w image, uC executes f/w
+-				 */
+-				nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc300, 0x0002);
++	if ((phy_type == NES_PHY_TYPE_IRIS) ||
++	    (phy_type == NES_PHY_TYPE_ARGUS)) {
++		/* setup 10G MDIO operation */
++		tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
++		tx_config &= 0xFFFFFFE3;
++		tx_config |= 0x15;
++		nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
++	}
++	if ((phy_type == NES_PHY_TYPE_ARGUS)) {
++		/* Check firmware heartbeat */
++		nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
++		temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
++		udelay(1500);
++		nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
++		temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+ 
+-				/*
+-				 * wait for heart beat to start to
+-				 * know loading is done
+-				 */
+-				counter = 0;
+-				do {
+-					nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7ee);
+-					temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+-					if (counter++ > 1000) {
+-						nes_debug(NES_DBG_PHY, "AMCC PHY- breaking from heartbeat check <this is bad!!!> \n");
+-						break;
+-					}
+-					mdelay(100);
+-					nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7ee);
+-					temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+-				} while ((temp_phy_data2 == temp_phy_data));
++		if (temp_phy_data != temp_phy_data2)
++			return 0;
+ 
+-				/*
+-				 * wait for tracking to start to know
+-				 * f/w is good to go
+-				 */
+-				counter = 0;
+-				do {
+-					nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7fd);
+-					temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+-					if (counter++ > 1000) {
+-						nes_debug(NES_DBG_PHY, "AMCC PHY- breaking from status check <this is bad!!!> \n");
+-						break;
+-					}
+-					mdelay(1000);
+-					/*
+-					 * nes_debug(NES_DBG_PHY, "AMCC PHY- phy_status not ready yet = 0x%02X\n",
+-					 *			temp_phy_data);
+-					 */
+-				} while (((temp_phy_data & 0xff) != 0x50) && ((temp_phy_data & 0xff) != 0x70));
+-
+-				/* set LOS Control invert RXLOSB_I_PADINV */
+-				nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xd003, 0x0000);
+-				/* set LOS Control to mask of RXLOSB_I */
+-				nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc314, 0x0042);
+-				/* set LED1 to input mode (LED1 and LED2 share same LED) */
+-				nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xd006, 0x0007);
+-				/* set LED2 to RX link_status and activity */
+-				nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xd007, 0x000A);
+-				/* set LED3 to RX link_status */
+-				nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xd008, 0x0009);
++		/* no heartbeat, configure the PHY */
++		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0x0000, 0x8000);
++		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0000);
++		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C);
++		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A);
++		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052);
++		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0008);
++		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0098);
++		nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00);
++		nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0001);
+ 
+-				/*
+-				 * reset the res-calibration on t2
+-				 * serdes; ensures it is stable after
+-				 * the amcc phy is stable
+-				 */
++		/* setup LEDs */
++		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x0007);
++		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x000A);
++		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0009);
+ 
+-				sds_common_control0  = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0);
+-				sds_common_control0 |= 0x1;
+-				nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds_common_control0);
++		nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0028, 0xA528);
+ 
+-				/* release the res-calibration reset */
+-				sds_common_control0 &= 0xfffffffe;
+-				nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds_common_control0);
++		/* Bring PHY out of reset */
++		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0002);
+ 
+-				i = 0;
+-				while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040)
+-						&& (i++ < 5000)) {
+-					/* mdelay(1); */
+-				}
++		/* Check for heartbeat */
++		counter = 0;
++		mdelay(690);
++		nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
++		temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
++		do {
++			if (counter++ > 150) {
++				nes_debug(NES_DBG_PHY, "No PHY heartbeat\n");
++				break;
++			}
++			mdelay(1);
++			nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
++			temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
++		} while ((temp_phy_data2 == temp_phy_data));
+ 
+-				/*
+-				 * wait for link train done before moving on,
+-				 * or will get an interupt storm
+-				 */
+-				counter = 0;
+-				do {
+-					temp_phy_data = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
+-								(0x200 * (nesdev->mac_index & 1)));
+-					if (counter++ > 1000) {
+-						nes_debug(NES_DBG_PHY, "AMCC PHY- breaking from link train wait <this is bad, link didnt train!!!>\n");
+-						break;
+-					}
+-					mdelay(1);
+-				} while (((temp_phy_data & 0x0f1f0000) != 0x0f0f0000));
++		/* wait for tracking */
++		counter = 0;
++		do {
++			nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
++			temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
++			if (counter++ > 300) {
++				nes_debug(NES_DBG_PHY, "PHY did not track\n");
++				break;
+ 			}
+-		}
++			mdelay(10);
++		} while (((temp_phy_data & 0xff) != 0x50) && ((temp_phy_data & 0xff) != 0x70));
++
++		/* setup signal integrity */
++		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd003, 0x0000);
++		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00D, 0x00FE);
++		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00E, 0x0032);
++		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00F, 0x0002);
++		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc314, 0x0063);
++
++		/* reset serdes */
++		sds_common_control0  = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0);
++		sds_common_control0 |= 0x1;
++		nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds_common_control0);
++		sds_common_control0 &= 0xfffffffe;
++		nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds_common_control0);
++
++		counter = 0;
++		while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040)
++				&& (counter++ < 5000))
++			;
+ 	}
+ 	return 0;
+ }
+@@ -2483,19 +2436,18 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
+ 				nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9004);
+ 				nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9005);
+ 				/* check link status */
+-				nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1);
++				nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9003);
+ 				temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+-				u32temp = 100;
+-				do {
+-					nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1);
+ 
+-					phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+-					if ((phy_data == temp_phy_data) || (!(--u32temp)))
+-						break;
+-					temp_phy_data = phy_data;
+-				} while (1);
++				nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021);
++				nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
++				nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021);
++				phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
++
++				phy_data = (!temp_phy_data && (phy_data == 0x8000)) ? 0x4 : 0x0;
++
+ 				nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
+-					__func__, phy_data, nesadapter->mac_link_down ? "DOWN" : "UP");
++					__func__, phy_data, nesadapter->mac_link_down[mac_index] ? "DOWN" : "UP");
+ 				break;
+ 
+ 			case NES_PHY_TYPE_PUMA_1G:
+-- 
+1.5.3.3
+
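
A second idea in the PHY rework above is the firmware heartbeat probe: register 0xd7ee is sampled twice, and if the value moved between samples the ARGUS microcontroller is already running, so the whole reprogramming sequence is skipped. A generic, self-contained sketch of that pattern, with a simulated counter standing in for the nes_read_10G_phy_reg()/NES_IDX_MAC_MDIO_CONTROL accesses:

    #include <stdbool.h>
    #include <stdio.h>

    /* Simulated heartbeat counter; the driver reads register 0xd7ee over MDIO. */
    static unsigned short fake_heartbeat_counter;
    static unsigned short read_heartbeat(void) { return fake_heartbeat_counter++; }

    /* Firmware is considered alive when the counter advances between samples. */
    static bool phy_firmware_running(void)
    {
            unsigned short a = read_heartbeat();
            /* the real code waits ~1.5 ms here (udelay(1500)) */
            unsigned short b = read_heartbeat();
            return a != b;
    }

    int main(void) { printf("%d\n", phy_firmware_running()); return 0; }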

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0230_bnt.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0230_bnt.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0230_bnt.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,228 @@
+diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
+index a1dd51c..9a08665 100644
+--- a/drivers/infiniband/hw/nes/nes_hw.c
++++ b/drivers/infiniband/hw/nes/nes_hw.c
+@@ -46,6 +46,10 @@ static unsigned int nes_lro_max_aggr = NES_LRO_MAX_AGGR;
+ module_param(nes_lro_max_aggr, uint, 0444);
+ MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
+ 
++static int wide_ppm_offset;
++module_param(wide_ppm_offset, int, 0644);
++MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm");
++
+ static u32 crit_err_count;
+ u32 int_mod_timer_init;
+ u32 int_mod_cq_depth_256;
+@@ -521,74 +525,6 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
+ 	INIT_LIST_HEAD(&nesadapter->nesvnic_list[2]);
+ 	INIT_LIST_HEAD(&nesadapter->nesvnic_list[3]);
+ 
+-	if ((!nesadapter->OneG_Mode) && (nesadapter->port_count == 2)) {
+-		u32 pcs_control_status0, pcs_control_status1;
+-		u32 reset_value;
+-		u32 i = 0;
+-		u32 int_cnt = 0;
+-		u32 ext_cnt = 0;
+-		unsigned long flags;
+-		u32 j = 0;
+-
+-		pcs_control_status0 = nes_read_indexed(nesdev,
+-			NES_IDX_PHY_PCS_CONTROL_STATUS0);
+-		pcs_control_status1 = nes_read_indexed(nesdev,
+-			NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
+-
+-		for (i = 0; i < NES_MAX_LINK_CHECK; i++) {
+-			pcs_control_status0 = nes_read_indexed(nesdev,
+-					NES_IDX_PHY_PCS_CONTROL_STATUS0);
+-			pcs_control_status1 = nes_read_indexed(nesdev,
+-					NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
+-			if ((0x0F000100 == (pcs_control_status0 & 0x0F000100))
+-			    || (0x0F000100 == (pcs_control_status1 & 0x0F000100)))
+-				int_cnt++;
+-			msleep(1);
+-		}
+-		if (int_cnt > 1) {
+-			spin_lock_irqsave(&nesadapter->phy_lock, flags);
+-			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F088);
+-			mh_detected++;
+-			reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET);
+-			reset_value |= 0x0000003d;
+-			nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value);
+-
+-			while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET)
+-				& 0x00000040) != 0x00000040) && (j++ < 5000));
+-			spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+-
+-			pcs_control_status0 = nes_read_indexed(nesdev,
+-					NES_IDX_PHY_PCS_CONTROL_STATUS0);
+-			pcs_control_status1 = nes_read_indexed(nesdev,
+-					NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
+-
+-			for (i = 0; i < NES_MAX_LINK_CHECK; i++) {
+-				pcs_control_status0 = nes_read_indexed(nesdev,
+-					NES_IDX_PHY_PCS_CONTROL_STATUS0);
+-				pcs_control_status1 = nes_read_indexed(nesdev,
+-					NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
+-				if ((0x0F000100 == (pcs_control_status0 & 0x0F000100))
+-					|| (0x0F000100 == (pcs_control_status1 & 0x0F000100))) {
+-					if (++ext_cnt > int_cnt) {
+-						spin_lock_irqsave(&nesadapter->phy_lock, flags);
+-						nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1,
+-								0x0000F0C8);
+-						mh_detected++;
+-						reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET);
+-						reset_value |= 0x0000003d;
+-						nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value);
+-
+-						while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET)
+-							& 0x00000040) != 0x00000040) && (j++ < 5000));
+-						spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+-						break;
+-					}
+-				}
+-				msleep(1);
+-			}
+-		}
+-	}
+-
+ 	if (nesadapter->hw_rev == NE020_REV) {
+ 		init_timer(&nesadapter->mh_timer);
+ 		nesadapter->mh_timer.function = nes_mh_fix;
+@@ -736,43 +672,48 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
+ {
+ 	int i;
+ 	u32 u32temp;
+-	u32 serdes_common_control;
++	u32 sds;
+ 
+ 	if (hw_rev != NE020_REV) {
+ 		/* init serdes 0 */
++		if (wide_ppm_offset && (nesadapter->phy_type[0] == NES_PHY_TYPE_CX4))
++			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000FFFAA);
++		else
++			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
+ 
+-		nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
+ 		if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) {
+-			serdes_common_control = nes_read_indexed(nesdev,
+-					NES_IDX_ETH_SERDES_COMMON_CONTROL0);
+-			serdes_common_control |= 0x000000100;
+-			nes_write_indexed(nesdev,
+-					NES_IDX_ETH_SERDES_COMMON_CONTROL0,
+-					serdes_common_control);
+-		} else if (!OneG_Mode) {
+-			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0, 0x11110000);
++			sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0);
++			sds |= 0x00000100;
++			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds);
+ 		}
+-		if (((port_count > 1) &&
+-			(nesadapter->phy_type[0] != NES_PHY_TYPE_PUMA_1G)) ||
+-			((port_count > 2) &&
+-			(nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G))) {
+-			/* init serdes 1 */
+-			if (nesadapter->phy_type[0] == NES_PHY_TYPE_ARGUS) {
+-				nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x00000000);
+-				nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x00000000);
+-			}
+-			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000FF);
+-			if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) {
+-				serdes_common_control = nes_read_indexed(nesdev,
+-					NES_IDX_ETH_SERDES_COMMON_CONTROL1);
+-				serdes_common_control |= 0x000000100;
+-				nes_write_indexed(nesdev,
+-					NES_IDX_ETH_SERDES_COMMON_CONTROL1,
+-					serdes_common_control);
+-			} else if (!OneG_Mode) {
+-				nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE1, 0x11110000);
+-			}
++		if (!OneG_Mode)
++			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0, 0x11110000);
++
++		if (port_count < 2)
++			return 0;
++
++		/* init serdes 1 */
++		switch (nesadapter->phy_type[1]) {
++		case  NES_PHY_TYPE_ARGUS:
++			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x00000000);
++			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x00000000);
++			break;
++		case NES_PHY_TYPE_CX4:
++			sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1);
++			sds &= 0xFFFFFFBF;
++			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, sds);
++			if (wide_ppm_offset)
++				nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000FFFAA);
++			else
++				nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000FF);
++			break;
++		case NES_PHY_TYPE_PUMA_1G:
++			sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1);
++			sds |= 0x000000100;
++			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, sds);
+ 		}
++		if (!OneG_Mode)
++			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE1, 0x11110000);
+ 	} else {
+ 		/* init serdes 0 */
+ 		nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x00000008);
+@@ -2312,6 +2253,7 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
+ 	u16 temp_phy_data;
+ 	u32 pcs_val  = 0x0f0f0000;
+ 	u32 pcs_mask = 0x0f1f0000;
++	u32 cdr_ctrl;
+ 
+ 	spin_lock_irqsave(&nesadapter->phy_lock, flags);
+ 	if (nesadapter->mac_sw_state[mac_number] != NES_MAC_SW_IDLE) {
+@@ -2463,6 +2405,17 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
+ 		}
+ 
+ 		if (phy_data & 0x0004) {
++			if (wide_ppm_offset &&
++			    (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_CX4) &&
++			    (nesadapter->hw_rev != NE020_REV)) {
++				cdr_ctrl = nes_read_indexed(nesdev,
++							    NES_IDX_ETH_SERDES_CDR_CONTROL0 +
++							    mac_index * 0x200);
++				nes_write_indexed(nesdev,
++						  NES_IDX_ETH_SERDES_CDR_CONTROL0 +
++						  mac_index * 0x200,
++						  cdr_ctrl | 0x000F0000);
++			}
+ 			nesadapter->mac_link_down[mac_index] = 0;
+ 			list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) {
+ 				nes_debug(NES_DBG_PHY, "The Link is UP!!.  linkup was %d\n",
+@@ -2477,6 +2430,17 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
+ 				}
+ 			}
+ 		} else {
++			if (wide_ppm_offset &&
++			    (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_CX4) &&
++			    (nesadapter->hw_rev != NE020_REV)) {
++				cdr_ctrl = nes_read_indexed(nesdev,
++							    NES_IDX_ETH_SERDES_CDR_CONTROL0 +
++							    mac_index * 0x200);
++				nes_write_indexed(nesdev,
++						  NES_IDX_ETH_SERDES_CDR_CONTROL0 +
++						  mac_index * 0x200,
++						  cdr_ctrl & 0xFFF0FFFF);
++			}
+ 			nesadapter->mac_link_down[mac_index] = 1;
+ 			list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) {
+ 				nes_debug(NES_DBG_PHY, "The Link is Down!!. linkup was %d\n",
+diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
+index f41a871..13bc26a 100644
+--- a/drivers/infiniband/hw/nes/nes_hw.h
++++ b/drivers/infiniband/hw/nes/nes_hw.h
+@@ -35,6 +35,7 @@
+ 
+ #include <linux/inet_lro.h>
+ 
++#define NES_PHY_TYPE_CX4       1
+ #define NES_PHY_TYPE_1G        2
+ #define NES_PHY_TYPE_IRIS      3
+ #define NES_PHY_TYPE_ARGUS     4
+-- 
+1.5.3.3
+
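
nes_0230_bnt.patch introduces the wide_ppm_offset module parameter for CX4 ports: left at 0 the clock-recovery window stays at the usual 100 ppm, while loading the driver with modprobe iw_nes wide_ppm_offset=1 programs 0x000FFFAA into the serdes CDR control registers for roughly a 300 ppm window, and nes_process_mac_intr() re-adjusts the same registers on link up/down. Because the parameter is registered with mode 0644 it should also appear under /sys/module/iw_nes/parameters/wide_ppm_offset after load.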

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0240_mem_reg.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0240_mem_reg.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0240_mem_reg.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,41 @@
+diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
+index a7c0c1f..83c999c 100644
+--- a/drivers/infiniband/hw/nes/nes_verbs.c
++++ b/drivers/infiniband/hw/nes/nes_verbs.c
+@@ -2216,15 +2216,6 @@ static struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd,
+ 			root_pbl_index++;
+ 			cur_pbl_index = 0;
+ 		}
+-		if (buffer_list[i].addr & ~PAGE_MASK) {
+-			/* TODO: Unwind allocated buffers */
+-			nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+-			nes_debug(NES_DBG_MR, "Unaligned Memory Buffer: 0x%x\n",
+-					(unsigned int) buffer_list[i].addr);
+-			ibmr = ERR_PTR(-EINVAL);
+-			kfree(nesmr);
+-			goto reg_phys_err;
+-		}
+ 
+ 		if (!buffer_list[i].size) {
+ 			nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+@@ -2239,7 +2230,7 @@ static struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd,
+ 			if ((buffer_list[i-1].addr+PAGE_SIZE) != buffer_list[i].addr)
+ 				single_page = 0;
+ 		}
+-		vpbl.pbl_vbase[cur_pbl_index].pa_low = cpu_to_le32((u32)buffer_list[i].addr);
++		vpbl.pbl_vbase[cur_pbl_index].pa_low = cpu_to_le32((u32)buffer_list[i].addr & PAGE_MASK);
+ 		vpbl.pbl_vbase[cur_pbl_index++].pa_high =
+ 				cpu_to_le32((u32)((((u64)buffer_list[i].addr) >> 32)));
+ 	}
+@@ -2252,8 +2243,6 @@ static struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd,
+ 			" length = 0x%016lX, index = 0x%08X\n",
+ 			stag, (unsigned long)*iova_start, (unsigned long)region_length, stag_index);
+ 
+-	region_length -= (*iova_start)&PAGE_MASK;
+-
+ 	/* Make the leaf PBL the root if only one PBL */
+ 	if (root_pbl_index == 1) {
+ 		root_vpbl.pbl_pbase = vpbl.pbl_pbase;
+-- 
+1.5.3.3
+
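
With the alignment check above removed, nes_reg_phys_mr() now stores only the page-aligned part of each buffer address in the PBL entry; the in-page offset is presumably recovered from the tagged offset (*iova_start) that callers such as the companion nes_0250 patch now pass in. A tiny stand-alone illustration of the split, assuming 4 KiB pages (the kernel uses its own PAGE_MASK/PAGE_SIZE):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  ((uint64_t)1 << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
            uint64_t addr = 0x12345678ABCULL;          /* arbitrary unaligned address    */
            uint64_t page = addr & PAGE_MASK;          /* what goes into the PBL entry   */
            uint64_t off  = addr & ~PAGE_MASK;         /* carried by the tagged offset   */
            printf("page=0x%llx off=0x%llx\n",
                   (unsigned long long)page, (unsigned long long)off);
            return 0;
    }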

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0250_cast.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0250_cast.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0250_cast.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,53 @@
+diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
+index 09dab29..e156451 100644
+--- a/drivers/infiniband/hw/nes/nes.h
++++ b/drivers/infiniband/hw/nes/nes.h
+@@ -289,8 +289,8 @@ static inline __le32 get_crc_value(struct nes_v4_quad *nes_quad)
+ static inline void
+ set_wqe_64bit_value(__le32 *wqe_words, u32 index, u64 value)
+ {
+-	wqe_words[index]     = cpu_to_le32((u32) ((unsigned long)value));
+-	wqe_words[index + 1] = cpu_to_le32((u32)(upper_32_bits((unsigned long)value)));
++	wqe_words[index]     = cpu_to_le32((u32)value);
++	wqe_words[index + 1] = cpu_to_le32(upper_32_bits(value));
+ }
+ 
+ static inline void
+diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
+index 7e39fc4..91d5372 100644
+--- a/drivers/infiniband/hw/nes/nes_cm.c
++++ b/drivers/infiniband/hw/nes/nes_cm.c
+@@ -2692,7 +2692,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 	struct ib_mr *ibmr = NULL;
+ 	struct ib_phys_buf ibphysbuf;
+ 	struct nes_pd *nespd;
+-
++	u64 tagged_offset;
+ 
+ 
+ 	ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
+@@ -2757,10 +2757,11 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 		ibphysbuf.addr = nesqp->ietf_frame_pbase;
+ 		ibphysbuf.size = conn_param->private_data_len + 
+ 					sizeof(struct ietf_mpa_frame);
++		tagged_offset = (u64)(unsigned long)nesqp->ietf_frame;
+ 		ibmr = nesibdev->ibdev.reg_phys_mr((struct ib_pd *)nespd, 
+ 						&ibphysbuf, 1,
+ 						IB_ACCESS_LOCAL_WRITE, 
+-						(u64 *)&nesqp->ietf_frame);
++						&tagged_offset);
+ 		if (!ibmr) {
+ 			nes_debug(NES_DBG_CM, "Unable to register memory region"
+ 					"for lSMM for cm_node = %p \n",
+@@ -2784,7 +2785,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 			sizeof(struct ietf_mpa_frame));
+ 		set_wqe_64bit_value(wqe->wqe_words,
+ 					NES_IWARP_SQ_WQE_FRAG0_LOW_IDX,
+-					(u64)nesqp->ietf_frame);
++					(u64)(unsigned long)nesqp->ietf_frame);
+ 		wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] =
+ 			cpu_to_le32(conn_param->private_data_len +
+ 			sizeof(struct ietf_mpa_frame));
+-- 
+1.5.3.3
+
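
The cast fix above avoids truncating or mis-aliasing the ietf_frame pointer on 32-bit builds: instead of handing reg_phys_mr() a (u64 *) view of the pointer field, nes_accept() now builds a separate u64 tagged_offset by casting through unsigned long, and set_wqe_64bit_value() splits such values with upper_32_bits(). A stand-alone sketch of the same pattern, using uintptr_t where the kernel uses unsigned long:

    #include <stdio.h>
    #include <stdint.h>

    /* Same idea as the kernel's upper_32_bits()/lower_32_bits() helpers. */
    static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }
    static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }

    int main(void)
    {
            int dummy;
            void *p = &dummy;
            /* Cast through uintptr_t so the conversion is well defined on both
             * 32- and 64-bit builds before widening to 64 bits. */
            uint64_t tagged_offset = (uint64_t)(uintptr_t)p;
            printf("high=0x%08x low=0x%08x\n",
                   upper_32(tagged_offset), lower_32(tagged_offset));
            return 0;
    }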

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0260_version.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0260_version.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0260_version.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,19 @@
+diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
+index e156451..2872e1b 100644
+--- a/drivers/infiniband/hw/nes/nes.h
++++ b/drivers/infiniband/hw/nes/nes.h
+@@ -56,10 +56,8 @@
+ 
+ #define QUEUE_DISCONNECTS
+ 
+-#define DRV_BUILD   "1"
+-
+ #define DRV_NAME    "iw_nes"
+-#define DRV_VERSION "1.0 OFED Build " DRV_BUILD
++#define DRV_VERSION "1.4.1.0 OFED"
+ #define PFX         DRV_NAME ": "
+ 
+ /*
+-- 
+1.5.3.3
+

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0270_netdev_stop.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0270_netdev_stop.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0270_netdev_stop.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,20 @@
+diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
+index d16e9bc..67ab146 100644
+--- a/drivers/infiniband/hw/nes/nes_nic.c
++++ b/drivers/infiniband/hw/nes/nes_nic.c
+@@ -307,12 +307,6 @@ static int nes_netdev_stop(struct net_device *netdev)
+ 	nic_active &= nic_active_mask;
+ 	nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active);
+ 
+-
+-	if (nesvnic->of_device_registered) {
+-		nes_destroy_ofa_device(nesvnic->nesibdev);
+-		nesvnic->nesibdev = NULL;
+-		nesvnic->of_device_registered = 0;
+-	}
+ 	nes_destroy_nic_qp(nesvnic);
+ 
+ 	nesvnic->netdev_open = 0;
+-- 
+1.5.3.3
+

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0280_sfp_plus_d.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0280_sfp_plus_d.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0280_sfp_plus_d.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,157 @@
+diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
+index 9a08665..f0fe63d 100644
+--- a/drivers/infiniband/hw/nes/nes_hw.c
++++ b/drivers/infiniband/hw/nes/nes_hw.c
+@@ -694,7 +694,8 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
+ 
+ 		/* init serdes 1 */
+ 		switch (nesadapter->phy_type[1]) {
+-		case  NES_PHY_TYPE_ARGUS:
++		case NES_PHY_TYPE_ARGUS:
++		case NES_PHY_TYPE_SFP_D:
+ 			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x00000000);
+ 			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x00000000);
+ 			break;
+@@ -1266,14 +1267,16 @@ int nes_init_phy(struct nes_device *nesdev)
+ 	}
+ 
+ 	if ((phy_type == NES_PHY_TYPE_IRIS) ||
+-	    (phy_type == NES_PHY_TYPE_ARGUS)) {
++	    (phy_type == NES_PHY_TYPE_ARGUS) ||
++	    (phy_type == NES_PHY_TYPE_SFP_D)) {
+ 		/* setup 10G MDIO operation */
+ 		tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
+ 		tx_config &= 0xFFFFFFE3;
+ 		tx_config |= 0x15;
+ 		nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
+ 	}
+-	if ((phy_type == NES_PHY_TYPE_ARGUS)) {
++	if ((phy_type == NES_PHY_TYPE_ARGUS) ||
++	    (phy_type == NES_PHY_TYPE_SFP_D)) {
+ 		/* Check firmware heartbeat */
+ 		nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
+ 		temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+@@ -1287,10 +1290,15 @@ int nes_init_phy(struct nes_device *nesdev)
+ 		/* no heartbeat, configure the PHY */
+ 		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0x0000, 0x8000);
+ 		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0000);
+-		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C);
+ 		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A);
+ 		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052);
+-		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0008);
++		if (phy_type == NES_PHY_TYPE_ARGUS) {
++			nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C);
++			nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0008);
++		} else {
++			nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x0004);
++			nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0038);
++		}
+ 		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0098);
+ 		nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00);
+ 		nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0001);
+@@ -2368,6 +2376,7 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
+ 				break;
+ 
+ 			case NES_PHY_TYPE_ARGUS:
++			case NES_PHY_TYPE_SFP_D:
+ 				/* clear the alarms */
+ 				nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0x0008);
+ 				nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc001);
+diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
+index 13bc26a..c3654c6 100644
+--- a/drivers/infiniband/hw/nes/nes_hw.h
++++ b/drivers/infiniband/hw/nes/nes_hw.h
+@@ -42,6 +42,7 @@
+ #define NES_PHY_TYPE_PUMA_1G   5
+ #define NES_PHY_TYPE_PUMA_10G  6
+ #define NES_PHY_TYPE_GLADIUS   7
++#define NES_PHY_TYPE_SFP_D     8
+ 
+ #define NES_MULTICAST_PF_MAX 8
+ 
+diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
+index 67ab146..ef13030 100644
+--- a/drivers/infiniband/hw/nes/nes_nic.c
++++ b/drivers/infiniband/hw/nes/nes_nic.c
+@@ -1423,49 +1423,55 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd
+ 	struct nes_vnic *nesvnic = netdev_priv(netdev);
+ 	struct nes_device *nesdev = nesvnic->nesdev;
+ 	struct nes_adapter *nesadapter = nesdev->nesadapter;
++	u32 mac_index = nesdev->mac_index;
++	u8 phy_type = nesadapter->phy_type[mac_index];
++	u8 phy_index = nesadapter->phy_index[mac_index];
+ 	u16 phy_data;
+ 
+ 	et_cmd->duplex = DUPLEX_FULL;
+ 	et_cmd->port   = PORT_MII;
++	et_cmd->maxtxpkt = 511;
++	et_cmd->maxrxpkt = 511;
+ 
+ 	if (nesadapter->OneG_Mode) {
+ 		et_cmd->speed = SPEED_1000;
+-		if (nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_PUMA_1G) {
++		if (phy_type == NES_PHY_TYPE_PUMA_1G) {
+ 			et_cmd->supported   = SUPPORTED_1000baseT_Full;
+ 			et_cmd->advertising = ADVERTISED_1000baseT_Full;
+ 			et_cmd->autoneg     = AUTONEG_DISABLE;
+ 			et_cmd->transceiver = XCVR_INTERNAL;
+-			et_cmd->phy_address = nesdev->mac_index;
++			et_cmd->phy_address = mac_index;
+ 		} else {
+-			et_cmd->supported   = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg;
+-			et_cmd->advertising = ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg;
+-			nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index], &phy_data);
++			et_cmd->supported   = SUPPORTED_1000baseT_Full
++					    | SUPPORTED_Autoneg;
++			et_cmd->advertising = ADVERTISED_1000baseT_Full
++					    | ADVERTISED_Autoneg;
++			nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
+ 			if (phy_data & 0x1000)
+ 				et_cmd->autoneg = AUTONEG_ENABLE;
+ 			else
+ 				et_cmd->autoneg = AUTONEG_DISABLE;
+ 			et_cmd->transceiver = XCVR_EXTERNAL;
+-			et_cmd->phy_address = nesadapter->phy_index[nesdev->mac_index];
++			et_cmd->phy_address = phy_index;
+ 		}
++		return 0;
++	}
++	if ((phy_type == NES_PHY_TYPE_IRIS) ||
++	    (phy_type == NES_PHY_TYPE_ARGUS) ||
++	    (phy_type == NES_PHY_TYPE_SFP_D)) {
++		et_cmd->transceiver = XCVR_EXTERNAL;
++		et_cmd->port        = PORT_FIBRE;
++		et_cmd->supported   = SUPPORTED_FIBRE;
++		et_cmd->advertising = ADVERTISED_FIBRE;
++		et_cmd->phy_address = phy_index;
+ 	} else {
+-		if ((nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_IRIS) ||
+-		    (nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_ARGUS)) {
+-			et_cmd->transceiver = XCVR_EXTERNAL;
+-			et_cmd->port        = PORT_FIBRE;
+-			et_cmd->supported   = SUPPORTED_FIBRE;
+-			et_cmd->advertising = ADVERTISED_FIBRE;
+-			et_cmd->phy_address = nesadapter->phy_index[nesdev->mac_index];
+-		} else {
+-			et_cmd->transceiver = XCVR_INTERNAL;
+-			et_cmd->supported   = SUPPORTED_10000baseT_Full;
+-			et_cmd->advertising = ADVERTISED_10000baseT_Full;
+-			et_cmd->phy_address = nesdev->mac_index;
+-		}
+-		et_cmd->speed = SPEED_10000;
+-		et_cmd->autoneg = AUTONEG_DISABLE;
++		et_cmd->transceiver = XCVR_INTERNAL;
++		et_cmd->supported   = SUPPORTED_10000baseT_Full;
++		et_cmd->advertising = ADVERTISED_10000baseT_Full;
++		et_cmd->phy_address = mac_index;
+ 	}
+-	et_cmd->maxtxpkt = 511;
+-	et_cmd->maxrxpkt = 511;
++	et_cmd->speed = SPEED_10000;
++	et_cmd->autoneg = AUTONEG_DISABLE;
+ 	return 0;
+ }
+ 
+-- 
+1.5.3.3
+

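The SFP+ patch above threads the new NES_PHY_TYPE_SFP_D value through every place that already special-cases the Argus 10G PHY: serdes emphasis setup, 10G MDIO configuration, the firmware heartbeat path (with SFP_D-specific 0xc302/0xc319 register values), MAC interrupt handling, and ethtool reporting, where local phy_type/phy_index variables and an early return for the 1G path flatten the nesting. A hedged sketch of one way such shared comparisons could be kept in a single place; the predicate name is hypothetical, the constants come from nes_hw.h as patched:

    /* Hypothetical helper, not in the driver: the external 10G PHYs that
     * share the Argus programming path, now including the SFP+ "D" part. */
    static inline int nes_phy_is_10g_fibre(u8 phy_type)
    {
            return phy_type == NES_PHY_TYPE_IRIS ||
                   phy_type == NES_PHY_TYPE_ARGUS ||
                   phy_type == NES_PHY_TYPE_SFP_D;
    }
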
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0290_rc4.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0290_rc4.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0290_rc4.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,646 @@
+diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
+index 91d5372..1856a21 100644
+--- a/drivers/infiniband/hw/nes/nes_cm.c
++++ b/drivers/infiniband/hw/nes/nes_cm.c
+@@ -1400,7 +1400,6 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ 		passive_state = atomic_add_return(1, &cm_node->passive_state);
+ 		if (passive_state ==  NES_SEND_RESET_EVENT)
+ 			create_event(cm_node, NES_CM_EVENT_RESET);
+-		cleanup_retrans_entry(cm_node);
+ 		cm_node->state = NES_CM_STATE_CLOSED;
+ 		dev_kfree_skb_any(skb);
+ 		break;
+@@ -1414,17 +1413,16 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ 		active_open_err(cm_node, skb, reset);
+ 		break;
+ 	case NES_CM_STATE_CLOSED:
+-		cleanup_retrans_entry(cm_node);
+ 		drop_packet(skb);
+ 		break;
++	case NES_CM_STATE_LAST_ACK:
++		cm_node->cm_id->rem_ref(cm_node->cm_id);
+ 	case NES_CM_STATE_TIME_WAIT:
+-		cleanup_retrans_entry(cm_node);
+ 		cm_node->state = NES_CM_STATE_CLOSED;
+ 		rem_ref_cm_node(cm_node->cm_core, cm_node);
+ 		drop_packet(skb);
+ 		break;
+ 	case NES_CM_STATE_FIN_WAIT1:
+-		cleanup_retrans_entry(cm_node);
+ 		nes_debug(NES_DBG_CM, "Bad state %s[%u]\n", __func__, __LINE__);
+ 	default:
+ 		drop_packet(skb);
+@@ -2713,7 +2711,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 	/* associate the node with the QP */
+ 	nesqp->cm_node = (void *)cm_node;
+ 	cm_node->nesqp = nesqp;
+-	nes_add_ref(&nesqp->ibqp);
+ 
+ 	nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
+ 		nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
+@@ -2766,6 +2763,9 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 			nes_debug(NES_DBG_CM, "Unable to register memory region"
+ 					"for lSMM for cm_node = %p \n",
+ 					cm_node);
++			pci_free_consistent(nesdev->pcidev,
++					nesqp->private_data_len+sizeof(struct ietf_mpa_frame),
++					nesqp->ietf_frame, nesqp->ietf_frame_pbase);
+ 			return -ENOMEM;
+ 		}
+ 
+@@ -2882,6 +2882,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 
+ 	/* notify OF layer that accept event was successfull */
+ 	cm_id->add_ref(cm_id);
++	nes_add_ref(&nesqp->ibqp);
+ 
+ 	cm_event.event = IW_CM_EVENT_ESTABLISHED;
+ 	cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED;
+@@ -2962,6 +2963,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 	struct nes_device *nesdev;
+ 	struct nes_cm_node *cm_node;
+ 	struct nes_cm_info cm_info;
++	int apbvt_set = 0;
+ 
+ 	ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
+ 	if (!ibqp)
+@@ -2999,9 +3001,11 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 		conn_param->private_data_len);
+ 
+ 	if (cm_id->local_addr.sin_addr.s_addr !=
+-		cm_id->remote_addr.sin_addr.s_addr)
++		cm_id->remote_addr.sin_addr.s_addr) {
+ 		nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port),
+ 			PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD);
++		apbvt_set = 1;
++	}
+ 
+ 	/* set up the connection params for the node */
+ 	cm_info.loc_addr = htonl(cm_id->local_addr.sin_addr.s_addr);
+@@ -3018,8 +3022,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 		conn_param->private_data_len, (void *)conn_param->private_data,
+ 		&cm_info);
+ 	if (!cm_node) {
+-		if (cm_id->local_addr.sin_addr.s_addr !=
+-				cm_id->remote_addr.sin_addr.s_addr)
++		if (apbvt_set)
+ 			nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port),
+ 				PCI_FUNC(nesdev->pcidev->devfn),
+ 				NES_MANAGE_APBVT_DEL);
+@@ -3028,7 +3031,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 		return -ENOMEM;
+ 	}
+ 
+-	cm_node->apbvt_set = 1;
++	cm_node->apbvt_set = apbvt_set;
+ 	nesqp->cm_node = cm_node;
+ 	cm_node->nesqp = nesqp;
+ 	nes_add_ref(&nesqp->ibqp);
+diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
+index f0fe63d..fe36c6f 100644
+--- a/drivers/infiniband/hw/nes/nes_hw.c
++++ b/drivers/infiniband/hw/nes/nes_hw.c
+@@ -525,6 +525,74 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
+ 	INIT_LIST_HEAD(&nesadapter->nesvnic_list[2]);
+ 	INIT_LIST_HEAD(&nesadapter->nesvnic_list[3]);
+ 
++	if ((!nesadapter->OneG_Mode) && (nesadapter->port_count == 2)) {
++		u32 pcs_control_status0, pcs_control_status1;
++		u32 reset_value;
++		u32 i = 0;
++		u32 int_cnt = 0;
++		u32 ext_cnt = 0;
++		unsigned long flags;
++		u32 j = 0;
++
++		pcs_control_status0 = nes_read_indexed(nesdev,
++			NES_IDX_PHY_PCS_CONTROL_STATUS0);
++		pcs_control_status1 = nes_read_indexed(nesdev,
++			NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
++
++		for (i = 0; i < NES_MAX_LINK_CHECK; i++) {
++			pcs_control_status0 = nes_read_indexed(nesdev,
++					NES_IDX_PHY_PCS_CONTROL_STATUS0);
++			pcs_control_status1 = nes_read_indexed(nesdev,
++					NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
++			if ((0x0F000100 == (pcs_control_status0 & 0x0F000100))
++			    || (0x0F000100 == (pcs_control_status1 & 0x0F000100)))
++				int_cnt++;
++			msleep(1);
++		}
++		if (int_cnt > 1) {
++			spin_lock_irqsave(&nesadapter->phy_lock, flags);
++			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F0C8);
++			mh_detected++;
++			reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET);
++			reset_value |= 0x0000003d;
++			nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value);
++
++			while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET)
++				& 0x00000040) != 0x00000040) && (j++ < 5000));
++			spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
++
++			pcs_control_status0 = nes_read_indexed(nesdev,
++					NES_IDX_PHY_PCS_CONTROL_STATUS0);
++			pcs_control_status1 = nes_read_indexed(nesdev,
++					NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
++
++			for (i = 0; i < NES_MAX_LINK_CHECK; i++) {
++				pcs_control_status0 = nes_read_indexed(nesdev,
++					NES_IDX_PHY_PCS_CONTROL_STATUS0);
++				pcs_control_status1 = nes_read_indexed(nesdev,
++					NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
++				if ((0x0F000100 == (pcs_control_status0 & 0x0F000100))
++					|| (0x0F000100 == (pcs_control_status1 & 0x0F000100))) {
++					if (++ext_cnt > int_cnt) {
++						spin_lock_irqsave(&nesadapter->phy_lock, flags);
++						nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1,
++								0x0000F088);
++						mh_detected++;
++						reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET);
++						reset_value |= 0x0000003d;
++						nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value);
++
++						while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET)
++							& 0x00000040) != 0x00000040) && (j++ < 5000));
++						spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
++						break;
++					}
++				}
++				msleep(1);
++			}
++		}
++	}
++
+ 	if (nesadapter->hw_rev == NE020_REV) {
+ 		init_timer(&nesadapter->mh_timer);
+ 		nesadapter->mh_timer.function = nes_mh_fix;
+@@ -693,6 +761,9 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
+ 			return 0;
+ 
+ 		/* init serdes 1 */
++		if (!(OneG_Mode && (nesadapter->phy_type[1] != NES_PHY_TYPE_PUMA_1G)))
++			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000FF);
++
+ 		switch (nesadapter->phy_type[1]) {
+ 		case NES_PHY_TYPE_ARGUS:
+ 		case NES_PHY_TYPE_SFP_D:
+@@ -700,21 +771,20 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
+ 			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x00000000);
+ 			break;
+ 		case NES_PHY_TYPE_CX4:
+-			sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1);
+-			sds &= 0xFFFFFFBF;
+-			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, sds);
+ 			if (wide_ppm_offset)
+ 				nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000FFFAA);
+-			else
+-				nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000FF);
+ 			break;
+ 		case NES_PHY_TYPE_PUMA_1G:
+ 			sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1);
+ 			sds |= 0x000000100;
+ 			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, sds);
+ 		}
+-		if (!OneG_Mode)
++		if (!OneG_Mode) {
+ 			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE1, 0x11110000);
++			sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1);
++			sds &= 0xFFFFFFBF;
++			nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, sds);
++		}
+ 	} else {
+ 		/* init serdes 0 */
+ 		nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x00000008);
+@@ -842,6 +912,12 @@ static void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_cou
+ 		u32temp &= 0x7fffffff;
+ 		u32temp |= 0x7fff0010;
+ 		nes_write_indexed(nesdev, 0x000021f8, u32temp);
++		if (port_count > 1) {
++			u32temp = nes_read_indexed(nesdev, 0x000023f8);
++			u32temp &= 0x7fffffff;
++			u32temp |= 0x7fff0010;
++			nes_write_indexed(nesdev, 0x000023f8, u32temp);
++		}
+ 	}
+ }
+ 
+@@ -1205,7 +1281,7 @@ int nes_init_phy(struct nes_device *nesdev)
+ {
+ 	struct nes_adapter *nesadapter = nesdev->nesadapter;
+ 	u32 counter = 0;
+-	u32 sds_common_control0;
++	u32 sds;
+ 	u32 mac_index = nesdev->mac_index;
+ 	u32 tx_config = 0;
+ 	u16 phy_data;
+@@ -1348,11 +1424,14 @@ int nes_init_phy(struct nes_device *nesdev)
+ 		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc314, 0x0063);
+ 
+ 		/* reset serdes */
+-		sds_common_control0  = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0);
+-		sds_common_control0 |= 0x1;
+-		nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds_common_control0);
+-		sds_common_control0 &= 0xfffffffe;
+-		nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds_common_control0);
++		sds  = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0
++					+ mac_index * 0x200);
++		sds |= 0x1;
++		nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0
++				  + mac_index * 0x200, sds);
++		sds &= 0xfffffffe;
++		nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0
++				  + mac_index * 0x200, sds);
+ 
+ 		counter = 0;
+ 		while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040)
+@@ -1917,7 +1996,7 @@ static void process_critical_error(struct nes_device *nesdev)
+ 			0x01010000 | (debug_error & 0x0000ffff));
+ 	if (crit_err_count++ > 10)
+ 		nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS1, 1 << 0x17);
+-	error_module = (u16) (debug_error & 0x0F00) >> 8;
++	error_module = (u16) (debug_error & 0x1F00) >> 8;
+ 	if (++nesdev->nesadapter->crit_error_count[error_module-1] >=
+ 			nes_max_critical_error_count) {
+ 		printk(KERN_ERR PFX "Masking off critical error for module "
+diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
+index 83c999c..586b186 100644
+--- a/drivers/infiniband/hw/nes/nes_verbs.c
++++ b/drivers/infiniband/hw/nes/nes_verbs.c
+@@ -1628,6 +1628,8 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
+ 				nescq->hw_cq.cq_number = nes_ucontext->mcrqf & 0xffff;
+ 			else
+ 				nescq->hw_cq.cq_number = nesvnic->mcrq_qp_id + nes_ucontext->mcrqf-1;
++
++			nescq->mcrqf = nes_ucontext->mcrqf;
+ 			nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
+ 		}
+ 		nes_debug(NES_DBG_CQ, "CQ Virtual Address = %08lX, size = %u.\n",
+@@ -1683,6 +1685,12 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
+ 		if (!context)
+ 			pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
+ 					nescq->hw_cq.cq_pbase);
++		else {
++			pci_free_consistent(nesdev->pcidev, nespbl->pbl_size,
++					    nespbl->pbl_vbase, nespbl->pbl_pbase);
++			kfree(nespbl);
++		}
++
+ 		nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
+ 		kfree(nescq);
+ 		return ERR_PTR(-ENOMEM);
+@@ -1706,6 +1714,11 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
+ 				if (!context)
+ 					pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
+ 							nescq->hw_cq.cq_pbase);
++				else {
++					pci_free_consistent(nesdev->pcidev, nespbl->pbl_size,
++							    nespbl->pbl_vbase, nespbl->pbl_pbase);
++					kfree(nespbl);
++				}
+ 				nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
+ 				kfree(nescq);
+ 				return ERR_PTR(-ENOMEM);
+@@ -1723,6 +1736,11 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
+ 				if (!context)
+ 					pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
+ 							nescq->hw_cq.cq_pbase);
++				else {
++					pci_free_consistent(nesdev->pcidev, nespbl->pbl_size,
++							    nespbl->pbl_vbase, nespbl->pbl_pbase);
++					kfree(nespbl);
++				}
+ 				nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
+ 				kfree(nescq);
+ 				return ERR_PTR(-ENOMEM);
+@@ -1775,6 +1793,11 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
+ 		if (!context)
+ 			pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
+ 					nescq->hw_cq.cq_pbase);
++		else {
++			pci_free_consistent(nesdev->pcidev, nespbl->pbl_size,
++					    nespbl->pbl_vbase, nespbl->pbl_pbase);
++			kfree(nespbl);
++		}
+ 		nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
+ 		kfree(nescq);
+ 		return ERR_PTR(-EIO);
+@@ -1856,7 +1879,10 @@ static int nes_destroy_cq(struct ib_cq *ib_cq)
+ 	set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
+ 	set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
+ 		(nescq->hw_cq.cq_number | ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 16)));
+-	nes_free_resource(nesadapter, nesadapter->allocated_cqs, nescq->hw_cq.cq_number);
++
++	if (!nescq->mcrqf)
++		nes_free_resource(nesadapter, nesadapter->allocated_cqs, nescq->hw_cq.cq_number);
++
+ 	atomic_set(&cqp_request->refcount, 2);
+ 	nes_post_cqp_request(nesdev, cqp_request);
+ 
+@@ -2123,6 +2149,7 @@ static struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd,
+ 	struct nes_root_vpbl root_vpbl;
+ 	u32 stag;
+ 	u32 i;
++	unsigned long mask;
+ 	u32 stag_index = 0;
+ 	u32 next_stag_index = 0;
+ 	u32 driver_key = 0;
+@@ -2151,6 +2178,9 @@ static struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd,
+ 		return ERR_PTR(-E2BIG);
+ 	}
+ 
++	if ((buffer_list[0].addr ^ *iova_start) & ~PAGE_MASK)
++		return ERR_PTR(-EINVAL);
++
+ 	err = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs, nesadapter->max_mr,
+ 			&stag_index, &next_stag_index);
+ 	if (err) {
+@@ -2217,9 +2247,15 @@ static struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd,
+ 			cur_pbl_index = 0;
+ 		}
+ 
+-		if (!buffer_list[i].size) {
++		mask = !buffer_list[i].size;
++		if (i != 0)
++			mask |= buffer_list[i].addr;
++		if (i != num_phys_buf - 1)
++			mask |= buffer_list[i].addr + buffer_list[i].size;
++
++		if (mask & ~PAGE_MASK) {
+ 			nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+-			nes_debug(NES_DBG_MR, "Invalid Buffer Size\n");
++			nes_debug(NES_DBG_MR, "Invalid buffer addr or size\n");
+ 			ibmr = ERR_PTR(-EINVAL);
+ 			kfree(nesmr);
+ 			goto reg_phys_err;
+@@ -3270,7 +3306,7 @@ fill_wqe_sg_send(struct nes_hw_qp_wqe *wqe, struct ib_send_wr *ib_wr, u32 uselke
+  * nes_post_send
+  */
+ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
+-		struct ib_send_wr **bad_wr)
++			 struct ib_send_wr **bad_wr)
+ {
+ 	u64 u64temp;
+ 	unsigned long flags = 0;
+@@ -3278,18 +3314,12 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
+ 	struct nes_device *nesdev = nesvnic->nesdev;
+ 	struct nes_qp *nesqp = to_nesqp(ibqp);
+ 	struct nes_hw_qp_wqe *wqe;
+-	int err;
++	int err = 0;
+ 	u32 qsize = nesqp->hwqp.sq_size;
+ 	u32 head;
+-	u32 wqe_misc;
+-	u32 wqe_count;
++	u32 wqe_misc = 0;
++	u32 wqe_count = 0;
+ 	u32 counter;
+-	u32 total_payload_length;
+-
+-	err = 0;
+-	wqe_misc = 0;
+-	wqe_count = 0;
+-	total_payload_length = 0;
+ 
+ 	if (nesqp->ibqp_state > IB_QPS_RTS)
+ 		return -EINVAL;
+@@ -3306,100 +3336,120 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
+ 		}
+ 
+ 		wqe = &nesqp->hwqp.sq_vbase[head];
+-		/* nes_debug(NES_DBG_IW_TX, "processing sq wqe for QP%u at %p, head = %u.\n",
+-				nesqp->hwqp.qp_id, wqe, head); */
+ 		nes_fill_init_qp_wqe(wqe, nesqp, head);
+ 		u64temp = (u64)(ib_wr->wr_id);
+-		set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX,
+-					u64temp);
+-			switch (ib_wr->opcode) {
+-				case IB_WR_SEND:
+-					if (ib_wr->send_flags & IB_SEND_SOLICITED) {
+-						wqe_misc = NES_IWARP_SQ_OP_SENDSE;
+-					} else {
+-						wqe_misc = NES_IWARP_SQ_OP_SEND;
+-					}
+-					if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
+-						err = -EINVAL;
+-						break;
+-					}
+-					if (ib_wr->send_flags & IB_SEND_FENCE) {
+-						wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
+-					}
+-					if ((ib_wr->send_flags & IB_SEND_INLINE) &&
+-							((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
+-							(ib_wr->sg_list[0].length <= 64)) {
+-						memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
+-							       (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
+-						set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
+-								ib_wr->sg_list[0].length);
+-						wqe_misc |= NES_IWARP_SQ_WQE_IMM_DATA;
+-					} else {
+-						fill_wqe_sg_send(wqe, ib_wr, 1);
+-					}
++		set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX, u64temp);
++		switch (ib_wr->opcode) {
++		case IB_WR_SEND:
++		case IB_WR_SEND_WITH_INV:
++			if (IB_WR_SEND == ib_wr->opcode) {
++				if (ib_wr->send_flags & IB_SEND_SOLICITED)
++					wqe_misc = NES_IWARP_SQ_OP_SENDSE;
++				else
++					wqe_misc = NES_IWARP_SQ_OP_SEND;
++			} else {
++				if (ib_wr->send_flags & IB_SEND_SOLICITED)
++					wqe_misc = NES_IWARP_SQ_OP_SENDSEINV;
++				else
++					wqe_misc = NES_IWARP_SQ_OP_SENDINV;
+ 
+-					break;
+-				case IB_WR_RDMA_WRITE:
+-					wqe_misc = NES_IWARP_SQ_OP_RDMAW;
+-					if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
+-						nes_debug(NES_DBG_IW_TX, "Exceeded max sge, ib_wr=%u, max=%u\n",
+-								ib_wr->num_sge,
+-								nesdev->nesadapter->max_sge);
+-						err = -EINVAL;
+-						break;
+-					}
+-					if (ib_wr->send_flags & IB_SEND_FENCE) {
+-						wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
+-					}
++				set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_INV_STAG_LOW_IDX,
++						    ib_wr->ex.invalidate_rkey);
++			}
+ 
+-					set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX,
+-							ib_wr->wr.rdma.rkey);
+-					set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX,
+-							ib_wr->wr.rdma.remote_addr);
+-
+-					if ((ib_wr->send_flags & IB_SEND_INLINE) &&
+-							((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
+-							(ib_wr->sg_list[0].length <= 64)) {
+-						memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
+-							       (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
+-						set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
+-								ib_wr->sg_list[0].length);
+-						wqe_misc |= NES_IWARP_SQ_WQE_IMM_DATA;
+-					} else {
+-						fill_wqe_sg_send(wqe, ib_wr, 1);
+-					}
+-					wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX] =
+-							wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX];
+-					break;
+-				case IB_WR_RDMA_READ:
+-					/* iWARP only supports 1 sge for RDMA reads */
+-					if (ib_wr->num_sge > 1) {
+-						nes_debug(NES_DBG_IW_TX, "Exceeded max sge, ib_wr=%u, max=1\n",
+-								ib_wr->num_sge);
+-						err = -EINVAL;
+-						break;
+-					}
+-					wqe_misc = NES_IWARP_SQ_OP_RDMAR;
+-					set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX,
+-							ib_wr->wr.rdma.remote_addr);
+-					set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX,
+-							ib_wr->wr.rdma.rkey);
+-					set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX,
+-							ib_wr->sg_list->length);
+-					set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_FRAG0_LOW_IDX,
+-							ib_wr->sg_list->addr);
+-					set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_STAG0_IDX,
+-							ib_wr->sg_list->lkey);
+-					break;
+-				default:
+-					/* error */
+-					err = -EINVAL;
+-					break;
++			if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
++				err = -EINVAL;
++				break;
+ 			}
+ 
+-		if (ib_wr->send_flags & IB_SEND_SIGNALED) {
+-			wqe_misc |= NES_IWARP_SQ_WQE_SIGNALED_COMPL;
++			if (ib_wr->send_flags & IB_SEND_FENCE)
++				wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
++
++			if ((ib_wr->send_flags & IB_SEND_INLINE) &&
++			    ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
++			     (ib_wr->sg_list[0].length <= 64)) {
++				memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
++				       (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
++				set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
++						    ib_wr->sg_list[0].length);
++				wqe_misc |= NES_IWARP_SQ_WQE_IMM_DATA;
++			} else
++				fill_wqe_sg_send(wqe, ib_wr, 1);
++
++			break;
++		case IB_WR_RDMA_WRITE:
++			wqe_misc = NES_IWARP_SQ_OP_RDMAW;
++			if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
++				nes_debug(NES_DBG_IW_TX, "Exceeded max sge, ib_wr=%u, max=%u\n",
++					  ib_wr->num_sge, nesdev->nesadapter->max_sge);
++				err = -EINVAL;
++				break;
++			}
++
++			if (ib_wr->send_flags & IB_SEND_FENCE)
++				wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
++
++			set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX,
++					    ib_wr->wr.rdma.rkey);
++			set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX,
++					    ib_wr->wr.rdma.remote_addr);
++
++			if ((ib_wr->send_flags & IB_SEND_INLINE) &&
++			    ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
++			     (ib_wr->sg_list[0].length <= 64)) {
++				memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
++				       (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
++				set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
++						    ib_wr->sg_list[0].length);
++				wqe_misc |= NES_IWARP_SQ_WQE_IMM_DATA;
++			} else
++				fill_wqe_sg_send(wqe, ib_wr, 1);
++
++			wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX] =
++				wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX];
++			break;
++		case IB_WR_RDMA_READ:
++		case IB_WR_RDMA_READ_WITH_INV:
++			/* iWARP only supports 1 sge for RDMA reads */
++			if (ib_wr->num_sge > 1) {
++				nes_debug(NES_DBG_IW_TX, "Exceeded max sge, ib_wr=%u, max=1\n",
++					  ib_wr->num_sge);
++				err = -EINVAL;
++				break;
++			}
++			if (ib_wr->opcode == IB_WR_RDMA_READ)
++				wqe_misc = NES_IWARP_SQ_OP_RDMAR;
++			else {
++				wqe_misc = NES_IWARP_SQ_OP_RDMAR_LOCINV;
++				set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_INV_STAG_LOW_IDX,
++						    ib_wr->ex.invalidate_rkey);
++			}
++
++			set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX,
++					    ib_wr->wr.rdma.remote_addr);
++			set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX,
++					    ib_wr->wr.rdma.rkey);
++			set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX,
++					    ib_wr->sg_list->length);
++			set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_FRAG0_LOW_IDX,
++					    ib_wr->sg_list->addr);
++			set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_STAG0_IDX,
++					    ib_wr->sg_list->lkey);
++			break;
++		case IB_WR_LOCAL_INV:
++			wqe_misc = NES_IWARP_SQ_OP_LOCINV;
++			set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_INV_STAG_LOW_IDX,
++					    ib_wr->ex.invalidate_rkey);
++			break;
++		default:
++			/* error */
++			err = -EINVAL;
++			break;
+ 		}
++
++		if (ib_wr->send_flags & IB_SEND_SIGNALED)
++			wqe_misc |= NES_IWARP_SQ_WQE_SIGNALED_COMPL;
++
+ 		wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = cpu_to_le32(wqe_misc);
+ 
+ 		ib_wr = ib_wr->next;
+@@ -3407,7 +3457,6 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
+ 		wqe_count++;
+ 		if (head >= qsize)
+ 			head = 0;
+-
+ 	}
+ 
+ 	nesqp->hwqp.sq_head = head;
+@@ -3416,7 +3465,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
+ 		counter = min(wqe_count, ((u32)255));
+ 		wqe_count -= counter;
+ 		nes_write32(nesdev->regs + NES_WQE_ALLOC,
+-				(counter << 24) | 0x00800000 | nesqp->hwqp.qp_id);
++			    (counter << 24) | 0x00800000 | nesqp->hwqp.qp_id);
+ 	}
+ 
+ 	spin_unlock_irqrestore(&nesqp->lock, flags);
+diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h
+index 5e48f67..41c07f2 100644
+--- a/drivers/infiniband/hw/nes/nes_verbs.h
++++ b/drivers/infiniband/hw/nes/nes_verbs.h
+@@ -112,6 +112,7 @@ struct nes_cq {
+ 	spinlock_t       lock;
+ 	u8               virtual_cq;
+ 	u8               pad[3];
++	u32		 mcrqf;
+ };
+ 
+ struct nes_wq {
+-- 
+1.5.3.3
+

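Besides the RST-handling and reference-count changes, the patch above tightens the buffer validation in nes_reg_phys_mr(): the iova must share the first buffer's in-page offset, every buffer after the first must start on a page boundary, every buffer before the last must end on one, and zero-length buffers are rejected, all folded into one mask tested against ~PAGE_MASK (the in-page offset bits). A standalone sketch of that rule, assuming PAGE_MASK from the kernel and the legacy struct ib_phys_buf; the function name is illustrative only:

    /* Illustrative only: the page-alignment rule enforced by the patch. */
    static int nes_phys_buf_list_valid(struct ib_phys_buf *buf, int n, u64 iova)
    {
            u64 mask;
            int i;

            /* The iova's offset within a page must match buffer 0's. */
            if ((buf[0].addr ^ iova) & ~PAGE_MASK)
                    return 0;

            for (i = 0; i < n; i++) {
                    mask = !buf[i].size;            /* zero size is invalid */
                    if (i != 0)
                            mask |= buf[i].addr;    /* must start page aligned */
                    if (i != n - 1)
                            mask |= buf[i].addr + buf[i].size; /* must end page aligned */
                    if (mask & ~PAGE_MASK)
                            return 0;
            }
            return 1;
    }
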
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0300_1_inch.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0300_1_inch.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0300_1_inch.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,23 @@
+diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
+index fe36c6f..7a78ca8 100644
+--- a/drivers/infiniband/hw/nes/nes_hw.c
++++ b/drivers/infiniband/hw/nes/nes_hw.c
+@@ -1371,13 +1371,14 @@ int nes_init_phy(struct nes_device *nesdev)
+ 		if (phy_type == NES_PHY_TYPE_ARGUS) {
+ 			nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C);
+ 			nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0008);
++			nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0001);
+ 		} else {
+ 			nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x0004);
+ 			nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0038);
++			nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0013);
+ 		}
+ 		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0098);
+ 		nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00);
+-		nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0001);
+ 
+ 		/* setup LEDs */
+ 		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x0007);
+-- 
+1.5.3.3
+

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0310_no_dyn_int_mod.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0310_no_dyn_int_mod.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0310_no_dyn_int_mod.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,16 @@
+diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
+index 0696b4c..1fc7916 100644
+--- a/drivers/infiniband/hw/nes/nes.c
++++ b/drivers/infiniband/hw/nes/nes.c
+@@ -83,7 +83,7 @@ module_param(send_first, int, 0644);
+ MODULE_PARM_DESC(send_first, "Send RDMA Message First on Active Connection");
+ 
+ 
+-unsigned int nes_drv_opt = 0;
++unsigned int nes_drv_opt = NES_DRV_OPT_DISABLE_INT_MOD;
+ module_param(nes_drv_opt, int, 0644);
+ MODULE_PARM_DESC(nes_drv_opt, "Driver option parameters");
+ 
+-- 
+1.5.3.3
+

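The one-line change above flips the default so dynamic interrupt moderation is disabled unless explicitly requested; nes_drv_opt remains a writable (0644) bitmask module parameter, so other option bits are unaffected. A hedged sketch of how such a bit is typically consulted, using the flag name from the patch; the called function is hypothetical:

    /* Illustrative only: feature sites test their bit in nes_drv_opt. */
    if (!(nes_drv_opt & NES_DRV_OPT_DISABLE_INT_MOD))
            nes_enable_dyn_int_moderation(nesdev);  /* hypothetical helper */
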
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0320_fmr.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0320_fmr.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0320_fmr.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,721 @@
+diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
+index 7a78ca8..24ed86b 100644
+--- a/drivers/infiniband/hw/nes/nes_hw.c
++++ b/drivers/infiniband/hw/nes/nes_hw.c
+@@ -422,8 +422,9 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
+ 
+ 	nesadapter->base_pd = 1;
+ 
+-	nesadapter->device_cap_flags =
+-		IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
++	nesadapter->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
++				       IB_DEVICE_MEM_WINDOW |
++				       IB_DEVICE_MEM_MGT_EXTENSIONS;
+ 
+ 	nesadapter->allocated_qps = (unsigned long *)&(((unsigned char *)nesadapter)
+ 			[(sizeof(struct nes_adapter)+(sizeof(unsigned long)-1))&(~(sizeof(unsigned long)-1))]);
+diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
+index c3654c6..1d5c59f 100644
+--- a/drivers/infiniband/hw/nes/nes_hw.h
++++ b/drivers/infiniband/hw/nes/nes_hw.h
+@@ -540,11 +540,23 @@ enum nes_iwarp_sq_fmr_wqe_word_idx {
+ 	NES_IWARP_SQ_FMR_WQE_PBL_LENGTH_IDX = 14,
+ };
+ 
++enum nes_iwarp_sq_fmr_opcodes {
++	NES_IWARP_SQ_FMR_WQE_ZERO_BASED			= (1<<6),
++	NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_4K		= (0<<7),
++	NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_2M		= (1<<7),
++	NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_LOCAL_READ	= (1<<16),
++	NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_LOCAL_WRITE 	= (1<<17),
++	NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_REMOTE_READ 	= (1<<18),
++	NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_REMOTE_WRITE = (1<<19),
++	NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_WINDOW_BIND 	= (1<<20),
++};
++
++#define NES_IWARP_SQ_FMR_WQE_MR_LENGTH_HIGH_MASK	0xFF;
++
+ enum nes_iwarp_sq_locinv_wqe_word_idx {
+ 	NES_IWARP_SQ_LOCINV_WQE_INV_STAG_IDX = 6,
+ };
+ 
+-
+ enum nes_iwarp_rq_wqe_word_idx {
+ 	NES_IWARP_RQ_WQE_TOTAL_PAYLOAD_IDX = 1,
+ 	NES_IWARP_RQ_WQE_COMP_CTX_LOW_IDX = 2,
+@@ -1134,6 +1146,19 @@ struct nes_pbl {
+ 	/* TODO: need to add list for two level tables */
+ };
+ 
++#define NES_4K_PBL_CHUNK_SIZE	4096
++
++struct nes_fast_mr_wqe_pbl {
++	u64		*kva;
++	dma_addr_t	paddr;
++};
++
++struct nes_ib_fast_reg_page_list {
++	struct ib_fast_reg_page_list	ibfrpl;
++	struct nes_fast_mr_wqe_pbl 	nes_wqe_pbl;
++	u64 				pbl;
++};
++
+ struct nes_listener {
+ 	struct work_struct      work;
+ 	struct workqueue_struct *wq;
+diff --git a/drivers/infiniband/hw/nes/nes_user.h b/drivers/infiniband/hw/nes/nes_user.h
+index cc90c14..ce62f3c 100644
+--- a/drivers/infiniband/hw/nes/nes_user.h
++++ b/drivers/infiniband/hw/nes/nes_user.h
+@@ -86,6 +86,7 @@ enum iwnes_memreg_type {
+ 	IWNES_MEMREG_TYPE_CQ = 0x0002,
+ 	IWNES_MEMREG_TYPE_MW = 0x0003,
+ 	IWNES_MEMREG_TYPE_FMR = 0x0004,
++	IWNES_MEMREG_TYPE_FMEM = 0x0005,
+ };
+ 
+ struct nes_mem_reg_req {
+diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
+index 586b186..7588a04 100644
+--- a/drivers/infiniband/hw/nes/nes_verbs.c
++++ b/drivers/infiniband/hw/nes/nes_verbs.c
+@@ -275,341 +275,235 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
+ }
+ 
+ 
+-/**
+- * nes_alloc_fmr
++/*
++ * nes_alloc_fast_mr
+  */
+-static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
+-		int ibmr_access_flags,
+-		struct ib_fmr_attr *ibfmr_attr)
++static int alloc_fast_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
++			     u32 stag, u32 page_count)
+ {
+-	unsigned long flags;
+-	struct nes_pd *nespd = to_nespd(ibpd);
+-	struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
+-	struct nes_device *nesdev = nesvnic->nesdev;
+-	struct nes_adapter *nesadapter = nesdev->nesadapter;
+-	struct nes_fmr *nesfmr;
+-	struct nes_cqp_request *cqp_request;
+ 	struct nes_hw_cqp_wqe *cqp_wqe;
++	struct nes_cqp_request *cqp_request;
++	unsigned long flags;
+ 	int ret;
+-	u32 stag;
+-	u32 stag_index = 0;
+-	u32 next_stag_index = 0;
+-	u32 driver_key = 0;
++	struct nes_adapter *nesadapter = nesdev->nesadapter;
+ 	u32 opcode = 0;
+-	u8 stag_key = 0;
+-	int i=0;
+-	struct nes_vpbl vpbl;
+-
+-	get_random_bytes(&next_stag_index, sizeof(next_stag_index));
+-	stag_key = (u8)next_stag_index;
+-
+-	driver_key = 0;
+-
+-	next_stag_index >>= 8;
+-	next_stag_index %= nesadapter->max_mr;
+-
+-	ret = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
+-			nesadapter->max_mr, &stag_index, &next_stag_index);
+-	if (ret) {
+-		goto failed_resource_alloc;
+-	}
+-
+-	nesfmr = kzalloc(sizeof(*nesfmr), GFP_KERNEL);
+-	if (!nesfmr) {
+-		ret = -ENOMEM;
+-		goto failed_fmr_alloc;
+-	}
+-
+-	nesfmr->nesmr.mode = IWNES_MEMREG_TYPE_FMR;
+-	if (ibfmr_attr->max_pages == 1) {
+-		/* use zero length PBL */
+-		nesfmr->nesmr.pbl_4k = 0;
+-		nesfmr->nesmr.pbls_used = 0;
+-	} else if (ibfmr_attr->max_pages <= 32) {
+-		/* use PBL 256 */
+-		nesfmr->nesmr.pbl_4k = 0;
+-		nesfmr->nesmr.pbls_used = 1;
+-	} else if (ibfmr_attr->max_pages <= 512) {
+-		/* use 4K PBLs */
+-		nesfmr->nesmr.pbl_4k = 1;
+-		nesfmr->nesmr.pbls_used = 1;
+-	} else {
+-		/* use two level 4K PBLs */
+-		/* add support for two level 256B PBLs */
+-		nesfmr->nesmr.pbl_4k = 1;
+-		nesfmr->nesmr.pbls_used = 1 + (ibfmr_attr->max_pages >> 9) +
+-				((ibfmr_attr->max_pages & 511) ? 1 : 0);
+-	}
+-	/* Register the region with the adapter */
+-	spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+-
+-	/* track PBL resources */
+-	if (nesfmr->nesmr.pbls_used != 0) {
+-		if (nesfmr->nesmr.pbl_4k) {
+-			if (nesfmr->nesmr.pbls_used > nesadapter->free_4kpbl) {
+-				spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+-				ret = -ENOMEM;
+-				goto failed_vpbl_avail;
+-			} else {
+-				nesadapter->free_4kpbl -= nesfmr->nesmr.pbls_used;
+-			}
+-		} else {
+-			if (nesfmr->nesmr.pbls_used > nesadapter->free_256pbl) {
+-				spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+-				ret = -ENOMEM;
+-				goto failed_vpbl_avail;
+-			} else {
+-				nesadapter->free_256pbl -= nesfmr->nesmr.pbls_used;
+-			}
+-		}
+-	}
+-
+-	/* one level pbl */
+-	if (nesfmr->nesmr.pbls_used == 0) {
+-		nesfmr->root_vpbl.pbl_vbase = NULL;
+-		nes_debug(NES_DBG_MR,  "zero level pbl \n");
+-	} else if (nesfmr->nesmr.pbls_used == 1) {
+-		/* can change it to kmalloc & dma_map_single */
+-		nesfmr->root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
+-				&nesfmr->root_vpbl.pbl_pbase);
+-		if (!nesfmr->root_vpbl.pbl_vbase) {
+-			spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+-			ret = -ENOMEM;
+-			goto failed_vpbl_alloc;
+-		}
+-		nesfmr->leaf_pbl_cnt = 0;
+-		nes_debug(NES_DBG_MR, "one level pbl, root_vpbl.pbl_vbase=%p \n",
+-				nesfmr->root_vpbl.pbl_vbase);
+-	}
+-	/* two level pbl */
+-	else {
+-		nesfmr->root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 8192,
+-				&nesfmr->root_vpbl.pbl_pbase);
+-		if (!nesfmr->root_vpbl.pbl_vbase) {
+-			spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+-			ret = -ENOMEM;
+-			goto failed_vpbl_alloc;
+-		}
+-
+-		nesfmr->leaf_pbl_cnt = nesfmr->nesmr.pbls_used-1;
+-		nesfmr->root_vpbl.leaf_vpbl = kzalloc(sizeof(*nesfmr->root_vpbl.leaf_vpbl)*1024, GFP_ATOMIC);
+-		if (!nesfmr->root_vpbl.leaf_vpbl) {
+-			spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+-			ret = -ENOMEM;
+-			goto failed_leaf_vpbl_alloc;
+-		}
+-
+-		nes_debug(NES_DBG_MR, "two level pbl, root_vpbl.pbl_vbase=%p"
+-				" leaf_pbl_cnt=%d root_vpbl.leaf_vpbl=%p\n",
+-				nesfmr->root_vpbl.pbl_vbase, nesfmr->leaf_pbl_cnt, nesfmr->root_vpbl.leaf_vpbl);
+-
+-		for (i=0; i<nesfmr->leaf_pbl_cnt; i++)
+-			nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase = NULL;
+-
+-		for (i=0; i<nesfmr->leaf_pbl_cnt; i++) {
+-			vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
+-					&vpbl.pbl_pbase);
+-
+-			if (!vpbl.pbl_vbase) {
+-				ret = -ENOMEM;
+-				spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+-				goto failed_leaf_vpbl_pages_alloc;
+-			}
+-
+-			nesfmr->root_vpbl.pbl_vbase[i].pa_low = cpu_to_le32((u32)vpbl.pbl_pbase);
+-			nesfmr->root_vpbl.pbl_vbase[i].pa_high = cpu_to_le32((u32)((((u64)vpbl.pbl_pbase)>>32)));
+-			nesfmr->root_vpbl.leaf_vpbl[i] = vpbl;
+-
+-			nes_debug(NES_DBG_MR, "pbase_low=0x%x, pbase_high=0x%x, vpbl=%p\n",
+-					nesfmr->root_vpbl.pbl_vbase[i].pa_low,
+-					nesfmr->root_vpbl.pbl_vbase[i].pa_high,
+-					&nesfmr->root_vpbl.leaf_vpbl[i]);
+-		}
+-	}
+-	nesfmr->ib_qp = NULL;
+-	nesfmr->access_rights =0;
++	u16 major_code;
++	u64 region_length = page_count * PAGE_SIZE;
+ 
+-	stag = stag_index << 8;
+-	stag |= driver_key;
+-	stag += (u32)stag_key;
+ 
+-	spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ 	cqp_request = nes_get_cqp_request(nesdev);
+ 	if (cqp_request == NULL) {
+ 		nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n");
+-		ret = -ENOMEM;
+-		goto failed_leaf_vpbl_pages_alloc;
++		return -ENOMEM;
+ 	}
++	nes_debug(NES_DBG_MR, "alloc_fast_reg_mr: page_count = %d, "
++			      "region_length = %llu\n",
++			      page_count, region_length);
+ 	cqp_request->waiting = 1;
+ 	cqp_wqe = &cqp_request->cqp_wqe;
+ 
+-	nes_debug(NES_DBG_MR, "Registering STag 0x%08X, index = 0x%08X\n",
+-			stag, stag_index);
+-
+-	opcode = NES_CQP_ALLOCATE_STAG | NES_CQP_STAG_VA_TO | NES_CQP_STAG_MR;
+-
+-	if (nesfmr->nesmr.pbl_4k == 1)
+-		opcode |= NES_CQP_STAG_PBL_BLK_SIZE;
+-
+-	if (ibmr_access_flags & IB_ACCESS_REMOTE_WRITE) {
+-		opcode |= NES_CQP_STAG_RIGHTS_REMOTE_WRITE |
+-				NES_CQP_STAG_RIGHTS_LOCAL_WRITE | NES_CQP_STAG_REM_ACC_EN;
+-		nesfmr->access_rights |=
+-				NES_CQP_STAG_RIGHTS_REMOTE_WRITE | NES_CQP_STAG_RIGHTS_LOCAL_WRITE |
+-				NES_CQP_STAG_REM_ACC_EN;
++	spin_lock_irqsave(&nesadapter->pbl_lock, flags);
++	if (nesadapter->free_4kpbl > 0) {
++		nesadapter->free_4kpbl--;
++		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
++	} else {
++		/* No 4kpbl's available: */
++		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
++		nes_debug(NES_DBG_MR, "Out of Pbls\n");
++		nes_free_cqp_request(nesdev, cqp_request);
++		return -ENOMEM;
+ 	}
+ 
+-	if (ibmr_access_flags & IB_ACCESS_REMOTE_READ) {
+-		opcode |= NES_CQP_STAG_RIGHTS_REMOTE_READ |
+-				NES_CQP_STAG_RIGHTS_LOCAL_READ | NES_CQP_STAG_REM_ACC_EN;
+-		nesfmr->access_rights |=
+-				NES_CQP_STAG_RIGHTS_REMOTE_READ | NES_CQP_STAG_RIGHTS_LOCAL_READ |
+-				NES_CQP_STAG_REM_ACC_EN;
+-	}
++	opcode = NES_CQP_ALLOCATE_STAG | NES_CQP_STAG_LEAVE_PENDING |
++		 NES_CQP_STAG_MR | NES_CQP_STAG_PBL_BLK_SIZE |
++		 NES_CQP_STAG_VA_TO | NES_CQP_STAG_REM_ACC_EN;
++	/*
++	 * Note: The current OFED API does not support the zero based TO option.  If added
++	 * 	 then need to changed the NES_CQP_STAG_VA* option.  Also, the API does
++	 * 	 not support that ability to have the MR set for local access only when
++	 *	 created and not allow the SQ op to override. Given this the remote enable
++	 *	 must be set here.
++	 */
+ 
+ 	nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ 	set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
+-	set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX, (nespd->pd_id & 0x00007fff));
+-	set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, stag);
++	set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX, 1);
+ 
+-	cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX] =
+-			cpu_to_le32((nesfmr->nesmr.pbls_used>1) ?
+-			(nesfmr->nesmr.pbls_used-1) : nesfmr->nesmr.pbls_used);
++	cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX] =
++			cpu_to_le32((u32)(region_length >> 8) & 0xff000000);
++	cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX] |=
++			cpu_to_le32(nespd->pd_id & 0x00007fff);
++
++	set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, stag);
++	set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_VA_LOW_IDX, 0);
++	set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_LEN_LOW_IDX, 0);
++	set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PA_LOW_IDX, 0);
++	set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_LEN_IDX, (page_count * 8));
++	cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_STAG_PBL_BLK_SIZE);
++	barrier();
+ 
+ 	atomic_set(&cqp_request->refcount, 2);
+ 	nes_post_cqp_request(nesdev, cqp_request);
+ 
+ 	/* Wait for CQP */
+-	ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
+-			NES_EVENT_TIMEOUT);
+-	nes_debug(NES_DBG_MR, "Register STag 0x%08X completed, wait_event_timeout ret = %u,"
+-			" CQP Major:Minor codes = 0x%04X:0x%04X.\n",
+-			stag, ret, cqp_request->major_code, cqp_request->minor_code);
+-
+-	if ((!ret) || (cqp_request->major_code)) {
+-		nes_put_cqp_request(nesdev, cqp_request);
+-		ret = (!ret) ? -ETIME : -EIO;
+-		goto failed_leaf_vpbl_pages_alloc;
+-	}
++	ret = wait_event_timeout(cqp_request->waitq,
++				 (0 != cqp_request->request_done),
++				 NES_EVENT_TIMEOUT);
++
++	nes_debug(NES_DBG_MR, "Allocate STag 0x%08X completed, "
++		  "wait_event_timeout ret = %u, CQP Major:Minor codes = "
++		  "0x%04X:0x%04X.\n", stag, ret, cqp_request->major_code,
++		  cqp_request->minor_code);
++	major_code = cqp_request->major_code;
+ 	nes_put_cqp_request(nesdev, cqp_request);
+-	nesfmr->nesmr.ibfmr.lkey = stag;
+-	nesfmr->nesmr.ibfmr.rkey = stag;
+-	nesfmr->attr = *ibfmr_attr;
+-
+-	return &nesfmr->nesmr.ibfmr;
+-
+-	failed_leaf_vpbl_pages_alloc:
+-	/* unroll all allocated pages */
+-	for (i=0; i<nesfmr->leaf_pbl_cnt; i++) {
+-		if (nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase) {
+-			pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase,
+-					nesfmr->root_vpbl.leaf_vpbl[i].pbl_pbase);
+-		}
+-	}
+-	if (nesfmr->root_vpbl.leaf_vpbl)
+-		kfree(nesfmr->root_vpbl.leaf_vpbl);
+-
+-	failed_leaf_vpbl_alloc:
+-	if (nesfmr->leaf_pbl_cnt == 0) {
+-		if (nesfmr->root_vpbl.pbl_vbase)
+-			pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.pbl_vbase,
+-					nesfmr->root_vpbl.pbl_pbase);
+-	} else
+-		pci_free_consistent(nesdev->pcidev, 8192, nesfmr->root_vpbl.pbl_vbase,
+-				nesfmr->root_vpbl.pbl_pbase);
+ 
+-	failed_vpbl_alloc:
+-	if (nesfmr->nesmr.pbls_used != 0) {
++	if (!ret || major_code) {
+ 		spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+-		if (nesfmr->nesmr.pbl_4k)
+-			nesadapter->free_4kpbl += nesfmr->nesmr.pbls_used;
+-		else
+-			nesadapter->free_256pbl += nesfmr->nesmr.pbls_used;
++		nesadapter->free_4kpbl++;
+ 		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ 	}
+ 
+-failed_vpbl_avail:
+-	kfree(nesfmr);
+-
+-	failed_fmr_alloc:
+-	nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+-
+-	failed_resource_alloc:
+-	return ERR_PTR(ret);
++	if (!ret)
++		return -ETIME;
++	else if (major_code)
++		return -EIO;
++	return 0;
+ }
+ 
+-
+-/**
+- * nes_dealloc_fmr
++/*
++ * nes_alloc_fast_reg_mr
+  */
+-static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
++struct ib_mr *nes_alloc_fast_reg_mr(struct ib_pd *ibpd, int max_page_list_len)
+ {
+-	unsigned long flags;
+-	struct nes_mr *nesmr = to_nesmr_from_ibfmr(ibfmr);
+-	struct nes_fmr *nesfmr = to_nesfmr(nesmr);
+-	struct nes_vnic *nesvnic = to_nesvnic(ibfmr->device);
++	struct nes_pd *nespd = to_nespd(ibpd);
++	struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
+ 	struct nes_device *nesdev = nesvnic->nesdev;
+ 	struct nes_adapter *nesadapter = nesdev->nesadapter;
+-	int i = 0;
+-	int rc;
+ 
+-	/* free the resources */
+-	if (nesfmr->leaf_pbl_cnt == 0) {
+-		/* single PBL case */
+-		if (nesfmr->root_vpbl.pbl_vbase)
+-			pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.pbl_vbase,
+-					nesfmr->root_vpbl.pbl_pbase);
+-	} else {
+-		for (i = 0; i < nesfmr->leaf_pbl_cnt; i++) {
+-			pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase,
+-					nesfmr->root_vpbl.leaf_vpbl[i].pbl_pbase);
+-		}
+-		kfree(nesfmr->root_vpbl.leaf_vpbl);
+-		pci_free_consistent(nesdev->pcidev, 8192, nesfmr->root_vpbl.pbl_vbase,
+-				nesfmr->root_vpbl.pbl_pbase);
+-	}
+-	nesmr->ibmw.device = ibfmr->device;
+-	nesmr->ibmw.pd = ibfmr->pd;
+-	nesmr->ibmw.rkey = ibfmr->rkey;
+-	nesmr->ibmw.uobject = NULL;
++	u32 next_stag_index;
++	u8 stag_key = 0;
++	u32 driver_key = 0;
++	int err = 0;
++	u32 stag_index = 0;
++	struct nes_mr *nesmr;
++	u32 stag;
++	int ret;
++	struct ib_mr *ibmr;
++/*
++ * Note:  Set to always use a fixed length single page entry PBL.  This is to allow
++ *	 for the fast_reg_mr operation to always know the size of the PBL.
++ */
++	if (max_page_list_len > (NES_4K_PBL_CHUNK_SIZE / sizeof(u64)))
++		return ERR_PTR(-E2BIG);
+ 
+-	rc = nes_dealloc_mw(&nesmr->ibmw);
++	get_random_bytes(&next_stag_index, sizeof(next_stag_index));
++	stag_key = (u8)next_stag_index;
++	next_stag_index >>= 8;
++	next_stag_index %= nesadapter->max_mr;
+ 
+-	if (nesfmr->nesmr.pbls_used != 0) {
+-		spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+-		if (nesfmr->nesmr.pbl_4k) {
+-			nesadapter->free_4kpbl += nesfmr->nesmr.pbls_used;
+-			WARN_ON(nesadapter->free_4kpbl > nesadapter->max_4kpbl);
+-		} else {
+-			nesadapter->free_256pbl += nesfmr->nesmr.pbls_used;
+-			WARN_ON(nesadapter->free_256pbl > nesadapter->max_256pbl);
+-		}
+-		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
++	err = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
++				 nesadapter->max_mr, &stag_index,
++				 &next_stag_index);
++	if (err)
++		return ERR_PTR(err);
++
++	nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL);
++	if (!nesmr) {
++		nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
++		return ERR_PTR(-ENOMEM);
+ 	}
+ 
+-	return rc;
+-}
++	stag = stag_index << 8;
++	stag |= driver_key;
++	stag += (u32)stag_key;
+ 
++	nes_debug(NES_DBG_MR, "Allocating STag 0x%08X index = 0x%08X\n",
++		  stag, stag_index);
+ 
+-/**
+- * nes_map_phys_fmr
+- */
+-static int nes_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
+-		int list_len, u64 iova)
+-{
+-	return 0;
+-}
++	ret = alloc_fast_reg_mr(nesdev, nespd, stag, max_page_list_len);
+ 
++	if (ret == 0) {
++		nesmr->ibmr.rkey = stag;
++		nesmr->ibmr.lkey = stag;
++		nesmr->mode = IWNES_MEMREG_TYPE_FMEM;
++		ibmr = &nesmr->ibmr;
++	} else {
++		kfree(nesmr);
++		nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
++		ibmr = ERR_PTR(-ENOMEM);
++	}
++	return ibmr;
++}
+ 
+-/**
+- * nes_unmap_frm
++/*
++ * nes_alloc_fast_reg_page_list
+  */
+-static int nes_unmap_fmr(struct list_head *ibfmr_list)
++static struct ib_fast_reg_page_list *nes_alloc_fast_reg_page_list(
++							struct ib_device *ibdev,
++							int page_list_len)
+ {
+-	return 0;
++	struct nes_vnic *nesvnic = to_nesvnic(ibdev);
++	struct nes_device *nesdev = nesvnic->nesdev;
++	struct ib_fast_reg_page_list *pifrpl;
++	struct nes_ib_fast_reg_page_list *pnesfrpl;
++
++	if (page_list_len > (NES_4K_PBL_CHUNK_SIZE / sizeof(u64)))
++		return ERR_PTR(-E2BIG);
++	/*
++	 * Allocate the ib_fast_reg_page_list structure, the
++	 * nes_fast_bpl structure, and the PLB table.
++	 */
++	pnesfrpl = kmalloc(sizeof(struct nes_ib_fast_reg_page_list) +
++			   page_list_len * sizeof(u64), GFP_KERNEL);
++
++	if (!pnesfrpl)
++		return ERR_PTR(-ENOMEM);
++
++	pifrpl = &pnesfrpl->ibfrpl;
++	pifrpl->page_list = &pnesfrpl->pbl;
++	pifrpl->max_page_list_len = page_list_len;
++	/*
++	 * Allocate the WQE PBL
++	 */
++	pnesfrpl->nes_wqe_pbl.kva = pci_alloc_consistent(nesdev->pcidev,
++							 page_list_len * sizeof(u64),
++							 &pnesfrpl->nes_wqe_pbl.paddr);
++
++	if (!pnesfrpl->nes_wqe_pbl.kva) {
++		kfree(pnesfrpl);
++		return ERR_PTR(-ENOMEM);
++	}
++	nes_debug(NES_DBG_MR, "nes_alloc_fast_reg_pbl: nes_frpl = %p, "
++		  "ibfrpl = %p, ibfrpl.page_list = %p, pbl.kva = %p, "
++		  "pbl.paddr= %p\n", pnesfrpl, &pnesfrpl->ibfrpl,
++		  pnesfrpl->ibfrpl.page_list, pnesfrpl->nes_wqe_pbl.kva,
++		  (void *)pnesfrpl->nes_wqe_pbl.paddr);
++
++	return pifrpl;
+ }
+ 
++/*
++ * nes_free_fast_reg_page_list
++ */
++static void nes_free_fast_reg_page_list(struct ib_fast_reg_page_list *pifrpl)
++{
++	struct nes_vnic *nesvnic = to_nesvnic(pifrpl->device);
++	struct nes_device *nesdev = nesvnic->nesdev;
+ 
++	struct nes_ib_fast_reg_page_list *pnesfrpl;
++	pnesfrpl = container_of(pifrpl, struct nes_ib_fast_reg_page_list, ibfrpl);
++	/*
++	 * Free the WQE PBL.
++	 */
++	pci_free_consistent(nesdev->pcidev,
++			    pifrpl->max_page_list_len * sizeof(u64),
++			    pnesfrpl->nes_wqe_pbl.kva,
++			    pnesfrpl->nes_wqe_pbl.paddr);
++	/*
++	 * Free the PBL structure
++	 */
++	kfree(pnesfrpl);
++}
+ 
+ /**
+  * nes_query_device
+@@ -3436,6 +3330,91 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
+ 			set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_STAG0_IDX,
+ 					    ib_wr->sg_list->lkey);
+ 			break;
++		case IB_WR_FAST_REG_MR:
++		{
++			int i;
++			int flags = ib_wr->wr.fast_reg.access_flags;
++			struct nes_ib_fast_reg_page_list *pnesfrpl =
++				container_of(ib_wr->wr.fast_reg.page_list,
++					     struct nes_ib_fast_reg_page_list,
++					     ibfrpl);
++			u64 *src_page_list = pnesfrpl->ibfrpl.page_list;
++			u64 *dst_page_list = pnesfrpl->nes_wqe_pbl.kva;
++
++			if (ib_wr->wr.fast_reg.page_list_len >
++			    (NES_4K_PBL_CHUNK_SIZE / sizeof(u64))) {
++				nes_debug(NES_DBG_IW_TX, "SQ_FMR: bad page_list_len\n");
++				err = -EINVAL ;
++				break;
++			}
++			wqe_misc = NES_IWARP_SQ_OP_FAST_REG;
++			set_wqe_64bit_value(wqe->wqe_words,
++					    NES_IWARP_SQ_FMR_WQE_VA_FBO_LOW_IDX,
++					    ib_wr->wr.fast_reg.iova_start);
++			set_wqe_32bit_value(wqe->wqe_words,
++					    NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX,
++					    ib_wr->wr.fast_reg.length);
++			set_wqe_32bit_value(wqe->wqe_words,
++					    NES_IWARP_SQ_FMR_WQE_MR_STAG_IDX,
++					    ib_wr->wr.fast_reg.rkey);
++			/* Set page size: */
++			if (ib_wr->wr.fast_reg.page_shift == 12) {
++				wqe_misc |= NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_4K;
++			} else if (ib_wr->wr.fast_reg.page_shift == 21) {
++				wqe_misc |= NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_2M;
++			} else {
++				nes_debug(NES_DBG_IW_TX, "Invalid page shift,"
++					  " ib_wr=%u, max=1\n", ib_wr->num_sge);
++				err = -EINVAL;
++				break;
++			}
++			/* Set access_flags */
++			wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_LOCAL_READ;
++			if (flags & IB_ACCESS_LOCAL_WRITE)
++				wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_LOCAL_WRITE;
++
++			if (flags & IB_ACCESS_REMOTE_WRITE)
++				wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_REMOTE_WRITE;
++
++			if (flags & IB_ACCESS_REMOTE_READ)
++				wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_REMOTE_READ;
++
++			if (flags & IB_ACCESS_MW_BIND)
++				wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_WINDOW_BIND;
++
++			/* Fill in PBL info: */
++			if (ib_wr->wr.fast_reg.page_list_len >
++			    pnesfrpl->ibfrpl.max_page_list_len) {
++				nes_debug(NES_DBG_IW_TX, "Invalid page list length,"
++					  " ib_wr=%p, value=%u, max=%u\n",
++					  ib_wr, ib_wr->wr.fast_reg.page_list_len,
++					  pnesfrpl->ibfrpl.max_page_list_len);
++				err = -EINVAL;
++				break;
++			}
++
++			set_wqe_64bit_value(wqe->wqe_words,
++					    NES_IWARP_SQ_FMR_WQE_PBL_ADDR_LOW_IDX,
++					    pnesfrpl->nes_wqe_pbl.paddr);
++
++			set_wqe_32bit_value(wqe->wqe_words,
++					    NES_IWARP_SQ_FMR_WQE_PBL_LENGTH_IDX,
++					    ib_wr->wr.fast_reg.page_list_len * 8);
++
++			for (i = 0; i < ib_wr->wr.fast_reg.page_list_len; i++)
++				dst_page_list[i] = cpu_to_le64(src_page_list[i]);
++
++			nes_debug(NES_DBG_IW_TX, "SQ_FMR: iova_start: %p, "
++				  "length: %d, rkey: %0x, pgl_paddr: %p, "
++				  "page_list_len: %u\n",
++				  (void *)ib_wr->wr.fast_reg.iova_start,
++				  ib_wr->wr.fast_reg.length,
++				  ib_wr->wr.fast_reg.rkey,
++				  (void *)pnesfrpl->nes_wqe_pbl.paddr,
++				  ib_wr->wr.fast_reg.page_list_len,
++				  wqe_misc);
++			break;
++		}
+ 		case IB_WR_LOCAL_INV:
+ 			wqe_misc = NES_IWARP_SQ_OP_LOCINV;
+ 			set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_INV_STAG_LOW_IDX,
+@@ -3653,6 +3632,9 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
+ 						nes_debug(NES_DBG_CQ, "Operation = Send.\n");
+ 						entry->opcode = IB_WC_SEND;
+ 						break;
++					case NES_IWARP_SQ_OP_FAST_REG:
++						entry->opcode = IB_WC_FAST_REG_MR;
++						break;
+ 				}
+ 			} else {
+ 				/* Working on a RQ Completion*/
+@@ -3802,11 +3784,9 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
+ 	nesibdev->ibdev.dealloc_mw = nes_dealloc_mw;
+ 	nesibdev->ibdev.bind_mw = nes_bind_mw;
+ 
+-	nesibdev->ibdev.alloc_fmr = nes_alloc_fmr;
+-	nesibdev->ibdev.unmap_fmr = nes_unmap_fmr;
+-	nesibdev->ibdev.dealloc_fmr = nes_dealloc_fmr;
+-	nesibdev->ibdev.map_phys_fmr = nes_map_phys_fmr;
+-
++	nesibdev->ibdev.alloc_fast_reg_mr = nes_alloc_fast_reg_mr;
++	nesibdev->ibdev.alloc_fast_reg_page_list = nes_alloc_fast_reg_page_list;
++	nesibdev->ibdev.free_fast_reg_page_list = nes_free_fast_reg_page_list;
+ 	nesibdev->ibdev.attach_mcast = nes_multicast_attach;
+ 	nesibdev->ibdev.detach_mcast = nes_multicast_detach;
+ 	nesibdev->ibdev.process_mad = nes_process_mad;
+-- 
+1.5.3.3
+
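
For context, a hedged consumer-side sketch of the fast-registration verbs the nes patch above implements (not part of the patch; pd, qp, dma_addrs and nents are caller-supplied placeholders, and the API comes from <rdma/ib_verbs.h> of this kernel era):

static int example_fast_reg(struct ib_pd *pd, struct ib_qp *qp,
			    u64 *dma_addrs, int nents)
{
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	struct ib_send_wr wr, *bad_wr;
	int i;

	mr = ib_alloc_fast_reg_mr(pd, nents);		/* ends up in nes_alloc_fast_reg_mr() */
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	frpl = ib_alloc_fast_reg_page_list(pd->device, nents);	/* nes_alloc_fast_reg_page_list() */
	if (IS_ERR(frpl)) {
		ib_dereg_mr(mr);
		return PTR_ERR(frpl);
	}

	for (i = 0; i < nents; i++)
		frpl->page_list[i] = dma_addrs[i];	/* copied into the WQE PBL by nes_post_send() */

	memset(&wr, 0, sizeof(wr));
	wr.opcode = IB_WR_FAST_REG_MR;
	wr.wr.fast_reg.iova_start = dma_addrs[0];
	wr.wr.fast_reg.page_list = frpl;
	wr.wr.fast_reg.page_list_len = nents;
	wr.wr.fast_reg.page_shift = PAGE_SHIFT;		/* nes accepts 12 (4K) or 21 (2M) */
	wr.wr.fast_reg.length = nents * PAGE_SIZE;
	wr.wr.fast_reg.rkey = mr->rkey;
	wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

	/* mr and frpl would normally be cached by the caller and torn down
	 * later with ib_dereg_mr() / ib_free_fast_reg_page_list(). */
	return ib_post_send(qp, &wr, &bad_wr);		/* completes with IB_WC_FAST_REG_MR */
}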

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0330_init_rd_atom.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0330_init_rd_atom.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nes_0330_init_rd_atom.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,13 @@
+diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
+index 7588a04..a5db705 100644
+--- a/drivers/infiniband/hw/nes/nes_verbs.c
++++ b/drivers/infiniband/hw/nes/nes_verbs.c
+@@ -548,7 +548,7 @@ static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *prop
+ 		default:
+ 			props->max_qp_rd_atom = 0;
+ 	}
+-	props->max_qp_init_rd_atom = props->max_qp_wr;
++	props->max_qp_init_rd_atom = props->max_qp_rd_atom;
+ 	props->atomic_cap = IB_ATOMIC_NONE;
+ 	props->max_map_per_fmr = 1;
+ 

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_to_2.6.28_26.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_to_2.6.28_26.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_to_2.6.28_26.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,26 @@
+commit d7fb120774f062ce7db439863ab5d4190d6f989c
+Author: Trond Myklebust <Trond.Myklebust at netapp.com>
+Date:   Mon Oct 6 20:08:56 2008 -0400
+
+    NFS: Don't use range_cyclic for data integrity syncs
+    
+    It is more efficient to write linearly starting from the beginning of the
+    file.
+    
+    Signed-off-by: Trond Myklebust <Trond.Myklebust at netapp.com>
+
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 3229e21..9f98458 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -1427,8 +1427,9 @@ static int nfs_write_mapping(struct address_space *mapping, int how)
+ 		.bdi = mapping->backing_dev_info,
+ 		.sync_mode = WB_SYNC_NONE,
+ 		.nr_to_write = LONG_MAX,
++		.range_start = 0,
++		.range_end = LLONG_MAX,
+ 		.for_writepages = 1,
+-		.range_cyclic = 1,
+ 	};
+ 	int ret;
+ 

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_to_2.6.29_00.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_to_2.6.29_00.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_to_2.6.29_00.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,48 @@
+commit b1e1e158779f1d99c2cc18e466f6bf9099fc0853
+Author: Tom Talpey <tmtalpey at gmail.com>
+Date:   Wed Mar 11 14:37:55 2009 -0400
+
+    SVCRDMA: remove faulty assertions in rpc/rdma chunk validation.
+    
+    Certain client-provided RPCRDMA chunk alignments result in an
+    additional scatter/gather entry, which triggered nfs/rdma server
+    assertions incorrectly. OpenSolaris nfs/rdma client connectathon
+    testing was blocked by these in the special/locking section.
+    
+    Signed-off-by: Tom Talpey <tmtalpey at gmail.com>
+    Cc: Tom Tucker <tom at opengridcomputing.com>
+    Signed-off-by: Trond Myklebust <Trond.Myklebust at netapp.com>
+
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+index a3334e3..d0bea98 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+@@ -191,7 +191,6 @@ static int map_xdr(struct svcxprt_rdma *xprt,
+ 		   struct xdr_buf *xdr,
+ 		   struct svc_rdma_req_map *vec)
+ {
+-	int sge_max = (xdr->len+PAGE_SIZE-1) / PAGE_SIZE + 3;
+ 	int sge_no;
+ 	u32 sge_bytes;
+ 	u32 page_bytes;
+@@ -235,7 +234,11 @@ static int map_xdr(struct svcxprt_rdma *xprt,
+ 		sge_no++;
+ 	}
+ 
+-	BUG_ON(sge_no > sge_max);
++	dprintk("svcrdma: map_xdr: sge_no %d page_no %d "
++		"page_base %zd page_len %zd head_len %d tail_len %d\n",
++		sge_no, page_no, xdr->page_base, xdr->page_len,
++		xdr->head[0].iov_len, xdr->tail[0].iov_len);
++
+ 	vec->count = sge_no;
+ 	return 0;
+ }
+@@ -579,7 +582,6 @@ static int send_reply(struct svcxprt_rdma *rdma,
+ 			ctxt->sge[page_no+1].length = 0;
+ 	}
+ 	BUG_ON(sge_no > rdma->sc_max_sge);
+-	BUG_ON(sge_no > ctxt->count);
+ 	memset(&send_wr, 0, sizeof send_wr);
+ 	ctxt->wr_op = IB_WR_SEND;
+ 	send_wr.wr_id = (unsigned long)ctxt;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_to_2.6.29_01.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_to_2.6.29_01.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_to_2.6.29_01.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,66 @@
+commit b38ab40ad58c1fc43ea590d6342f6a6763ac8fb6
+Author: Tom Talpey <tmtalpey at gmail.com>
+Date:   Wed Mar 11 14:37:55 2009 -0400
+
+    XPRTRDMA: correct an rpc/rdma inline send marshaling error
+    
+    Certain client rpc's which contain both lengthy page-contained
+    metadata and a non-empty xdr_tail buffer require careful handling
+    to avoid overlapped memory copying. Rearranging of existing rpcrdma
+    marshaling code avoids it; this fixes an NFSv4 symlink creation error
+    detected with connectathon basic/test8 to multiple servers.
+    
+    Signed-off-by: Tom Talpey <tmtalpey at gmail.com>
+    Signed-off-by: Trond Myklebust <Trond.Myklebust at netapp.com>
+
+diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
+index 14106d2..e5e28d1 100644
+--- a/net/sunrpc/xprtrdma/rpc_rdma.c
++++ b/net/sunrpc/xprtrdma/rpc_rdma.c
+@@ -310,6 +310,19 @@ rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
+ 		__func__, pad, destp, rqst->rq_slen, curlen);
+ 
+ 	copy_len = rqst->rq_snd_buf.page_len;
++
++	if (rqst->rq_snd_buf.tail[0].iov_len) {
++		curlen = rqst->rq_snd_buf.tail[0].iov_len;
++		if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) {
++			memmove(destp + copy_len,
++				rqst->rq_snd_buf.tail[0].iov_base, curlen);
++			r_xprt->rx_stats.pullup_copy_count += curlen;
++		}
++		dprintk("RPC:       %s: tail destp 0x%p len %d\n",
++			__func__, destp + copy_len, curlen);
++		rqst->rq_svec[0].iov_len += curlen;
++	}
++
+ 	r_xprt->rx_stats.pullup_copy_count += copy_len;
+ 	npages = PAGE_ALIGN(rqst->rq_snd_buf.page_base+copy_len) >> PAGE_SHIFT;
+ 	for (i = 0; copy_len && i < npages; i++) {
+@@ -332,17 +345,6 @@ rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
+ 		destp += curlen;
+ 		copy_len -= curlen;
+ 	}
+-	if (rqst->rq_snd_buf.tail[0].iov_len) {
+-		curlen = rqst->rq_snd_buf.tail[0].iov_len;
+-		if (destp != rqst->rq_snd_buf.tail[0].iov_base) {
+-			memcpy(destp,
+-				rqst->rq_snd_buf.tail[0].iov_base, curlen);
+-			r_xprt->rx_stats.pullup_copy_count += curlen;
+-		}
+-		dprintk("RPC:       %s: tail destp 0x%p len %d curlen %d\n",
+-			__func__, destp, copy_len, curlen);
+-		rqst->rq_svec[0].iov_len += curlen;
+-	}
+ 	/* header now contains entire send message */
+ 	return pad;
+ }
+@@ -656,7 +658,7 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
+ 		if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
+ 			curlen = rqst->rq_rcv_buf.tail[0].iov_len;
+ 		if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
+-			memcpy(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
++			memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
+ 		dprintk("RPC:       %s: tail srcp 0x%p len %d curlen %d\n",
+ 			__func__, srcp, copy_len, curlen);
+ 		rqst->rq_rcv_buf.tail[0].iov_len = curlen;

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_to_2.6.29_02.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_to_2.6.29_02.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_to_2.6.29_02.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,70 @@
+commit 441e3e242903f9b190d5764bed73edb58f977413
+Author: Tom Talpey <tmtalpey at gmail.com>
+Date:   Wed Mar 11 14:37:56 2009 -0400
+
+    SUNRPC: dynamically load RPC transport modules on-demand
+    
+    Provide an api to attempt to load any necessary kernel RPC
+    client transport module automatically. By convention, the
+    desired module name is "xprt"+"transport name". For example,
+    when NFS mounting with "-o proto=rdma", attempt to load the
+    "xprtrdma" module.
+    
+    Signed-off-by: Tom Talpey <tmtalpey at gmail.com>
+    Cc: Chuck Lever <chuck.lever at oracle.com>
+    Signed-off-by: Trond Myklebust <Trond.Myklebust at netapp.com>
+
+diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
+index 11fc71d..2b0d960 100644
+--- a/include/linux/sunrpc/xprt.h
++++ b/include/linux/sunrpc/xprt.h
+@@ -235,6 +235,7 @@ static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *
+  */
+ int			xprt_register_transport(struct xprt_class *type);
+ int			xprt_unregister_transport(struct xprt_class *type);
++int			xprt_load_transport(const char *);
+ void			xprt_set_retrans_timeout_def(struct rpc_task *task);
+ void			xprt_set_retrans_timeout_rtt(struct rpc_task *task);
+ void			xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status);
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index 62098d1..d1afec6 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -152,6 +152,37 @@ out:
+ EXPORT_SYMBOL_GPL(xprt_unregister_transport);
+ 
+ /**
++ * xprt_load_transport - load a transport implementation
++ * @transport_name: transport to load
++ *
++ * Returns:
++ * 0:		transport successfully loaded
++ * -ENOENT:	transport module not available
++ */
++int xprt_load_transport(const char *transport_name)
++{
++	struct xprt_class *t;
++	char module_name[sizeof t->name + 5];
++	int result;
++
++	result = 0;
++	spin_lock(&xprt_list_lock);
++	list_for_each_entry(t, &xprt_list, list) {
++		if (strcmp(t->name, transport_name) == 0) {
++			spin_unlock(&xprt_list_lock);
++			goto out;
++		}
++	}
++	spin_unlock(&xprt_list_lock);
++	strcpy(module_name, "xprt");
++	strncat(module_name, transport_name, sizeof t->name);
++	result = request_module(module_name);
++out:
++	return result;
++}
++EXPORT_SYMBOL_GPL(xprt_load_transport);
++
++/**
+  * xprt_reserve_xprt - serialize write access to transports
+  * @task: task that is requesting access to the transport
+  *
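
A minimal usage sketch of the helper added above (the "rdma" string matches the case the following NFS patch wires up; the error handling here is illustrative only, not from the patch):

/* Returns 0 if an "rdma" transport is already registered on xprt_list;
 * otherwise this becomes request_module("xprtrdma"). */
int err = xprt_load_transport("rdma");
if (err)
	printk(KERN_INFO "RPC: xprtrdma module not available (%d)\n", err);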

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_to_2.6.29_03.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_to_2.6.29_03.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_to_2.6.29_03.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,51 @@
+commit a67d18f89f5782806135aad4ee012ff78d45aae7
+Author: Tom Talpey <tmtalpey at gmail.com>
+Date:   Wed Mar 11 14:37:56 2009 -0400
+
+    NFS: load the rpc/rdma transport module automatically
+    
+    When mounting an NFS/RDMA server with the "-o proto=rdma" or
+    "-o rdma" options, attempt to dynamically load the necessary
+    "xprtrdma" client transport module. Doing so improves usability,
+    while avoiding a static module dependency and any unnecessary
+    while avoiding a static module dependency and any unnecessary
+    resources.
+    
+    Signed-off-by: Tom Talpey <tmtalpey at gmail.com>
+    Cc: Chuck Lever <chuck.lever at oracle.com>
+    Signed-off-by: Trond Myklebust <Trond.Myklebust at netapp.com>
+
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index d6686f4..0942fcb 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -1018,6 +1018,7 @@ static int nfs_parse_mount_options(char *raw,
+ 		case Opt_rdma:
+ 			mnt->flags |= NFS_MOUNT_TCP; /* for side protocols */
+ 			mnt->nfs_server.protocol = XPRT_TRANSPORT_RDMA;
++			xprt_load_transport(p);
+ 			break;
+ 		case Opt_acl:
+ 			mnt->flags &= ~NFS_MOUNT_NOACL;
+@@ -1205,12 +1206,14 @@ static int nfs_parse_mount_options(char *raw,
+ 				/* vector side protocols to TCP */
+ 				mnt->flags |= NFS_MOUNT_TCP;
+ 				mnt->nfs_server.protocol = XPRT_TRANSPORT_RDMA;
++				xprt_load_transport(string);
+ 				break;
+ 			default:
+ 				errors++;
+ 				dfprintk(MOUNT, "NFS:   unrecognized "
+ 						"transport protocol\n");
+ 			}
++			kfree(string);
+ 			break;
+ 		case Opt_mountproto:
+ 			string = match_strdup(args);
+@@ -1218,7 +1221,6 @@ static int nfs_parse_mount_options(char *raw,
+ 				goto out_nomem;
+ 			token = match_token(string,
+ 					    nfs_xprt_protocol_tokens, args);
+-			kfree(string);
+ 
+ 			switch (token) {
+ 			case Opt_xprt_udp:

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_to_2.6.29_04.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_to_2.6.29_04.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_to_2.6.29_04.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,25 @@
+commit 2e3c230bc7149a6af65d26a0c312e230e0c33cc3
+Author: Tom Talpey <tmtalpey at gmail.com>
+Date:   Thu Mar 12 22:21:21 2009 -0400
+
+    SVCRDMA: fix recent printk format warnings.
+    
+    printk formats in prior commit were reversed/incorrect.
+    Compiled without warning on x86 and x86_64, but detected on ppc.
+    
+    Signed-off-by: Tom Talpey <tmtalpey at gmail.com>
+    Signed-off-by: Trond Myklebust <Trond.Myklebust at netapp.com>
+
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+index d0bea98..6c26a67 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+@@ -235,7 +235,7 @@ static int map_xdr(struct svcxprt_rdma *xprt,
+ 	}
+ 
+ 	dprintk("svcrdma: map_xdr: sge_no %d page_no %d "
+-		"page_base %zd page_len %zd head_len %d tail_len %d\n",
++		"page_base %u page_len %u head_len %zu tail_len %zu\n",
+ 		sge_no, page_no, xdr->page_base, xdr->page_len,
+ 		xdr->head[0].iov_len, xdr->tail[0].iov_len);
+ 
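
For reference, the pairing rule the fix applies, shown with hypothetical values (not from the patch):

unsigned int page_len = 4096;	/* struct xdr_buf counters are unsigned int -> %u  */
size_t iov_len = 120;		/* struct kvec lengths are size_t -> %zu */
printk("page_len %u iov_len %zu\n", page_len, iov_len);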

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_zz_0100_lockd_ref_fix.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_zz_0100_lockd_ref_fix.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_zz_0100_lockd_ref_fix.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,20 @@
+--- a/fs/nfsd/nfsctl.c	2009-03-29 18:53:02.000000000 -0500
++++ b/fs/nfsd/nfsctl.c	2009-03-31 10:44:32.000000000 -0500
+@@ -657,6 +657,9 @@ static ssize_t __write_ports(struct file
+ 					/* Give a reasonable perror msg for
+ 					 * bad transport string */
+ 					err = -EPROTONOSUPPORT;
++				if (err >= 0)
++					lockd_up(0);
++				nfsd_serv->sv_nrthreads--;
+ 			}
+ 			return err < 0 ? err : 0;
+ 		}
+@@ -679,6 +682,7 @@ static ssize_t __write_ports(struct file
+ 					svc_close_xprt(xprt);
+ 					svc_xprt_put(xprt);
+ 					err = 0;
++					lockd_down();
+ 				} else
+ 					err = -ENOTCONN;
+ 			}

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_zz_0200_iova_truncate_fix.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_zz_0200_iova_truncate_fix.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_zz_0200_iova_truncate_fix.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,28 @@
+xprtrdma: Do not truncate iova_start values in frmr registrations.
+
+From: Steve Wise <swise at opengridcomputing.com>
+
+A bad cast causes the iova_start, which in this case is a 64b DMA
+bus address, to be truncated on 32b systems.  This breaks frmrs on
+32b systems.  No cast is needed.
+
+Signed-off-by: Steve Wise <swise at opengridcomputing.com>
+---
+
+ net/sunrpc/xprtrdma/verbs.c |    2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
+index 3b21e0c..3a374f5 100644
+--- a/net/sunrpc/xprtrdma/verbs.c
++++ b/net/sunrpc/xprtrdma/verbs.c
+@@ -1489,7 +1489,7 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
+ 	memset(&frmr_wr, 0, sizeof frmr_wr);
+ 	frmr_wr.opcode = IB_WR_FAST_REG_MR;
+ 	frmr_wr.send_flags = 0;			/* unsignaled */
+-	frmr_wr.wr.fast_reg.iova_start = (unsigned long)seg1->mr_dma;
++	frmr_wr.wr.fast_reg.iova_start = seg1->mr_dma;
+ 	frmr_wr.wr.fast_reg.page_list = seg1->mr_chunk.rl_mw->r.frmr.fr_pgl;
+ 	frmr_wr.wr.fast_reg.page_list_len = i;
+ 	frmr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
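
The truncation being fixed, sketched with an assumed bus address above 4GB (possible on a 32-bit kernel where dma_addr_t is 64 bits wide):

u64 iova_start;
dma_addr_t mr_dma = 0x1ffff0000ULL;	/* 64-bit bus address on a 32-bit host */

iova_start = (unsigned long)mr_dma;	/* old code: long is 32 bits, value truncates to 0xffff0000 */
iova_start = mr_dma;			/* fixed code: plain assignment to the u64 field preserves it */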

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_zz_0300_dma_direction.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_zz_0300_dma_direction.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_zz_0300_dma_direction.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,24 @@
+The nfs server rdma transport was mapping rdma read target pages for
+TO_DEVICE instead of FROM_DEVICE.  This causes data corruption on
+non-cache-coherent systems if frmrs are used.
+
+Signed-off-by: Steve Wise <swise at opengridcomputing.com>
+---
+
+ net/sunrpc/xprtrdma/svc_rdma_recvfrom.c |    2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+index 629a287..42a6f9f 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+@@ -265,7 +265,7 @@ static int fast_reg_read_chunks(struct svcxprt_rdma *xprt,
+ 		frmr->page_list->page_list[page_no] =
+ 			ib_dma_map_single(xprt->sc_cm_id->device,
+ 					  page_address(rqstp->rq_arg.pages[page_no]),
+-					  PAGE_SIZE, DMA_TO_DEVICE);
++					  PAGE_SIZE, DMA_FROM_DEVICE);
+ 		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
+ 					 frmr->page_list->page_list[page_no]))
+ 			goto fatal_err;
+

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_zz_0400_clean_up_error_paths.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_zz_0400_clean_up_error_paths.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_zz_0400_clean_up_error_paths.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,55 @@
+commit 2f2f23aa57adb91b356f6e31f1a287ca9492c3ea
+Author: Steve Wise <swise at opengridcomputing.com>
+Date:   Wed Apr 29 13:38:55 2009 -0500
+
+    svcrdma: clean up error paths.
+    
+    These fixes resolved crashes due to resource leak BUG_ON checks. The
+    resource leaks were detected by introducing asynchronous transport errors.
+    
+    Signed-off-by: Steve Wise <swise at opengridcomputing.com>
+    Signed-off-by: Tom Tucker <tom at opengridcomputing.com>
+
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+index 6c26a67..8b510c5 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+@@ -183,6 +183,7 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt,
+ 
+  fatal_err:
+ 	printk("svcrdma: Error fast registering memory for xprt %p\n", xprt);
++	vec->frmr = NULL;
+ 	svc_rdma_put_frmr(xprt, frmr);
+ 	return -EIO;
+ }
+@@ -516,6 +517,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
+ 		       "svcrdma: could not post a receive buffer, err=%d."
+ 		       "Closing transport %p.\n", ret, rdma);
+ 		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
++		svc_rdma_put_frmr(rdma, vec->frmr);
+ 		svc_rdma_put_context(ctxt, 0);
+ 		return -ENOTCONN;
+ 	}
+@@ -606,6 +608,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
+ 	return 0;
+ 
+  err:
++	svc_rdma_unmap_dma(ctxt);
+ 	svc_rdma_put_frmr(rdma, vec->frmr);
+ 	svc_rdma_put_context(ctxt, 1);
+ 	return -EIO;
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+index 3d810e7..4b0c2fa 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -520,8 +520,9 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
+ 	svc_xprt_get(&xprt->sc_xprt);
+ 	ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
+ 	if (ret) {
+-		svc_xprt_put(&xprt->sc_xprt);
++		svc_rdma_unmap_dma(ctxt);
+ 		svc_rdma_put_context(ctxt, 1);
++		svc_xprt_put(&xprt->sc_xprt);
+ 	}
+ 	return ret;
+ 

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_zz_0500_unmap_len_fix.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_zz_0500_unmap_len_fix.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_zz_0500_unmap_len_fix.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,94 @@
+svcrdma: dma unmap the correct length for the RPCRDMA header page.
+
+From: Steve Wise <swise at opengridcomputing.com>
+
+The svcrdma module was incorrectly unmapping the RPCRDMA header page.
+On IBM pserver systems this causes a resource leak that results in
+running out of bus address space (10 cthon iterations will reproduce it).
+The code was mapping the full page but only unmapping the actual header
+length.  The fix is to only map the header length.
+
+I also cleaned up the use of ib_dma_map_page() calls since the unmap
+logic always uses ib_dma_unmap_single().  I made these symmetrical.
+
+Signed-off-by: Steve Wise <swise at opengridcomputing.com>
+Signed-off-by: Tom Tucker <tom at opengridcomputing.com>
+---
+
+ net/sunrpc/xprtrdma/svc_rdma_sendto.c    |   12 ++++++------
+ net/sunrpc/xprtrdma/svc_rdma_transport.c |   10 +++++-----
+ 2 files changed, 11 insertions(+), 11 deletions(-)
+
+
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+index 8b510c5..f071b7e 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+@@ -128,7 +128,8 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt,
+ 		page_bytes -= sge_bytes;
+ 
+ 		frmr->page_list->page_list[page_no] =
+-			ib_dma_map_page(xprt->sc_cm_id->device, page, 0,
++			ib_dma_map_single(xprt->sc_cm_id->device, 
++					  page_address(page),
+ 					  PAGE_SIZE, DMA_TO_DEVICE);
+ 		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
+ 					 frmr->page_list->page_list[page_no]))
+@@ -532,18 +533,17 @@ static int send_reply(struct svcxprt_rdma *rdma,
+ 		clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
+ 
+ 	/* Prepare the SGE for the RPCRDMA Header */
++	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
++	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
+ 	ctxt->sge[0].addr =
+-		ib_dma_map_page(rdma->sc_cm_id->device,
+-				page, 0, PAGE_SIZE, DMA_TO_DEVICE);
++		ib_dma_map_single(rdma->sc_cm_id->device, page_address(page),
++				  ctxt->sge[0].length, DMA_TO_DEVICE);
+ 	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
+ 		goto err;
+ 	atomic_inc(&rdma->sc_dma_used);
+ 
+ 	ctxt->direction = DMA_TO_DEVICE;
+ 
+-	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
+-	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
+-
+ 	/* Determine how many of our SGE are to be transmitted */
+ 	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
+ 		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+index 4b0c2fa..5151f9f 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -500,8 +500,8 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
+ 		BUG_ON(sge_no >= xprt->sc_max_sge);
+ 		page = svc_rdma_get_page();
+ 		ctxt->pages[sge_no] = page;
+-		pa = ib_dma_map_page(xprt->sc_cm_id->device,
+-				     page, 0, PAGE_SIZE,
++		pa = ib_dma_map_single(xprt->sc_cm_id->device,
++				     page_address(page), PAGE_SIZE,
+ 				     DMA_FROM_DEVICE);
+ 		if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
+ 			goto err_put_ctxt;
+@@ -1315,8 +1315,8 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
+ 	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
+ 
+ 	/* Prepare SGE for local address */
+-	sge.addr = ib_dma_map_page(xprt->sc_cm_id->device,
+-				   p, 0, PAGE_SIZE, DMA_FROM_DEVICE);
++	sge.addr = ib_dma_map_single(xprt->sc_cm_id->device,
++				   page_address(p), PAGE_SIZE, DMA_FROM_DEVICE);
+ 	if (ib_dma_mapping_error(xprt->sc_cm_id->device, sge.addr)) {
+ 		put_page(p);
+ 		return;
+@@ -1343,7 +1343,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
+ 	if (ret) {
+ 		dprintk("svcrdma: Error %d posting send for protocol error\n",
+ 			ret);
+-		ib_dma_unmap_page(xprt->sc_cm_id->device,
++		ib_dma_unmap_single(xprt->sc_cm_id->device,
+ 				  sge.addr, PAGE_SIZE,
+ 				  DMA_FROM_DEVICE);
+ 		svc_rdma_put_context(ctxt, 1);
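
In reduced form, the invariant the patch restores for the RPCRDMA header SGE (names follow the svcrdma code above; this is a sketch, not the actual control flow):

len  = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);	/* header length, not PAGE_SIZE */
addr = ib_dma_map_single(device, page_address(page), len, DMA_TO_DEVICE);
/* ... post the send referencing addr/len ... */
ib_dma_unmap_single(device, addr, len, DMA_TO_DEVICE);	/* same primitive, same length  */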

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_zz_0600_access_local_write_cx.patch
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_zz_0600_access_local_write_cx.patch	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/kernel_patches/fixes/nfsrdma_zz_0600_access_local_write_cx.patch	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,21 @@
+svcrdma: fix access flags for FRMR write operation.
+
+For write operations, a ConnectX FRMR requires the local-write access flag
+to be enabled together with remote RDMA write.
+
+Signed-off-by: Vu Pham <vu at mellanox.com>
+---
+
+diff -Naur a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
+--- a/net/sunrpc/xprtrdma/verbs.c	2009-05-18 12:52:42.000000000 -0700
++++ b/net/sunrpc/xprtrdma/verbs.c	2009-05-18 12:52:10.000000000 -0700
+@@ -1495,7 +1495,8 @@
+ 	frmr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
+ 	frmr_wr.wr.fast_reg.length = i << PAGE_SHIFT;
+ 	frmr_wr.wr.fast_reg.access_flags = (writing ?
+-				IB_ACCESS_REMOTE_WRITE : IB_ACCESS_REMOTE_READ);
++				(IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE)
++				: IB_ACCESS_REMOTE_READ);
+ 	frmr_wr.wr.fast_reg.rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey;
+ 	DECR_CQCOUNT(&r_xprt->rx_ep);
+ 

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/Kconfig
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/Kconfig	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/Kconfig	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,11 +1,13 @@
 
 config RDS
 	tristate "Reliable Datagram Sockets (RDS) (EXPERIMENTAL)"
-	depends on EXPERIMENTAL
+	depends on INET && INFINIBAND_IPOIB && EXPERIMENTAL
 	---help---
 	  RDS provides reliable, sequenced delivery of datagrams
-	  over Infiniband or TCP.
+	  over Infiniband.
 
-config RDS_IB
-	tristate "  RDS over Infiniband"
-	depends RDS
+config RDS_DEBUG
+        bool "Debugging messages"
+	depends on RDS
+        default n
+

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/Makefile
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/Makefile	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/Makefile	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,15 +1,14 @@
 obj-$(CONFIG_RDS) += rds.o
- 
 rds-y :=	af_rds.o bind.o cong.o connection.o info.o message.o   \
 			recv.o send.o stats.o sysctl.o threads.o transport.o \
-			loop.o page.o rdma.o
+			loop.o page.o rdma.o \
+			rdma_transport.o \
+			ib.o ib_cm.o ib_recv.o ib_ring.o ib_send.o ib_stats.o \
+			ib_sysctl.o ib_rdma.o \
+			iw.o iw_cm.o iw_recv.o iw_ring.o iw_send.o iw_stats.o \
+			iw_sysctl.o iw_rdma.o
 
-# we don't *quite* have modular transports yet
-ifeq ($(CONFIG_RDS_IB), m)
-rds-y +=	ib.o ib_cm.o ib_recv.o ib_ring.o ib_send.o ib_stats.o \
-		ib_sysctl.o ib_rdma.o
-endif
-
 ifeq ($(CONFIG_RDS_DEBUG), y)
 EXTRA_CFLAGS += -DDEBUG
 endif
+

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/af_rds.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/af_rds.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/af_rds.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -40,6 +40,7 @@
 
 #include "rds.h"
 #include "rdma.h"
+#include "rdma_transport.h"
 
 static int enable_rdma = 1;
 
@@ -47,8 +48,8 @@
 MODULE_PARM_DESC(enable_rdma, " Enable RDMA operations support");
 
 /* this is just used for stats gathering :/ */
-static spinlock_t rds_sock_lock = SPIN_LOCK_UNLOCKED;
-unsigned long rds_sock_count;
+static DEFINE_SPINLOCK(rds_sock_lock);
+static unsigned long rds_sock_count;
 static LIST_HEAD(rds_sock_list);
 DECLARE_WAIT_QUEUE_HEAD(rds_poll_waitq);
 
@@ -73,16 +74,15 @@
 	rs = rds_sk_to_rs(sk);
 
 	sock_orphan(sk);
-	rds_remove_bound(rs);
-
 	/* Note - rds_clear_recv_queue grabs rs_recv_lock, so
 	 * that ensures the recv path has completed messing
 	 * with the socket. */
 	rds_clear_recv_queue(rs);
+	rds_cong_remove_socket(rs);
+	rds_remove_bound(rs);
 	rds_send_drop_to(rs, NULL);
 	rds_rdma_drop_keys(rs);
 	rds_notify_queue_get(rs, NULL);
-	rds_cong_remove_socket(rs);
 
 	spin_lock_irqsave(&rds_sock_lock, flags);
 	list_del_init(&rs->rs_item);
@@ -95,7 +95,7 @@
 	return 0;
 }
 
-/* 
+/*
  * Careful not to race with rds_release -> sock_orphan which clears sk_sleep.
  * _bh() isn't OK here, we're called from interrupt handlers.  It's probably OK
  * to wake the waitqueue after sk_sleep is clear as we hold a sock ref, but
@@ -153,8 +153,8 @@
  *
  * POLLOUT is asserted if there is room on the send queue. This does not mean
  * however, that the next sendmsg() call will succeed. If the application tries
- * to send to a congested destination, the system call may still fail (and return
- * ENOBUFS).
+ * to send to a congested destination, the system call may still fail (and
+ * return ENOBUFS).
  */
 static unsigned int rds_poll(struct file *file, struct socket *sock,
 			     poll_table *wait)
@@ -228,7 +228,8 @@
 	return ret;
 }
 
-static int rds_set_bool_option(unsigned char *optvar, char __user *optval, int optlen)
+static int rds_set_bool_option(unsigned char *optvar, char __user *optval,
+			       int optlen)
 {
 	int value;
 
@@ -240,7 +241,8 @@
 	return 0;
 }
 
-static int rds_cong_monitor(struct rds_sock *rs, char __user *optval, int optlen)
+static int rds_cong_monitor(struct rds_sock *rs, char __user *optval,
+			    int optlen)
 {
 	int ret;
 
@@ -268,30 +270,30 @@
 		goto out;
 	}
 
-	switch(optname) {
-		case RDS_CANCEL_SENT_TO:
-			ret = rds_cancel_sent_to(rs, optval, optlen);
+	switch (optname) {
+	case RDS_CANCEL_SENT_TO:
+		ret = rds_cancel_sent_to(rs, optval, optlen);
+		break;
+	case RDS_GET_MR:
+		if (enable_rdma)
+			ret = rds_get_mr(rs, optval, optlen);
+		else
+			ret = -EOPNOTSUPP;
 			break;
-		case RDS_GET_MR:
-			if (enable_rdma)
-				ret = rds_get_mr(rs, optval, optlen);
-			else
-				ret = -EOPNOTSUPP;
-			break;
-		case RDS_FREE_MR:
-			if (enable_rdma)
-				ret = rds_free_mr(rs, optval, optlen);
-			else
-				ret = -EOPNOTSUPP;
-			break;
-		case RDS_RECVERR:
-			ret = rds_set_bool_option(&rs->rs_recverr, optval, optlen);
-			break;
-		case RDS_CONG_MONITOR:
-			ret = rds_cong_monitor(rs, optval, optlen);
-			break;
-		default:
-			ret = -ENOPROTOOPT;
+	case RDS_FREE_MR:
+		if (enable_rdma)
+			ret = rds_free_mr(rs, optval, optlen);
+		else
+			ret = -EOPNOTSUPP;
+		break;
+	case RDS_RECVERR:
+		ret = rds_set_bool_option(&rs->rs_recverr, optval, optlen);
+		break;
+	case RDS_CONG_MONITOR:
+		ret = rds_cong_monitor(rs, optval, optlen);
+		break;
+	default:
+		ret = -ENOPROTOOPT;
 	}
 out:
 	return ret;
@@ -311,24 +313,24 @@
 		goto out;
 	}
 
-	switch(optname) {
-		case RDS_INFO_FIRST ... RDS_INFO_LAST:
-			ret = rds_info_getsockopt(sock, optname, optval,
-						  optlen);
-			break;
+	switch (optname) {
+	case RDS_INFO_FIRST ... RDS_INFO_LAST:
+		ret = rds_info_getsockopt(sock, optname, optval,
+					  optlen);
+		break;
 
-		case RDS_RECVERR:
-			if (len < sizeof(int))
-				ret = -EINVAL;
-			else
-			if (put_user(rs->rs_recverr, (int __user *) optval)
-			 || put_user(sizeof(int), optlen))
-				ret = -EFAULT;
-			else
-				ret = 0;
-			break;
-		default:
-			break;
+	case RDS_RECVERR:
+		if (len < sizeof(int))
+			ret = -EINVAL;
+		else
+		if (put_user(rs->rs_recverr, (int __user *) optval)
+		 || put_user(sizeof(int), optlen))
+			ret = -EFAULT;
+		else
+			ret = 0;
+		break;
+	default:
+		break;
 	}
 
 out:
@@ -356,7 +358,7 @@
 		goto out;
 	}
 
-	if (sin->sin_addr.s_addr == INADDR_ANY) {
+	if (sin->sin_addr.s_addr == htonl(INADDR_ANY)) {
 		ret = -EDESTADDRREQ;
 		goto out;
 	}
@@ -399,7 +401,8 @@
 };
 
 #ifndef KERNEL_HAS_PROTO_REGISTER
-static struct sock *sk_alloc_compat(int pf, gfp_t gfp, struct proto *prot, int zero_it)
+static struct sock *sk_alloc_compat(int pf, gfp_t gfp, struct proto *prot,
+				    int zero_it)
 {
 	struct rds_sock *rs;
 
@@ -570,25 +573,15 @@
 	spin_unlock_irqrestore(&rds_sock_lock, flags);
 }
 
-/*
- * The order is important here.
- *
- * rds_trans_stop_listening() is called before conn_exit so new connections
- * don't hit while existing ones are being torn down.
- *
- * rds_conn_exit() is before rds_trans_exit() as rds_conn_exit() calls into the
- * transports to free connections and incoming fragments as they're torn down.
- */
 static void __exit rds_exit(void)
 {
+	rds_rdma_exit();
 	sock_unregister(rds_family_ops.family);
 #ifdef KERNEL_HAS_PROTO_REGISTER
 	proto_unregister(&rds_proto);
 #endif /* KERNEL_HAS_PROTO_REGISTER */
-	rds_trans_stop_listening();
 	rds_conn_exit();
 	rds_cong_exit();
-	rds_trans_exit();
 	rds_sysctl_exit();
 	rds_threads_exit();
 	rds_stats_exit();
@@ -622,12 +615,9 @@
 	ret = rds_sysctl_init();
 	if (ret)
 		goto out_threads;
-	ret = rds_trans_init();
-	if (ret)
-		goto out_sysctl;
 	ret = rds_stats_init();
 	if (ret)
-		goto out_trans;
+		goto out_sysctl;
 #ifdef KERNEL_HAS_PROTO_REGISTER
 	ret = proto_register(&rds_proto, 1);
 	if (ret)
@@ -639,16 +629,21 @@
 
 	rds_info_register_func(RDS_INFO_SOCKETS, rds_sock_info);
 	rds_info_register_func(RDS_INFO_RECV_MESSAGES, rds_sock_inc_info);
+
+	/* ib/iwarp transports currently compiled-in */
+	ret = rds_rdma_init();
+	if (ret)
+		goto out_sock;
 	goto out;
 
+out_sock:
+	sock_unregister(rds_family_ops.family);
 out_proto:
 #ifdef KERNEL_HAS_PROTO_REGISTER
 	proto_unregister(&rds_proto);
 out_stats:
 #endif /* KERNEL_HAS_PROTO_REGISTER */
 	rds_stats_exit();
-out_trans:
-	rds_trans_exit();
 out_sysctl:
 	rds_sysctl_exit();
 out_threads:
@@ -663,12 +658,11 @@
 module_init(rds_init);
 
 #define DRV_VERSION     "4.0"
-#define DRV_RELDATE     "July 28, 2008"
+#define DRV_RELDATE     "Feb 12, 2009"
 
-MODULE_AUTHOR("Zach Brown");
-MODULE_AUTHOR("Olaf Kirch");
+MODULE_AUTHOR("Oracle Corporation <rds-devel at oss.oracle.com>");
 MODULE_DESCRIPTION("RDS: Reliable Datagram Sockets"
-                   " v" DRV_VERSION " (" DRV_RELDATE ")");
+		   " v" DRV_VERSION " (" DRV_RELDATE ")");
 MODULE_VERSION(DRV_VERSION);
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_ALIAS_NETPROTO(PF_RDS);

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/bind.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/bind.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/bind.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -36,15 +36,15 @@
 #include <linux/if_arp.h>
 #include "rds.h"
 
-/* 
+/*
  * XXX this probably still needs more work.. no INADDR_ANY, and rbtrees aren't
  * particularly zippy.
  *
  * This is now called for every incoming frame so we arguably care much more
  * about it than we used to.
  */
-static spinlock_t rds_bind_lock = SPIN_LOCK_UNLOCKED;
-struct rb_root rds_bind_tree = RB_ROOT;
+static DEFINE_SPINLOCK(rds_bind_lock);
+static struct rb_root rds_bind_tree = RB_ROOT;
 
 static struct rds_sock *rds_bind_tree_walk(__be32 addr, __be16 port,
 					   struct rds_sock *insert)
@@ -52,7 +52,8 @@
 	struct rb_node **p = &rds_bind_tree.rb_node;
 	struct rb_node *parent = NULL;
 	struct rds_sock *rs;
-	u64 cmp,needle = ((u64)be32_to_cpu(addr) << 32) | be16_to_cpu(port);
+	u64 cmp;
+	u64 needle = ((u64)be32_to_cpu(addr) << 32) | be16_to_cpu(port);
 
 	while (*p) {
 		parent = *p;
@@ -101,7 +102,7 @@
 }
 
 /* returns -ve errno or +ve port */
-int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port)
+static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port)
 {
 	unsigned long flags;
 	int ret = -EADDRINUSE;
@@ -131,6 +132,11 @@
 		rs->rs_bound_addr = addr;
 		rs->rs_bound_port = *port;
 		rds_sock_addref(rs);
+
+		rdstrace(RDS_BIND, RDS_LOW,
+		  "rs %p binding to %u.%u.%u.%u:%d\n",
+		  rs, NIPQUAD(addr), (int)ntohs(*port));
+
 	}
 
 	spin_unlock_irqrestore(&rds_bind_lock, flags);
@@ -145,6 +151,11 @@
 	spin_lock_irqsave(&rds_bind_lock, flags);
 
 	if (rs->rs_bound_addr) {
+		rdstrace(RDS_BIND, RDS_LOW,
+		  "rs %p unbinding from %u.%u.%u.%u:%d\n",
+		  rs, NIPQUAD(rs->rs_bound_addr),
+		  (int)ntohs(rs->rs_bound_port));
+
 		rb_erase(&rs->rs_bound_node, &rds_bind_tree);
 		rds_sock_put(rs);
 		rs->rs_bound_addr = 0;
@@ -166,7 +177,7 @@
 	if (addr_len != sizeof(struct sockaddr_in) ||
 	    sin->sin_family != AF_INET ||
 	    rs->rs_bound_addr ||
-	    sin->sin_addr.s_addr == INADDR_ANY) {
+	    sin->sin_addr.s_addr == htonl(INADDR_ANY)) {
 		ret = -EINVAL;
 		goto out;
 	}

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/cong.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/cong.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/cong.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -32,12 +32,9 @@
  */
 #include <linux/types.h>
 #include <linux/rbtree.h>
-#include <asm/byteorder.h>
 
 #include "rds.h"
 
-extern wait_queue_head_t rds_poll_waitq;
-
 /*
  * This file implements the receive side of the unconventional congestion
  * management in RDS.
@@ -56,7 +53,7 @@
  * which ports on bound addresses are congested.  As the bitmap changes it is
  * sent through all the connections which terminate in the local address of the
  * bitmap which changed.
- * 
+ *
  * The bitmaps are allocated as connections are brought up.  This avoids
  * allocation in the interrupt handling path which queues messages on sockets.
  * The dense bitmaps let transports send the entire bitmap on any bitmap change
@@ -98,8 +95,8 @@
  *  Receive paths can mark ports congested from interrupt context so the
  *  lock masks interrupts.
  */
-static spinlock_t rds_cong_lock = SPIN_LOCK_UNLOCKED;
-struct rb_root rds_cong_tree = RB_ROOT;
+static DEFINE_SPINLOCK(rds_cong_lock);
+static struct rb_root rds_cong_tree = RB_ROOT;
 
 static struct rds_cong_map *rds_cong_tree_walk(__be32 addr,
 					       struct rds_cong_map *insert)
@@ -231,6 +228,8 @@
 void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
 {
 	rdsdebug("waking map %p\n", map);
+	rdstrace(RDS_CONG, RDS_LOW, "waking map %p for %u.%u.%u.%u\n",
+	  map, NIPQUAD(map->m_addr));
 	rds_stats_inc(s_cong_update_received);
 	atomic_inc(&rds_cong_generation);
 	if (waitqueue_active(&map->m_waitq))
@@ -254,6 +253,7 @@
 		read_unlock_irqrestore(&rds_cong_monitor_lock, flags);
 	}
 }
+EXPORT_SYMBOL_GPL(rds_cong_map_updated);
 
 int rds_cong_updated_since(unsigned long *recent)
 {
@@ -265,25 +265,6 @@
 	return 1;
 }
 
-void rds_cong_add_socket(struct rds_sock *rs)
-{
-	unsigned long flags;
-
-	write_lock_irqsave(&rds_cong_monitor_lock, flags);
-	if (list_empty(&rs->rs_cong_list))
-		list_add(&rs->rs_cong_list, &rds_cong_monitor);
-	write_unlock_irqrestore(&rds_cong_monitor_lock, flags);
-}
-
-void rds_cong_remove_socket(struct rds_sock *rs)
-{
-	unsigned long flags;
-
-	write_lock_irqsave(&rds_cong_monitor_lock, flags);
-	list_del_init(&rs->rs_cong_list);
-	write_unlock_irqrestore(&rds_cong_monitor_lock, flags);
-}
-
 /*
  * These should be using generic_{test,__{clear,set}}_le_bit() but some old
  * kernels don't have them.  Sigh.
@@ -310,11 +291,14 @@
 	unsigned long off;
 
 	rdsdebug("setting port %u on map %p\n", be16_to_cpu(port), map);
+	rdstrace(RDS_CONG, RDS_LOW,
+	  "setting congestion for %u.%u.%u.%u:%u in map %p\n",
+	  NIPQUAD(map->m_addr), ntohs(port), map);
 
 	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
 	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 
-	__set_bit(off ^ LE_BIT_XOR, (void *)map->m_page_addrs[i]);
+	set_bit(off ^ LE_BIT_XOR, (void *)map->m_page_addrs[i]);
 }
 
 void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
@@ -323,11 +307,14 @@
 	unsigned long off;
 
 	rdsdebug("clearing port %u on map %p\n", be16_to_cpu(port), map);
+	rdstrace(RDS_CONG, RDS_LOW,
+	  "clearing congestion for %u.%u.%u.%u:%u in map %p\n",
+	  NIPQUAD(map->m_addr), ntohs(port), map);
 
 	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
 	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 
-	__clear_bit(off ^ LE_BIT_XOR, (void *)map->m_page_addrs[i]);
+	clear_bit(off ^ LE_BIT_XOR, (void *)map->m_page_addrs[i]);
 }
 
 static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
@@ -343,10 +330,42 @@
 
 #undef LE_BIT_XOR
 
-int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock, struct rds_sock *rs)
+void rds_cong_add_socket(struct rds_sock *rs)
 {
+	unsigned long flags;
+
+	write_lock_irqsave(&rds_cong_monitor_lock, flags);
+	if (list_empty(&rs->rs_cong_list))
+		list_add(&rs->rs_cong_list, &rds_cong_monitor);
+	write_unlock_irqrestore(&rds_cong_monitor_lock, flags);
+}
+
+void rds_cong_remove_socket(struct rds_sock *rs)
+{
+	unsigned long flags;
+	struct rds_cong_map *map;
+
+	write_lock_irqsave(&rds_cong_monitor_lock, flags);
+	list_del_init(&rs->rs_cong_list);
+	write_unlock_irqrestore(&rds_cong_monitor_lock, flags);
+
+	/* update congestion map for now-closed port */
+	spin_lock_irqsave(&rds_cong_lock, flags);
+	map = rds_cong_tree_walk(rs->rs_bound_addr, NULL);
+	spin_unlock_irqrestore(&rds_cong_lock, flags);
+
+	if (map && rds_cong_test_bit(map, rs->rs_bound_port))
+	{
+		rds_cong_clear_bit(map, rs->rs_bound_port);
+		rds_cong_queue_updates(map);
+	}
+}
+
+int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock,
+		  struct rds_sock *rs)
+{
 	if (!rds_cong_test_bit(map, port))
-	       return 0;
+		return 0;
 	if (nonblock) {
 		if (rs && rs->rs_cong_monitor) {
 			unsigned long flags;
@@ -360,7 +379,7 @@
 			/* Test again - a congestion update may have arrived in
 			 * the meantime. */
 			if (!rds_cong_test_bit(map, port))
-			       return 0;
+				return 0;
 		}
 		rds_stats_inc(s_cong_send_error);
 		return -ENOBUFS;
@@ -369,7 +388,7 @@
 	rds_stats_inc(s_cong_send_blocked);
 	rdsdebug("waiting on map %p for port %u\n", map, be16_to_cpu(port));
 
-	return wait_event_interruptible(map->m_waitq, 
+	return wait_event_interruptible(map->m_waitq,
 					!rds_cong_test_bit(map, port));
 }
 

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/connection.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/connection.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/connection.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -43,16 +43,16 @@
 #define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)
 
 /* converting this to RCU is a chore for another day.. */
-static spinlock_t rds_conn_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(rds_conn_lock);
 static unsigned long rds_conn_count;
-struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES];
+static struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES];
 static struct kmem_cache *rds_conn_slab;
 
 static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
 {
 	/* Pass NULL, don't need struct net for hash */
 	unsigned long hash = inet_ehashfn(NULL,
-					  be32_to_cpu(laddr), 0, 
+					  be32_to_cpu(laddr), 0,
 					  be32_to_cpu(faddr), 0);
 	return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
 }
@@ -66,10 +66,10 @@
 {
 	int ret = 0;
 
-	if (down_trylock(&conn->c_send_sem))
+	if (!mutex_trylock(&conn->c_send_lock))
 		ret = 1;
 	else
-		up(&conn->c_send_sem);
+		mutex_unlock(&conn->c_send_lock);
 
 	return ret;
 }
@@ -101,6 +101,10 @@
  */
 void rds_conn_reset(struct rds_connection *conn)
 {
+	rdstrace(RDS_CONNECTION, RDS_MINIMAL,
+	  "connection %u.%u.%u.%u to %u.%u.%u.%u reset\n",
+	  NIPQUAD(conn->c_laddr),NIPQUAD(conn->c_faddr));
+
 	rds_stats_inc(s_conn_reset);
 	rds_send_reset(conn);
 	conn->c_flags = 0;
@@ -109,9 +113,6 @@
 	 * retransmitted packets from new packets, and will hand all
 	 * of them to the application. That is not consistent with the
 	 * reliability guarantees of RDS. */
-#if 0
-	conn->c_next_rx_seq = 0;
-#endif
 }
 
 /*
@@ -157,13 +158,12 @@
 	memset(conn, 0, sizeof(*conn));
 
 	INIT_HLIST_NODE(&conn->c_hash_node);
-	conn->c_version = RDS_PROTOCOL_3_0;
 	conn->c_laddr = laddr;
 	conn->c_faddr = faddr;
 	spin_lock_init(&conn->c_lock);
 	conn->c_next_tx_seq = 1;
 
-	init_MUTEX(&conn->c_send_sem);
+	mutex_init(&conn->c_send_lock);
 	INIT_LIST_HEAD(&conn->c_send_queue);
 	INIT_LIST_HEAD(&conn->c_retrans);
 
@@ -211,13 +211,20 @@
 	rdsdebug("allocated conn %p for %u.%u.%u.%u -> %u.%u.%u.%u\n", conn,
 		 NIPQUAD(laddr), NIPQUAD(faddr));
 
+	rdstrace(RDS_CONNECTION, RDS_MINIMAL,
+	  "allocated conn %p for %u.%u.%u.%u -> %u.%u.%u.%u over %s %s\n",
+	  conn, NIPQUAD(laddr), NIPQUAD(faddr),
+	  trans->t_name ? trans->t_name : "[unknown]",
+	  is_outgoing ? "(outgoing)" : "");
+
 	spin_lock_irqsave(&rds_conn_lock, flags);
 	if (parent == NULL) {
 		tmp = rds_conn_lookup(head, laddr, faddr, trans);
 		if (tmp == NULL)
 			hlist_add_head(&conn->c_hash_node, head);
 	} else {
-		if ((tmp = parent->c_passive) == NULL)
+		tmp = parent->c_passive;
+		if (!tmp)
 			parent->c_passive = conn;
 	}
 
@@ -241,17 +248,16 @@
 {
 	return __rds_conn_create(laddr, faddr, trans, gfp, 0);
 }
+EXPORT_SYMBOL_GPL(rds_conn_create);
 
 struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
 				       struct rds_transport *trans, gfp_t gfp)
 {
 	return __rds_conn_create(laddr, faddr, trans, gfp, 1);
 }
-
-EXPORT_SYMBOL_GPL(rds_conn_create);
 EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);
 
-static void __rds_conn_destroy(struct rds_connection *conn)
+void rds_conn_destroy(struct rds_connection *conn)
 {
 	struct rds_message *rm, *rtmp;
 
@@ -259,6 +265,12 @@
 		 "%u.%u.%u.%u\n", conn, NIPQUAD(conn->c_laddr),
 		 NIPQUAD(conn->c_faddr));
 
+	rdstrace(RDS_CONNECTION, RDS_MINIMAL,
+	  "freeing conn %p for %u.%u.%u.%u -> %u.%u.%u.%u\n",
+	  conn, NIPQUAD(conn->c_laddr), NIPQUAD(conn->c_faddr));
+
+	hlist_del_init(&conn->c_hash_node);
+
 	/* wait for the rds thread to shut it down */
 	atomic_set(&conn->c_state, RDS_CONN_ERROR);
 	cancel_delayed_work(&conn->c_conn_w);
@@ -290,11 +302,12 @@
 
 	rds_conn_count--;
 }
+EXPORT_SYMBOL_GPL(rds_conn_destroy);
 
 static void rds_conn_message_info(struct socket *sock, unsigned int len,
-			          struct rds_info_iterator *iter,
-			          struct rds_info_lengths *lens,
-			          int want_send)
+				  struct rds_info_iterator *iter,
+				  struct rds_info_lengths *lens,
+				  int want_send)
 {
 	struct hlist_head *head;
 	struct hlist_node *pos;
@@ -393,6 +406,7 @@
 
 	spin_unlock_irqrestore(&rds_conn_lock, flags);
 }
+EXPORT_SYMBOL_GPL(rds_for_each_conn_info);
 
 static int rds_conn_info_visitor(struct rds_connection *conn,
 				  void *buffer)
@@ -432,7 +446,7 @@
 {
 	rds_conn_slab = kmem_cache_create("rds_connection",
 					  sizeof(struct rds_connection),
-				          0, 0, NULL);
+					  0, 0, NULL);
 	if (rds_conn_slab == NULL)
 		return -ENOMEM;
 
@@ -447,24 +461,10 @@
 
 void rds_conn_exit(void)
 {
-	struct hlist_head *head;
-	struct hlist_node *pos, *tmp;
-	struct rds_connection *conn;
-	size_t i;
+	rds_loop_exit();
 
-	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
-	     i++, head++) {
-		hlist_for_each_entry_safe(conn, pos, tmp, head, c_hash_node) {
+	WARN_ON(!hlist_empty(rds_conn_hash));
 
-			/* the conn won't reconnect once it's unhashed */
-			hlist_del_init(&conn->c_hash_node);
-
-			if (conn->c_passive)
-				__rds_conn_destroy(conn->c_passive);
-			__rds_conn_destroy(conn);
-		}
-	}
-
 	kmem_cache_destroy(rds_conn_slab);
 
 	rds_info_deregister_func(RDS_INFO_CONNECTIONS, rds_conn_info);
@@ -482,6 +482,7 @@
 	atomic_set(&conn->c_state, RDS_CONN_ERROR);
 	queue_work(rds_wq, &conn->c_down_w);
 }
+EXPORT_SYMBOL_GPL(rds_conn_drop);
 
 /*
  * An error occurred on the connection

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -43,28 +43,30 @@
 
 unsigned int fmr_pool_size = RDS_FMR_POOL_SIZE;
 unsigned int fmr_message_size = RDS_FMR_SIZE + 1; /* +1 allows for unaligned MRs */
+unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT;
 
 module_param(fmr_pool_size, int, 0444);
 MODULE_PARM_DESC(fmr_pool_size, " Max number of fmr per HCA");
 module_param(fmr_message_size, int, 0444);
 MODULE_PARM_DESC(fmr_message_size, " Max size of a RDMA transfer");
+module_param(rds_ib_retry_count, int, 0444);
+MODULE_PARM_DESC(rds_ib_retry_count, " Number of hw retries before reporting an error");
 
 struct list_head rds_ib_devices;
 
-void rds_ib_add_one(struct ib_device *device);
-void rds_ib_remove_one(struct ib_device *device);
+/* NOTE: if also grabbing ibdev lock, grab this first */
+DEFINE_SPINLOCK(ib_nodev_conns_lock);
+LIST_HEAD(ib_nodev_conns);
 
-struct ib_client rds_ib_client = {
-	.name   = "rds_ib",
-	.add    = rds_ib_add_one,
-	.remove = rds_ib_remove_one
-};
-
 void rds_ib_add_one(struct ib_device *device)
 {
 	struct rds_ib_device *rds_ibdev;
 	struct ib_device_attr *dev_attr;
 
+	/* Only handle IB (no iWARP) devices */
+	if (device->node_type != RDMA_NODE_IB_CA)
+		return;
+
 	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
 	if (!dev_attr)
 		return;
@@ -80,7 +82,6 @@
 
 	spin_lock_init(&rds_ibdev->spinlock);
 
-	rds_ibdev->dma_local_lkey = !!(dev_attr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY);
 	rds_ibdev->max_wrs = dev_attr->max_qp_wr;
 	rds_ibdev->max_sge = min(dev_attr->max_sge, RDS_IB_MAX_SGE);
 
@@ -88,7 +89,7 @@
 	rds_ibdev->fmr_page_size  = 1 << rds_ibdev->fmr_page_shift;
 	rds_ibdev->fmr_page_mask  = ~((u64) rds_ibdev->fmr_page_size - 1);
 	rds_ibdev->fmr_max_remaps = dev_attr->max_map_per_fmr?: 32;
-	rds_ibdev->max_fmrs = dev_attr->max_fmr?
+	rds_ibdev->max_fmrs = dev_attr->max_fmr ?
 			min_t(unsigned int, dev_attr->max_fmr, fmr_pool_size) :
 			fmr_pool_size;
 
@@ -97,21 +98,10 @@
 	if (IS_ERR(rds_ibdev->pd))
 		goto free_dev;
 
-	if (!rds_ibdev->dma_local_lkey) {
-		if (device->node_type != RDMA_NODE_RNIC) {
-			rds_ibdev->mr = ib_get_dma_mr(rds_ibdev->pd,
-						IB_ACCESS_LOCAL_WRITE);
-		} else {
-			/* Why does it have to have these permissions? */
-			rds_ibdev->mr = ib_get_dma_mr(rds_ibdev->pd,
-						IB_ACCESS_REMOTE_READ |
-						IB_ACCESS_REMOTE_WRITE |
-						IB_ACCESS_LOCAL_WRITE);
-		}
-		if (IS_ERR(rds_ibdev->mr))
-			goto err_pd;
-	} else
-		rds_ibdev->mr = NULL;
+	rds_ibdev->mr = ib_get_dma_mr(rds_ibdev->pd,
+				      IB_ACCESS_LOCAL_WRITE);
+	if (IS_ERR(rds_ibdev->mr))
+		goto err_pd;
 
 	rds_ibdev->mr_pool = rds_ib_create_mr_pool(rds_ibdev);
 	if (IS_ERR(rds_ibdev->mr_pool)) {
@@ -128,8 +118,7 @@
 	goto free_attr;
 
 err_mr:
-	if (!rds_ibdev->dma_local_lkey)
-		ib_dereg_mr(rds_ibdev->mr);
+	ib_dereg_mr(rds_ibdev->mr);
 err_pd:
 	ib_dealloc_pd(rds_ibdev->pd);
 free_dev:
@@ -152,27 +141,32 @@
 		kfree(i_ipaddr);
 	}
 
-	rds_ib_remove_conns(rds_ibdev);
+	rds_ib_destroy_conns(rds_ibdev);
 
 	if (rds_ibdev->mr_pool)
 		rds_ib_destroy_mr_pool(rds_ibdev->mr_pool);
 
-	if (!rds_ibdev->dma_local_lkey)
-		ib_dereg_mr(rds_ibdev->mr);
+	ib_dereg_mr(rds_ibdev->mr);
 
 	while (ib_dealloc_pd(rds_ibdev->pd)) {
 		rdsdebug("%s-%d Failed to dealloc pd %p\n", __func__, __LINE__, rds_ibdev->pd);
 		msleep(1);
 	}
-	
+
 	list_del(&rds_ibdev->list);
 	kfree(rds_ibdev);
 }
 
+struct ib_client rds_ib_client = {
+	.name   = "rds_ib",
+	.add    = rds_ib_add_one,
+	.remove = rds_ib_remove_one
+};
+
 static int rds_ib_conn_info_visitor(struct rds_connection *conn,
 				    void *buffer)
 {
-	struct rds_info_ib_connection *iinfo = buffer;
+	struct rds_info_rdma_connection *iinfo = buffer;
 	struct rds_ib_connection *ic;
 
 	/* We will only ever look at IB transports */
@@ -209,7 +203,7 @@
 {
 	rds_for_each_conn_info(sock, len, iter, lens,
 				rds_ib_conn_info_visitor,
-				sizeof(struct rds_info_ib_connection));
+				sizeof(struct rds_info_rdma_connection));
 }
 
 
@@ -225,7 +219,7 @@
  */
 static int rds_ib_laddr_check(__be32 addr)
 {
-	int ret = -EADDRNOTAVAIL;
+	int ret;
 	struct rdma_cm_id *cm_id;
 	struct sockaddr_in sin;
 
@@ -233,39 +227,37 @@
 	 * IB and iWARP capable NICs.
 	 */
 	cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP);
-	if (cm_id == NULL) {
-		printk("rdma_create_id failed\n");
-		return -EADDRNOTAVAIL;
-	}
+	if (IS_ERR(cm_id))
+		return PTR_ERR(cm_id);
 
 	memset(&sin, 0, sizeof(sin));
 	sin.sin_family = AF_INET;
 	sin.sin_addr.s_addr = addr;
 
-	/* rdma_bind_addr will fail for non-IB/iWARP devices */
+	/* rdma_bind_addr will only succeed for IB & iWARP devices */
 	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
-	if (ret)
+	/* due to this, we will claim to support iWARP devices unless we
+	   check node_type. */
+	if (ret || cm_id->device->node_type != RDMA_NODE_IB_CA)
 		ret = -EADDRNOTAVAIL;
 
 	rdsdebug("addr %u.%u.%u.%u ret %d node type %d\n",
-		 NIPQUAD(addr), ret, cm_id->device?cm_id->device->node_type:-1);
+		NIPQUAD(addr), ret,
+		cm_id->device ? cm_id->device->node_type : -1);
 
 	rdma_destroy_id(cm_id);
 
 	return ret;
 }
 
-/*
- * conns should have been freed up by the time we get here..
- */
-static void rds_ib_exit(void)
+void rds_ib_exit(void)
 {
 	rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
-	rds_ib_listen_stop();
-	rds_trans_unregister(&rds_ib_transport);
-	rds_ib_recv_exit();
-	rds_ib_sysctl_exit();
+	rds_ib_destroy_nodev_conns();
 	ib_unregister_client(&rds_ib_client);
+	rds_ib_sysctl_exit();
+	rds_ib_recv_exit();
+	rds_trans_unregister(&rds_ib_transport);
 }
 
 struct rds_transport rds_ib_transport = {
@@ -282,7 +274,9 @@
 	.inc_copy_to_user	= rds_ib_inc_copy_to_user,
 	.inc_purge		= rds_ib_inc_purge,
 	.inc_free		= rds_ib_inc_free,
-	.listen_stop		= rds_ib_listen_stop,
+	.cm_initiate_connect	= rds_ib_cm_initiate_connect,
+	.cm_handle_connect	= rds_ib_cm_handle_connect,
+	.cm_connect_complete	= rds_ib_cm_connect_complete,
 	.stats_info_copy	= rds_ib_stats_info_copy,
 	.exit			= rds_ib_exit,
 	.get_mr			= rds_ib_get_mr,
@@ -290,7 +284,7 @@
 	.free_mr		= rds_ib_free_mr,
 	.flush_mrs		= rds_ib_flush_mrs,
 	.t_owner		= THIS_MODULE,
-	.t_name			= "ib",
+	.t_name			= "infiniband",
 };
 
 int __init rds_ib_init(void)
@@ -315,16 +309,10 @@
 	if (ret)
 		goto out_recv;
 
-	ret = rds_ib_listen_init();
-	if (ret)
-		goto out_register;
-
 	rds_info_register_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
 
 	goto out;
 
-out_register:
-	rds_trans_unregister(&rds_ib_transport);
 out_recv:
 	rds_ib_recv_exit();
 out_sysctl:
@@ -334,3 +322,6 @@
 out:
 	return ret;
 }
+
+MODULE_LICENSE("GPL");
+

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,12 +1,11 @@
 #ifndef _RDS_IB_H
-#define _RDS_IB_H 
+#define _RDS_IB_H
 
 #include <rdma/ib_verbs.h>
 #include <rdma/rdma_cm.h>
 #include "rds.h"
+#include "rdma_transport.h"
 
-#define RDS_IB_RESOLVE_TIMEOUT_MS	5000
-
 #define RDS_FMR_SIZE			256
 #define RDS_FMR_POOL_SIZE		4096
 
@@ -16,10 +15,14 @@
 #define RDS_IB_DEFAULT_RECV_WR		1024
 #define RDS_IB_DEFAULT_SEND_WR		256
 
+#define RDS_IB_DEFAULT_RETRY_COUNT	2
+
 #define RDS_IB_SUPPORTED_PROTOCOLS	0x00000003	/* minor versions supported */
 
+extern struct list_head rds_ib_devices;
+
 /*
- * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to 
+ * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
  * try and minimize the amount of memory tied up in both the device and
  * socket receive queues.
  */
@@ -72,14 +75,21 @@
 	atomic_t	w_free_ctr;
 };
 
+struct rds_ib_device;
+
 struct rds_ib_connection {
+
+	struct list_head	ib_node;
+	struct rds_ib_device	*rds_ibdev;
+	struct rds_connection	*conn;
+
 	/* alphabet soup, IBTA style */
 	struct rdma_cm_id	*i_cm_id;
 	struct ib_pd		*i_pd;
 	struct ib_mr		*i_mr;
 	struct ib_cq		*i_send_cq;
 	struct ib_cq		*i_recv_cq;
-	
+
 	/* tx */
 	struct rds_ib_work_ring	i_send_ring;
 	struct rds_message	*i_rm;
@@ -101,7 +111,7 @@
 	/* sending acks */
 	unsigned long		i_ack_flags;
 #ifndef KERNEL_HAS_ATOMIC64
-	spinlock_t		i_ack_lock;
+	spinlock_t		i_ack_lock;	/* protect i_ack_next */
 	u64			i_ack_next;	/* next ACK to send */
 #else
 	atomic64_t		i_ack_next;	/* next ACK to send */
@@ -111,7 +121,7 @@
 	struct ib_sge		i_ack_sge;
 	u64			i_ack_dma;
 	unsigned long		i_ack_queued;
- 
+
 	/* Flow control related information
 	 *
 	 * Our algorithm uses a pair variables that we need to access
@@ -123,10 +133,7 @@
 	atomic_t		i_credits;
 
 	/* Protocol version specific information */
-	unsigned int		i_flowctl : 1,	/* enable/disable flow ctl */
-				i_iwarp   : 1,	/* this is actually iWARP not IB */
-				i_fastreg : 1,	/* device supports fastreg */
-				i_dma_local_lkey : 1;
+	unsigned int		i_flowctl:1;	/* enable/disable flow ctl */
 
 	/* Batched completions */
 	unsigned int		i_unsignaled_wrs;
@@ -144,11 +151,6 @@
 	__be32			ipaddr;
 };
 
-struct rds_ib_devconn {
-	struct list_head	list;
-	struct rds_connection	*conn;
-};
-
 struct rds_ib_device {
 	struct list_head	list;
 	struct list_head	ipaddr_list;
@@ -164,9 +166,7 @@
 	unsigned int		max_fmrs;
 	int			max_sge;
 	unsigned int		max_wrs;
-	unsigned int		use_fastreg : 1,
-				dma_local_lkey : 1;
-	spinlock_t		spinlock;
+	spinlock_t		spinlock;	/* protect the above */
 };
 
 /* bits for i_ack_flags */
@@ -226,7 +226,7 @@
 }
 #define ib_dma_sync_sg_for_cpu	rds_ib_dma_sync_sg_for_cpu
 
-static void inline rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
+static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
 		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
 {
 	unsigned int i;
@@ -240,10 +240,6 @@
 }
 #define ib_dma_sync_sg_for_device	rds_ib_dma_sync_sg_for_device
 
-static inline u32 rds_ib_local_dma_lkey(struct rds_ib_connection *ic)
-{
-	return (ic->i_dma_local_lkey ? ic->i_cm_id->device->local_dma_lkey : ic->i_mr->lkey);
-}
 
 /* ib.c */
 extern struct rds_transport rds_ib_transport;
@@ -253,7 +249,11 @@
 
 extern unsigned int fmr_pool_size;
 extern unsigned int fmr_message_size;
+extern unsigned int rds_ib_retry_count;
 
+extern spinlock_t ib_nodev_conns_lock;
+extern struct list_head ib_nodev_conns;
+
 /* ib_cm.c */
 int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
 void rds_ib_conn_free(void *arg);
@@ -263,19 +263,34 @@
 int __init rds_ib_listen_init(void);
 void rds_ib_listen_stop(void);
 void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
+int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
+			     struct rdma_cm_event *event);
+int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id);
+void rds_ib_cm_connect_complete(struct rds_connection *conn,
+				struct rdma_cm_event *event);
 
+
 #define rds_ib_conn_error(conn, fmt...) \
-	__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt )
+	__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)
 
 /* ib_rdma.c */
 int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
-int rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
-void rds_ib_remove_conns(struct rds_ib_device *rds_ibdev);
+void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
+void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
+void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock);
+static inline void rds_ib_destroy_nodev_conns(void)
+{
+	__rds_ib_destroy_conns(&ib_nodev_conns, &ib_nodev_conns_lock);
+}
+static inline void rds_ib_destroy_conns(struct rds_ib_device *rds_ibdev)
+{
+	__rds_ib_destroy_conns(&rds_ibdev->conn_list, &rds_ibdev->spinlock);
+}
 struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *);
-void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_ib_connection *iinfo);
+void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo);
 void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
 void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
-		    __be32 ip_addr, u32 *key_ret);
+		    struct rds_sock *rs, u32 *key_ret);
 void rds_ib_sync_mr(void *trans_private, int dir);
 void rds_ib_free_mr(void *trans_private, int invalidate);
 void rds_ib_flush_mrs(void);
@@ -305,6 +320,7 @@
 void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
 void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
 int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
+int rds_ib_ring_low(struct rds_ib_work_ring *ring);
 u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
 u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
 extern wait_queue_head_t rds_ib_ring_empty_wait;
@@ -312,7 +328,7 @@
 /* ib_send.c */
 void rds_ib_xmit_complete(struct rds_connection *conn);
 int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
-	        unsigned int hdr_off, unsigned int sg, unsigned int off);
+		unsigned int hdr_off, unsigned int sg, unsigned int off);
 void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context);
 void rds_ib_send_init_ring(struct rds_ib_connection *ic);
 void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
@@ -320,7 +336,7 @@
 void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
 void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
 int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
-			     u32 *adv_credits);
+			     u32 *adv_credits, int need_posted, int max_posted);
 
 /* ib_stats.c */
 RDS_DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
@@ -342,17 +358,25 @@
 /*
  * Helper functions for getting/setting the header and data SGEs in
  * RDS packets (not RDMA)
+ *
+ * From version 3.1 onwards, header is in front of data in the sge.
  */
 static inline struct ib_sge *
 rds_ib_header_sge(struct rds_ib_connection *ic, struct ib_sge *sge)
 {
-	return &sge[0];
+	if (ic->conn->c_version > RDS_PROTOCOL_3_0)
+		return &sge[0];
+	else
+		return &sge[1];
 }
 
 static inline struct ib_sge *
 rds_ib_data_sge(struct rds_ib_connection *ic, struct ib_sge *sge)
 {
-	return &sge[1];
+	if (ic->conn->c_version > RDS_PROTOCOL_3_0)
+		return &sge[1];
+	else
+		return &sge[0];
 }
 
 #endif
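
For context on the rds_ib_header_sge()/rds_ib_data_sge() hunk above (from protocol 3.1 onwards the header SGE sits in front of the data, while 3.0 peers keep it behind), here is a small self-contained userspace C sketch of that selection logic. The two-slot sge struct, the RDS_PROTOCOL() packing and the helper names below are simplified stand-ins, not the kernel definitions:

/*
 * Minimal sketch: pick header/data slots by negotiated protocol version.
 * Everything here is illustrative; only the ordering rule mirrors the hunk.
 */
#include <stdio.h>

#define RDS_PROTOCOL(maj, min)	(((maj) << 8) | (min))
#define RDS_PROTOCOL_3_0	RDS_PROTOCOL(3, 0)
#define RDS_PROTOCOL_3_1	RDS_PROTOCOL(3, 1)

struct sge { const char *what; };

/* RDS 3.1 and later put the header in front of the data; 3.0 put it after. */
static struct sge *header_sge(unsigned int version, struct sge *sge)
{
	return version > RDS_PROTOCOL_3_0 ? &sge[0] : &sge[1];
}

static struct sge *data_sge(unsigned int version, struct sge *sge)
{
	return version > RDS_PROTOCOL_3_0 ? &sge[1] : &sge[0];
}

int main(void)
{
	unsigned int versions[] = { RDS_PROTOCOL_3_0, RDS_PROTOCOL_3_1 };
	struct sge sge[2];

	for (int i = 0; i < 2; i++) {
		header_sge(versions[i], sge)->what = "header";
		data_sge(versions[i], sge)->what = "data";
		printf("3.%d layout: [%s][%s]\n", i, sge[0].what, sge[1].what);
	}
	return 0;
}

Printed output is "[data][header]" for 3.0 and "[header][data]" for 3.1, which is why rds_ib_cm_connect_complete() above now waits for protocol negotiation before initializing the rings.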

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_cm.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_cm.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_cm.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -37,8 +37,6 @@
 #include "rds.h"
 #include "ib.h"
 
-static struct rdma_cm_id *rds_ib_listen_id;
-
 /*
  * Set the selected protocol version
  */
@@ -82,31 +80,17 @@
 {
 	int ret;
 
-	if (ic->i_flowctl) {
-		/* It seems we have to take a brief detour through SQD state
-		 * in order to change the RNR retry count. */
-		attr->qp_state = IB_QPS_SQD;
-		ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_STATE);
-		if (ret)
-			printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, SQD): err=%d\n", -ret);
-
-		attr->rnr_retry = 0;
-		ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_RNR_RETRY);
-		if (ret)
-			printk(KERN_NOTICE "ib_modify_qp(IB_QP_RNR_RETRY, 0): err=%d\n", -ret);
-	} else {
-		attr->min_rnr_timer = IB_RNR_TIMER_000_32;
-		ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_MIN_RNR_TIMER);
-		if (ret)
-			printk(KERN_NOTICE "ib_modify_qp(IB_QP_MIN_RNR_TIMER): err=%d\n", -ret);
-	}
+	attr->min_rnr_timer = IB_RNR_TIMER_000_32;
+	ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_MIN_RNR_TIMER);
+	if (ret)
+		printk(KERN_NOTICE "ib_modify_qp(IB_QP_MIN_RNR_TIMER): err=%d\n", -ret);
 }
 
 /*
  * Connection established.
  * We get here for both outgoing and incoming connection.
  */
-static void rds_ib_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event)
+void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event)
 {
 	const struct rds_ib_connect_private *dp = NULL;
 	struct rds_ib_connection *ic = conn->c_transport_data;
@@ -114,25 +98,37 @@
 	struct ib_qp_attr qp_attr;
 	int err;
 
-	if (event->param.conn.private_data_len) {
+	if (event->param.conn.private_data_len >= sizeof(*dp)) {
 		dp = event->param.conn.private_data;
 
-		rds_ib_set_protocol(conn,
+		/* make sure it isn't empty data */
+		if (dp->dp_protocol_major) {
+			rds_ib_set_protocol(conn,
 				RDS_PROTOCOL(dp->dp_protocol_major,
-					dp->dp_protocol_minor));
-		rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
+				dp->dp_protocol_minor));
+			rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
+		}
 	}
 
 	printk(KERN_NOTICE "RDS/IB: connected to %u.%u.%u.%u version %u.%u%s\n",
-			NIPQUAD(conn->c_laddr),
+			NIPQUAD(conn->c_faddr),
 			RDS_PROTOCOL_MAJOR(conn->c_version),
 			RDS_PROTOCOL_MINOR(conn->c_version),
-			ic->i_flowctl? ", flow control" : "");
+			ic->i_flowctl ? ", flow control" : "");
 
-	/* Tune RNR behavior - but not for iWARP */
-	if (!ic->i_iwarp)
-		rds_ib_tune_rnr(ic, &qp_attr);
+	/*
+	 * Init rings and fill recv. this needs to wait until protocol negotiation
+	 * is complete, since ring layout is different from 3.0 to 3.1.
+	 */
+	rds_ib_send_init_ring(ic);
+	rds_ib_recv_init_ring(ic);
+	/* Post receive buffers - as a side effect, this will update
+	 * the posted credit count. */
+	rds_ib_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 1);
 
+	/* Tune RNR behavior */
+	rds_ib_tune_rnr(ic, &qp_attr);
+
 	qp_attr.qp_state = IB_QPS_RTS;
 	err = ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE);
 	if (err)
@@ -143,9 +139,7 @@
 	err = rds_ib_update_ipaddr(rds_ibdev, conn->c_laddr);
 	if (err)
 		printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n", err);
-	err = rds_ib_add_conn(rds_ibdev, conn);
-	if (err)
-		printk(KERN_ERR "rds_ib_add_conn failed (%d)\n", err);
+	rds_ib_add_conn(rds_ibdev, conn);
 
 	/* If the peer gave us the last packet it saw, process this as if
 	 * we had received a regular ACK. */
@@ -160,19 +154,16 @@
 			struct rds_ib_connect_private *dp,
 			u32 protocol_version)
 {
-	struct rds_ib_connection *ic = conn->c_transport_data;
-
 	memset(conn_param, 0, sizeof(struct rdma_conn_param));
 	/* XXX tune these? */
 	conn_param->responder_resources = 1;
 	conn_param->initiator_depth = 1;
+	conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7);
+	conn_param->rnr_retry_count = 7;
 
-	if (!ic->i_iwarp) {
-		conn_param->retry_count = 7;
-		conn_param->rnr_retry_count = 7;
-	}
-
 	if (dp) {
+		struct rds_ib_connection *ic = conn->c_transport_data;
+
 		memset(dp, 0, sizeof(*dp));
 		dp->dp_saddr = conn->c_laddr;
 		dp->dp_daddr = conn->c_faddr;
@@ -187,6 +178,7 @@
 
 			credits = IB_GET_POST_CREDITS(atomic_read(&ic->i_credits));
 			dp->dp_credit = cpu_to_be32(credits);
+			atomic_sub(IB_SET_POST_CREDITS(credits), &ic->i_credits);
 		}
 
 		conn_param->private_data = dp;
@@ -207,14 +199,15 @@
 	rdsdebug("conn %p ic %p event %u\n", conn, ic, event->event);
 
 	switch (event->event) {
-		case IB_EVENT_COMM_EST:
-			rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
-			break;
-		default:
-			printk(KERN_WARNING "RDS/ib: unhandled QP event %u "
-			       "on connection to %u.%u.%u.%u\n", event->event,
-			       NIPQUAD(conn->c_faddr));
-			break;
+	case IB_EVENT_COMM_EST:
+		rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
+		break;
+	default:
+		rds_ib_conn_error(conn, "RDS/IB: Fatal QP Event %u "
+			"- connection %u.%u.%u.%u->%u.%u.%u.%u...reconnecting\n",
+			event->event, NIPQUAD(conn->c_laddr),
+			NIPQUAD(conn->c_faddr));
+		break;
 	}
 }
 
@@ -298,7 +291,7 @@
 	attr.send_cq = ic->i_send_cq;
 	attr.recv_cq = ic->i_recv_cq;
 
-	/* 
+	/*
 	 * XXX this can fail if max_*_wr is too large?  Are we supposed
 	 * to back off until we get a value that the hardware can support?
 	 */
@@ -310,7 +303,7 @@
 
 	ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
 					   ic->i_send_ring.w_nr *
-					   	sizeof(struct rds_header),
+						sizeof(struct rds_header),
 					   &ic->i_send_hdrs_dma, GFP_KERNEL);
 	if (ic->i_send_hdrs == NULL) {
 		ret = -ENOMEM;
@@ -320,7 +313,7 @@
 
 	ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
 					   ic->i_recv_ring.w_nr *
-					   	sizeof(struct rds_header),
+						sizeof(struct rds_header),
 					   &ic->i_recv_hdrs_dma, GFP_KERNEL);
 	if (ic->i_recv_hdrs == NULL) {
 		ret = -ENOMEM;
@@ -342,7 +335,7 @@
 		rdsdebug("send allocation failed\n");
 		goto out;
 	}
-	rds_ib_send_init_ring(ic);
+	memset(ic->i_sends, 0, ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work));
 
 	ic->i_recvs = vmalloc(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work));
 	if (ic->i_recvs == NULL) {
@@ -350,14 +343,10 @@
 		rdsdebug("recv allocation failed\n");
 		goto out;
 	}
+	memset(ic->i_recvs, 0, ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work));
 
-	rds_ib_recv_init_ring(ic);
 	rds_ib_recv_init_ack(ic);
 
-	/* Post receive buffers - as a side effect, this will update
-	 * the posted credit count. */
-	rds_ib_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 1);
-
 	rdsdebug("conn %p pd %p mr %p cq %p %p\n", conn, ic->i_pd, ic->i_mr,
 		 ic->i_send_cq, ic->i_recv_cq);
 
@@ -365,19 +354,32 @@
 	return ret;
 }
 
-static u32 rds_ib_protocol_compatible(const struct rds_ib_connect_private *dp)
+static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event)
 {
+	const struct rds_ib_connect_private *dp = event->param.conn.private_data;
 	u16 common;
 	u32 version = 0;
 
-	/* rdma_cm private data is odd - when there is any private data in the
+	/*
+	 * rdma_cm private data is odd - when there is any private data in the
 	 * request, we will be given a pretty large buffer without telling us the
 	 * original size. The only way to tell the difference is by looking at
 	 * the contents, which are initialized to zero.
 	 * If the protocol version fields aren't set, this is a connection attempt
 	 * from an older version. This could be 3.0 or 2.0 - we can't tell.
-	 * We really should have changed this for OFED 1.3 :-( */
-	if (dp->dp_protocol_major == 0)
+	 * We really should have changed this for OFED 1.3 :-(
+	 */
+
+	/* Be paranoid. RDS always has privdata */
+	if (!event->param.conn.private_data_len) {
+		printk(KERN_NOTICE "RDS incoming connection has no private data, "
+			"rejecting\n");
+		return 0;
+	}
+
+	/* Even if len is crap *now* I still want to check it. -ASG */
+	if (event->param.conn.private_data_len < sizeof (*dp)
+	    || dp->dp_protocol_major == 0)
 		return RDS_PROTOCOL_3_0;
 
 	common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS;
@@ -395,26 +397,29 @@
 	return version;
 }
 
-static int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
+int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
 				    struct rdma_cm_event *event)
 {
+	__be64 lguid = cm_id->route.path_rec->sgid.global.interface_id;
+	__be64 fguid = cm_id->route.path_rec->dgid.global.interface_id;
 	const struct rds_ib_connect_private *dp = event->param.conn.private_data;
 	struct rds_ib_connect_private dp_rep;
 	struct rds_connection *conn = NULL;
 	struct rds_ib_connection *ic = NULL;
 	struct rdma_conn_param conn_param;
-	struct rds_ib_device *rds_ibdev;
 	u32 version;
 	int err, destroy = 1;
 
 	/* Check whether the remote protocol version matches ours. */
-	version = rds_ib_protocol_compatible(dp);
+	version = rds_ib_protocol_compatible(event);
 	if (!version)
 		goto out;
 
-	rdsdebug("saddr %u.%u.%u.%u daddr %u.%u.%u.%u RDSv%u.%u\n",
-		 NIPQUAD(dp->dp_saddr), NIPQUAD(dp->dp_daddr),
-		 RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version));
+	rdsdebug("saddr %u.%u.%u.%u daddr %u.%u.%u.%u RDSv%u.%u lguid 0x%llx fguid "
+		 "0x%llx\n", NIPQUAD(dp->dp_saddr), NIPQUAD(dp->dp_daddr),
+		 RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version),
+		 (unsigned long long)be64_to_cpu(lguid),
+		 (unsigned long long)be64_to_cpu(fguid));
 
 	conn = rds_conn_create(dp->dp_daddr, dp->dp_saddr, &rds_ib_transport,
 			       GFP_KERNEL);
@@ -423,23 +428,15 @@
 		conn = NULL;
 		goto out;
 	}
-	ic = conn->c_transport_data;
 
-	rds_ib_set_protocol(conn, version);
-	rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
-
-	/* If the peer gave us the last packet it saw, process this as if
-	 * we had received a regular ACK. */
-	if (dp->dp_ack_seq)
-		rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);
-
 	/*
 	 * The connection request may occur while the
 	 * previous connection exists, e.g. in case of failover.
 	 * But as connections may be initiated simultaneously
 	 * by both hosts, we have a random backoff mechanism -
-	 * see the comment above rds_queuereconnect()
+	 * see the comment above rds_queue_reconnect()
 	 */
+	mutex_lock(&conn->c_cm_lock);
 	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
 		if (rds_conn_state(conn) == RDS_CONN_UP) {
 			rdsdebug("incoming connect while connecting\n");
@@ -450,28 +447,32 @@
 			/* Wait and see - our connect may still be succeeding */
 			rds_ib_stats_inc(s_ib_connect_raced);
 		}
+		mutex_unlock(&conn->c_cm_lock);
 		goto out;
 	}
 
+	ic = conn->c_transport_data;
+
+	rds_ib_set_protocol(conn, version);
+	rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
+
+	/* If the peer gave us the last packet it saw, process this as if
+	 * we had received a regular ACK. */
+	if (dp->dp_ack_seq)
+		rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);
+
 	BUG_ON(cm_id->context);
 	BUG_ON(ic->i_cm_id);
 
 	ic->i_cm_id = cm_id;
 	cm_id->context = conn;
 
-	rds_ibdev = ib_get_client_data(cm_id->device, &rds_ib_client);
+	/* We got halfway through setting up the ib_connection, if we
+	 * fail now, we have to take the long route out of this mess. */
+	destroy = 0;
 
-	/* Remember whether this is IB or iWARP */
-	ic->i_iwarp = (cm_id->device->node_type == RDMA_NODE_RNIC);
-	ic->i_fastreg = rds_ibdev->use_fastreg;
-	ic->i_dma_local_lkey = rds_ibdev->dma_local_lkey;
-
- 	/* We got halfway through setting up the ib_connection, if we
- 	 * fail now, we have to take the long route out of this mess. */
- 	destroy = 0;
-
- 	err = rds_ib_setup_qp(conn);
- 	if (err) {
+	err = rds_ib_setup_qp(conn);
+	if (err) {
 		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
 		goto out;
 	}
@@ -480,34 +481,21 @@
 
 	/* rdma_accept() calls rdma_reject() internally if it fails */
 	err = rdma_accept(cm_id, &conn_param);
+	mutex_unlock(&conn->c_cm_lock);
 	if (err) {
 		rds_ib_conn_error(conn, "rdma_accept failed (%d)\n", err);
- 		goto out;
- 	}
-
-	/* update ib_device with this local ipaddr & conn */
-	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
-	err = rds_ib_update_ipaddr(rds_ibdev, dp->dp_saddr);
-	if (err) {
-		rds_ib_conn_error(conn, "rds_ib_update_ipaddr failed (%d)\n", err);
 		goto out;
 	}
 
-	err = rds_ib_add_conn(rds_ibdev, conn);
-	if (err) {
-		rds_ib_conn_error(conn, "rds_ib_add_conn failed (%d)\n", err);
-		goto out;
-	}
+	return 0;
 
- 	return 0;
-
 out:
 	rdma_reject(cm_id, NULL, 0);
 	return destroy;
 }
 
 
-static int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id)
+int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id)
 {
 	struct rds_connection *conn = cm_id->context;
 	struct rds_ib_connection *ic = conn->c_transport_data;
@@ -537,114 +525,28 @@
 	 * the cm_id. We should certainly not do it as long as we still
 	 * "own" the cm_id. */
 	if (ret) {
-		struct rds_ib_connection *ic = conn->c_transport_data;
-
 		if (ic->i_cm_id == cm_id)
 			ret = 0;
 	}
 	return ret;
 }
 
-static int rds_ib_cm_event_handler(struct rdma_cm_id *cm_id,
-				   struct rdma_cm_event *event)
-{
-	/* this can be null in the listening path */
-	struct rds_connection *conn = cm_id->context;
-	int ret = 0;
-
-	rdsdebug("conn %p id %p handling event %u\n", conn, cm_id,
-		 event->event);
-
-	/* Prevent shutdown from tearing down the connection
-	 * while we're executing. */
-	if (conn) {
-		mutex_lock(&conn->c_cm_lock);
-
-		/* If the connection is being shut down, bail out
-		 * right away. We return 0 so cm_id doesn't get
-		 * destroyed prematurely */
-		if (atomic_read(&conn->c_state) == RDS_CONN_DISCONNECTING) {
-			/* Reject incoming connections while we're tearing
-			 * down an existing one. */
-			if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST)
-				ret = 1;
-			goto out;
-		}
-	}
-
-	switch (event->event) {
-		case RDMA_CM_EVENT_CONNECT_REQUEST:
-			ret = rds_ib_cm_handle_connect(cm_id, event);
-			break;
-
-		case RDMA_CM_EVENT_ADDR_RESOLVED:
-			/* XXX do we need to clean up if this fails? */
-			ret = rdma_resolve_route(cm_id,
-						 RDS_IB_RESOLVE_TIMEOUT_MS);
-			break;
-
-		case RDMA_CM_EVENT_ROUTE_RESOLVED:
-			/* XXX worry about racing with listen acceptance */
-			ret = rds_ib_cm_initiate_connect(cm_id);
-			break;
-
-		case RDMA_CM_EVENT_ESTABLISHED:
-			rds_ib_connect_complete(conn, event);
-			break;
-
-		case RDMA_CM_EVENT_ADDR_ERROR:
-		case RDMA_CM_EVENT_ROUTE_ERROR:
-		case RDMA_CM_EVENT_CONNECT_ERROR:
-		case RDMA_CM_EVENT_UNREACHABLE:
-		case RDMA_CM_EVENT_REJECTED:
-		case RDMA_CM_EVENT_DEVICE_REMOVAL:
-		case RDMA_CM_EVENT_ADDR_CHANGE:
-			if (conn)
-				rds_conn_drop(conn);
-			break;
-
-		case RDMA_CM_EVENT_DISCONNECTED:
-			rds_conn_drop(conn);
-			break;
-
-		default:
-			/* things like device disconnect? */
-			printk(KERN_ERR "unknown event %u\n", event->event);
-			BUG();
-			break;
-	}
-
-out:
-	if (conn) {
-		struct rds_ib_connection *ic = conn->c_transport_data;
-
-		/* If we return non-zero, we must to hang on to the cm_id */
-		BUG_ON(ic->i_cm_id == cm_id && ret);
-
-		mutex_unlock(&conn->c_cm_lock);
-	}
-
-	rdsdebug("id %p event %u handling ret %d\n", cm_id, event->event, ret);
-
-	return ret;
-}
-
 int rds_ib_conn_connect(struct rds_connection *conn)
 {
 	struct rds_ib_connection *ic = conn->c_transport_data;
-	struct rds_ib_device *rds_ibdev;
 	struct sockaddr_in src, dest;
 	int ret;
 
 	/* XXX I wonder what effect the port space has */
-	ic->i_cm_id = rdma_create_id(rds_ib_cm_event_handler, conn,
+	/* delegate cm event handler to rdma_transport */
+	ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn,
 				     RDMA_PS_TCP);
 	if (IS_ERR(ic->i_cm_id)) {
 		ret = PTR_ERR(ic->i_cm_id);
 		ic->i_cm_id = NULL;
 		rdsdebug("rdma_create_id() failed: %d\n", ret);
 		goto out;
-	} 
+	}
 
 	rdsdebug("created cm id %p for conn %p\n", ic->i_cm_id, conn);
 
@@ -652,30 +554,13 @@
 	src.sin_addr.s_addr = (__force u32)conn->c_laddr;
 	src.sin_port = (__force u16)htons(0);
 
-	/* First, bind to the local address and device. */
-	ret = rdma_bind_addr(ic->i_cm_id, (struct sockaddr *) &src);
-	if (ret) {
-		rdsdebug("rdma_bind_addr(%u.%u.%u.%u) failed: %d\n",
-				NIPQUAD(conn->c_laddr), ret);
-		rdma_destroy_id(ic->i_cm_id);
-		ic->i_cm_id = NULL;
-		goto out;
-	}
-
-	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
-
-	/* Now check the device type and set i_iwarp */
-	ic->i_iwarp = (ic->i_cm_id->device->node_type == RDMA_NODE_RNIC);
-	ic->i_fastreg = rds_ibdev->use_fastreg;
-	ic->i_dma_local_lkey = rds_ibdev->dma_local_lkey;
-
 	dest.sin_family = AF_INET;
 	dest.sin_addr.s_addr = (__force u32)conn->c_faddr;
 	dest.sin_port = (__force u16)htons(RDS_PORT);
 
 	ret = rdma_resolve_addr(ic->i_cm_id, (struct sockaddr *)&src,
 				(struct sockaddr *)&dest,
-				RDS_IB_RESOLVE_TIMEOUT_MS);
+				RDS_RDMA_RESOLVE_TIMEOUT_MS);
 	if (ret) {
 		rdsdebug("addr resolve failed for cm id %p: %d\n", ic->i_cm_id,
 			 ret);
@@ -696,10 +581,9 @@
 {
 	struct rds_ib_connection *ic = conn->c_transport_data;
 	int err = 0;
-	struct ib_qp_attr qp_attr;
 
 	rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id,
-		 ic->i_pd, ic->i_send_cq, ic->i_recv_cq, 
+		 ic->i_pd, ic->i_send_cq, ic->i_recv_cq,
 		 ic->i_cm_id ? ic->i_cm_id->qp : NULL);
 
 	if (ic->i_cm_id) {
@@ -722,14 +606,14 @@
 		if (ic->i_send_hdrs)
 			ib_dma_free_coherent(dev,
 					   ic->i_send_ring.w_nr *
-					   	sizeof(struct rds_header),
+						sizeof(struct rds_header),
 					   ic->i_send_hdrs,
 					   ic->i_send_hdrs_dma);
 
 		if (ic->i_recv_hdrs)
 			ib_dma_free_coherent(dev,
 					   ic->i_recv_ring.w_nr *
-					   	sizeof(struct rds_header),
+						sizeof(struct rds_header),
 					   ic->i_recv_hdrs,
 					   ic->i_recv_hdrs_dma);
 
@@ -750,15 +634,22 @@
 			ib_destroy_cq(ic->i_recv_cq);
 		rdma_destroy_id(ic->i_cm_id);
 
+		/*
+		 * Move connection back to the nodev list.
+		 */
+		if (ic->rds_ibdev)
+			rds_ib_remove_conn(ic->rds_ibdev, conn);
+
 		ic->i_cm_id = NULL;
 		ic->i_pd = NULL;
-                ic->i_mr = NULL;
+		ic->i_mr = NULL;
 		ic->i_send_cq = NULL;
 		ic->i_recv_cq = NULL;
 		ic->i_send_hdrs = NULL;
 		ic->i_recv_hdrs = NULL;
 		ic->i_ack = NULL;
 	}
+	BUG_ON(ic->rds_ibdev);
 
 	/* Clear pending transmit */
 	if (ic->i_rm) {
@@ -796,91 +687,62 @@
 int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
 {
 	struct rds_ib_connection *ic;
+	unsigned long flags;
 
 	/* XXX too lazy? */
 	ic = kzalloc(sizeof(struct rds_ib_connection), GFP_KERNEL);
 	if (ic == NULL)
 		return -ENOMEM;
 
+	INIT_LIST_HEAD(&ic->ib_node);
 	mutex_init(&ic->i_recv_mutex);
 #ifndef KERNEL_HAS_ATOMIC64
 	spin_lock_init(&ic->i_ack_lock);
 #endif
 
-	/* 
+	/*
 	 * rds_ib_conn_shutdown() waits for these to be emptied so they
 	 * must be initialized before it can be called.
 	 */
 	rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
 	rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);
 
+	ic->conn = conn;
 	conn->c_transport_data = ic;
 
+	spin_lock_irqsave(&ib_nodev_conns_lock, flags);
+	list_add_tail(&ic->ib_node, &ib_nodev_conns);
+	spin_unlock_irqrestore(&ib_nodev_conns_lock, flags);
+
+
 	rdsdebug("conn %p conn ic %p\n", conn, conn->c_transport_data);
 	return 0;
 }
 
+/*
+ * Free a connection. Connection must be shut down and not set for reconnect.
+ */
 void rds_ib_conn_free(void *arg)
 {
 	struct rds_ib_connection *ic = arg;
+	spinlock_t	*lock_ptr;
+
 	rdsdebug("ic %p\n", ic);
-	kfree(ic);
-}
 
-int __init rds_ib_listen_init(void)
-{
-	struct sockaddr_in sin;
-	struct rdma_cm_id *cm_id;
-	int ret;
-
-	cm_id = rdma_create_id(rds_ib_cm_event_handler, NULL, RDMA_PS_TCP);
-	if (IS_ERR(cm_id)) {
-		ret = PTR_ERR(cm_id);
-		printk(KERN_ERR "RDS/ib: failed to setup listener, "
-		       "rdma_create_id() returned %d\n", ret);
-		goto out;
-	}
-
-	sin.sin_family = PF_INET,
-	sin.sin_addr.s_addr = (__force u32)htonl(INADDR_ANY);
-	sin.sin_port = (__force u16)htons(RDS_PORT);
-
 	/*
-	 * XXX I bet this binds the cm_id to a device.  If we want to support
-	 * fail-over we'll have to take this into consideration.
+	 * Conn is either on a dev's list or on the nodev list.
+	 * A race with shutdown() or connect() would cause problems
+	 * (since rds_ibdev would change) but that should never happen.
 	 */
-	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
-	if (ret) {
-		printk(KERN_ERR "RDS/ib: failed to setup listener, "
-		       "rdma_bind_addr() returned %d\n", ret);
-		goto out;
-	}
+	lock_ptr = ic->rds_ibdev ? &ic->rds_ibdev->spinlock : &ib_nodev_conns_lock;
 
-	ret = rdma_listen(cm_id, 128);
-	if (ret) {
-		printk(KERN_ERR "RDS/ib: failed to setup listener, "
-		       "rdma_listen() returned %d\n", ret);
-		goto out;
-	}
+	spin_lock_irq(lock_ptr);
+	list_del(&ic->ib_node);
+	spin_unlock_irq(lock_ptr);
 
-	rdsdebug("cm %p listening on port %u\n", cm_id, RDS_PORT);
-
-	rds_ib_listen_id = cm_id;
-	cm_id = NULL;
-out:
-	if (cm_id)
-		rdma_destroy_id(cm_id);
-	return ret;
+	kfree(ic);
 }
 
-void rds_ib_listen_stop(void)
-{
-	if (rds_ib_listen_id) {
-		rdsdebug("cm %p\n", rds_ib_listen_id);
-		rdma_destroy_id(rds_ib_listen_id);
-		rds_ib_listen_id = NULL;
-	}
-}
 
 /*
  * An error occurred on the connection
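
On the rds_ib_protocol_compatible() change above: the request's private data must now be at least sizeof(*dp) long, zeroed or missing private data is treated as a legacy 3.0 peer, and the advertised minor-version bitmask is intersected with RDS_IB_SUPPORTED_PROTOCOLS. A rough userspace sketch of that negotiation follows; the private-data struct is a stand-in, and because the minor-version selection itself falls outside the hunk shown here, picking the highest common bit below is an assumption:

#include <stdint.h>
#include <stdio.h>

#define RDS_PROTOCOL(maj, min)		(((maj) << 8) | (min))
#define RDS_PROTOCOL_3_0		RDS_PROTOCOL(3, 0)
#define RDS_IB_SUPPORTED_PROTOCOLS	0x00000003	/* minor versions 0 and 1 */

/* Stand-in for the on-the-wire private data; not the kernel struct. */
struct connect_private {
	uint8_t  protocol_major;
	uint8_t  protocol_minor;
	uint16_t protocol_minor_mask;	/* already host byte order here */
};

static uint32_t negotiate(const struct connect_private *dp, size_t len)
{
	uint16_t common;
	int minor;

	/* Too short or zeroed private data: assume a legacy 3.0 peer. */
	if (len < sizeof(*dp) || dp->protocol_major == 0)
		return RDS_PROTOCOL_3_0;

	common = dp->protocol_minor_mask & RDS_IB_SUPPORTED_PROTOCOLS;

	/* Assumption: use the highest minor version both sides support. */
	for (minor = 15; minor >= 0; minor--)
		if (common & (1u << minor))
			return RDS_PROTOCOL(3, minor);

	return 0;	/* nothing in common: reject the connection */
}

int main(void)
{
	struct connect_private peer31 = { 3, 1, 0x0003 };
	struct connect_private peer30 = { 3, 0, 0x0001 };
	struct connect_private legacy = { 0, 0, 0x0000 };

	printf("3.1-capable peer -> 0x%x\n", negotiate(&peer31, sizeof(peer31)));
	printf("3.0-only peer    -> 0x%x\n", negotiate(&peer30, sizeof(peer30)));
	printf("legacy peer      -> 0x%x\n", negotiate(&legacy, 0));
	return 0;
}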

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_rdma.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_rdma.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_rdma.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -37,8 +37,6 @@
 #include "ib.h"
 
 
-extern struct list_head rds_ib_devices;
-
 /*
  * This is stored as mr->r_trans_private.
  */
@@ -49,9 +47,9 @@
 	struct list_head	list;
 	unsigned int		remap_count;
 
-	struct scatterlist *	sg;
+	struct scatterlist	*sg;
 	unsigned int		sg_len;
-	u64 *			dma;
+	u64			*dma;
 	int			sg_dma_len;
 };
 
@@ -79,7 +77,7 @@
 static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
 static void rds_ib_mr_pool_flush_worker(struct work_struct *work);
 
-static struct rds_ib_device* rds_ib_get_device(__be32 ipaddr)
+static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
 {
 	struct rds_ib_device *rds_ibdev;
 	struct rds_ib_ipaddr *i_ipaddr;
@@ -141,46 +139,65 @@
 	return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
 }
 
-int rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
+void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
 {
-	struct rds_ib_devconn *i_conn;
+	struct rds_ib_connection *ic = conn->c_transport_data;
 
-	i_conn = kmalloc(sizeof *i_conn, GFP_KERNEL);
-	if (!i_conn)
-		return -ENOMEM;
+	/* conn was previously on the nodev_conns_list */
+	spin_lock_irq(&ib_nodev_conns_lock);
+	BUG_ON(list_empty(&ib_nodev_conns));
+	BUG_ON(list_empty(&ic->ib_node));
+	list_del(&ic->ib_node);
 
-	i_conn->conn = conn;
-
 	spin_lock_irq(&rds_ibdev->spinlock);
-	list_add_tail(&i_conn->list, &rds_ibdev->conn_list);
+	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
 	spin_unlock_irq(&rds_ibdev->spinlock);
+	spin_unlock_irq(&ib_nodev_conns_lock);
 
-	return 0;
+	ic->rds_ibdev = rds_ibdev;
 }
 
-void rds_ib_remove_conns(struct rds_ib_device *rds_ibdev)
+void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
 {
-	struct rds_ib_devconn *i_conn, *c_next;
+	struct rds_ib_connection *ic = conn->c_transport_data;
 
+	/* place conn on nodev_conns_list */
+	spin_lock(&ib_nodev_conns_lock);
+
 	spin_lock_irq(&rds_ibdev->spinlock);
-	list_for_each_entry_safe(i_conn, c_next, &rds_ibdev->conn_list, list) {
-		if (rds_conn_up(i_conn->conn))
-			rds_conn_drop(i_conn->conn);
-		list_del(&i_conn->list);
-		kfree(i_conn);
-	}
+	BUG_ON(list_empty(&ic->ib_node));
+	list_del(&ic->ib_node);
 	spin_unlock_irq(&rds_ibdev->spinlock);
+
+	list_add_tail(&ic->ib_node, &ib_nodev_conns);
+
+	spin_unlock(&ib_nodev_conns_lock);
+
+	ic->rds_ibdev = NULL;
 }
 
+void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock)
+{
+	struct rds_ib_connection *ic, *_ic;
+	LIST_HEAD(tmp_list);
+
+	/* avoid calling conn_destroy with irqs off */
+	spin_lock_irq(list_lock);
+	list_splice(list, &tmp_list);
+	INIT_LIST_HEAD(list);
+	spin_unlock_irq(list_lock);
+
+	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node) {
+		if (ic->conn->c_passive)
+			rds_conn_destroy(ic->conn->c_passive);
+		rds_conn_destroy(ic->conn);
+	}
+}
+
 struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
 {
 	struct rds_ib_mr_pool *pool;
 
-	/* For now, disable all RDMA service on iWARP. This check will
-	 * go away when we have a working patch. */
-	if (rds_ibdev->dev->node_type == RDMA_NODE_RNIC)
-		return NULL;
-
 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
 	if (!pool)
 		return ERR_PTR(-ENOMEM);
@@ -208,12 +225,12 @@
 	return pool;
 }
 
-void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_ib_connection *iinfo)
+void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
 {
 	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
 
-	iinfo->rdma_fmr_max = pool->max_items;
-	iinfo->rdma_fmr_size = pool->fmr_attr.max_pages;
+	iinfo->rdma_mr_max = pool->max_items;
+	iinfo->rdma_mr_size = pool->fmr_attr.max_pages;
 }
 
 void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
@@ -247,7 +264,8 @@
 	int err = 0, iter = 0;
 
 	while (1) {
-		if ((ibmr = rds_ib_reuse_fmr(pool)) != NULL)
+		ibmr = rds_ib_reuse_fmr(pool);
+		if (ibmr)
 			return ibmr;
 
 		/* No clean MRs - now we have the choice of either
@@ -320,7 +338,7 @@
 	sg_dma_len = ib_dma_map_sg(dev, sg, nents,
 				 DMA_BIDIRECTIONAL);
 	if (unlikely(!sg_dma_len)) {
-	        printk(KERN_WARNING "RDS/IB: dma_map_sg failed!\n");
+		printk(KERN_WARNING "RDS/IB: dma_map_sg failed!\n");
 		return -EBUSY;
 	}
 
@@ -330,7 +348,7 @@
 	for (i = 0; i < sg_dma_len; ++i) {
 		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
 		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
-	
+
 		if (dma_addr & ~rds_ibdev->fmr_page_mask) {
 			if (i > 0)
 				return -EINVAL;
@@ -351,7 +369,7 @@
 	if (page_cnt > fmr_message_size)
 		return -EINVAL;
 
-	dma_pages = kmalloc(sizeof(u64) * page_cnt, GFP_KERNEL);
+	dma_pages = kmalloc(sizeof(u64) * page_cnt, GFP_ATOMIC);
 	if (!dma_pages)
 		return -ENOMEM;
 
@@ -359,14 +377,14 @@
 	for (i = 0; i < sg_dma_len; ++i) {
 		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
 		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
-	
+
 		for (j = 0; j < dma_len; j += rds_ibdev->fmr_page_size)
-			dma_pages[page_cnt++] = 
+			dma_pages[page_cnt++] =
 				(dma_addr & rds_ibdev->fmr_page_mask) + j;
 	}
-				
+
 	ret = ib_map_phys_fmr(ibmr->fmr,
-				   dma_pages, page_cnt, io_addr);	
+				   dma_pages, page_cnt, io_addr);
 	if (ret)
 		goto out;
 
@@ -435,7 +453,7 @@
 	}
 }
 
-void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
+static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
 {
 	unsigned int pinned = ibmr->sg_len;
 
@@ -455,10 +473,7 @@
 	item_count = atomic_read(&pool->item_count);
 	if (free_all)
 		return item_count;
-#if 0
-	if (item_count > pool->max_items_soft)
-		return item_count - pool->max_items / 2;
-#endif
+
 	return 0;
 }
 
@@ -468,7 +483,7 @@
  * If the number of MRs allocated exceeds the limit, we also try
  * to free as many MRs as needed to get back to this limit.
  */
-int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
+static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
 {
 	struct rds_ib_mr *ibmr, *next;
 	LIST_HEAD(unmap_list);
@@ -530,7 +545,7 @@
 	return ret;
 }
 
-void rds_ib_mr_pool_flush_worker(struct work_struct *work)
+static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
 {
 	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker);
 
@@ -545,16 +560,14 @@
 	unsigned long flags;
 
 	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);
-	if (!pool)
-		return;
 
 	/* Return it to the pool's free list */
 	spin_lock_irqsave(&pool->list_lock, flags);
-	if (ibmr->remap_count >= pool->fmr_attr.max_maps) {
+	if (ibmr->remap_count >= pool->fmr_attr.max_maps)
 		list_add(&ibmr->list, &pool->drop_list);
-	} else {
+	else
 		list_add(&ibmr->list, &pool->free_list);
-	}
+
 	atomic_add(ibmr->sg_len, &pool->free_pinned);
 	atomic_inc(&pool->dirty_count);
 	spin_unlock_irqrestore(&pool->list_lock, flags);
@@ -588,13 +601,13 @@
 }
 
 void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
-		    __be32 ip_addr, u32 *key_ret)
+		    struct rds_sock *rs, u32 *key_ret)
 {
 	struct rds_ib_device *rds_ibdev;
 	struct rds_ib_mr *ibmr = NULL;
 	int ret;
 
-	rds_ibdev = rds_ib_get_device(ip_addr);
+	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
 	if (!rds_ibdev) {
 		ret = -ENODEV;
 		goto out;
@@ -617,11 +630,11 @@
 
 	ibmr->device = rds_ibdev;
 
- out:   
+ out:
 	if (ret) {
-	         if (ibmr) 
-		         rds_ib_free_mr(ibmr, 0);
-		 ibmr = ERR_PTR(ret);
+		if (ibmr)
+			rds_ib_free_mr(ibmr, 0);
+		ibmr = ERR_PTR(ret);
 	}
 	return ibmr;
 }
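
The new __rds_ib_destroy_conns() above uses a familiar pattern: splice the whole connection list onto a private head while holding the lock, then run the destroy path only after the lock is dropped. A small userspace sketch of the same idea, with a pthread mutex and a hand-rolled singly linked list standing in for the kernel spinlock and list.h primitives:

/*
 * Illustrative only: detach the list under the lock, destroy entries
 * outside it, so a potentially sleeping destructor never runs locked.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
	int id;
	struct conn *next;
};

static struct conn *conn_list;			/* protected by list_lock */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void conn_destroy(struct conn *c)	/* may sleep in real life */
{
	printf("destroying conn %d\n", c->id);
	free(c);
}

static void destroy_all_conns(void)
{
	struct conn *head, *next;

	/* Steal the whole list under the lock (list_splice() in list.h terms). */
	pthread_mutex_lock(&list_lock);
	head = conn_list;
	conn_list = NULL;
	pthread_mutex_unlock(&list_lock);

	/* Tear the entries down with the lock dropped. */
	for (; head != NULL; head = next) {
		next = head->next;
		conn_destroy(head);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct conn *c = malloc(sizeof(*c));
		c->id = i;
		c->next = conn_list;
		conn_list = c;
	}
	destroy_all_conns();
	return 0;
}

The point, per the in-tree comment, is simply to avoid calling the connection destructor with the list lock held and interrupts disabled.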

Deleted: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_rds.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_rds.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_rds.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,257 +0,0 @@
-/*
- * Copyright (c) 2008 Oracle.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef IB_RDS_H
-#define IB_RDS_H
-
-#include <linux/types.h>
-
-/* These sparse annotated types shouldn't be in any user
- * visible header file. We should clean this up rather
- * than kludging around them. */
-#ifndef __KERNEL__
-#define __be16	u_int16_t
-#define __be32	u_int32_t
-#define __be64	u_int64_t
-#endif
-
-#define RDS_IB_ABI_VERSION		0x301
-
-/*
- * setsockopt/getsockopt for SOL_RDS
- */
-#define RDS_CANCEL_SENT_TO      	1
-#define RDS_GET_MR			2
-#define RDS_FREE_MR			3
-/* deprecated: RDS_BARRIER 4 */
-#define RDS_RECVERR			5
-#define RDS_CONG_MONITOR		6
-
-/*
- * Control message types for SOL_RDS.
- *
- * CMSG_RDMA_ARGS (sendmsg)
- *	Request a RDMA transfer to/from the specified
- *	memory ranges.
- *	The cmsg_data is a struct rds_rdma_args.
- * RDS_CMSG_RDMA_DEST (recvmsg, sendmsg)
- *	Kernel informs application about intended
- *	source/destination of a RDMA transfer
- * RDS_CMSG_RDMA_MAP (sendmsg)
- *	Application asks kernel to map the given
- *	memory range into a IB MR, and send the
- *	R_Key along in an RDS extension header.
- *	The cmsg_data is a struct rds_get_mr_args,
- *	the same as for the GET_MR setsockopt.
- * RDS_CMSG_RDMA_STATUS (recvmsg)
- *	Returns the status of a completed RDMA operation.
- */
-#define RDS_CMSG_RDMA_ARGS		1
-#define RDS_CMSG_RDMA_DEST		2
-#define RDS_CMSG_RDMA_MAP		3
-#define RDS_CMSG_RDMA_STATUS		4
-#define RDS_CMSG_CONG_UPDATE		5
-
-#define RDS_INFO_COUNTERS		10000
-#define RDS_INFO_CONNECTIONS		10001
-/* 10002 aka RDS_INFO_FLOWS is deprecated */
-#define RDS_INFO_SEND_MESSAGES		10003
-#define RDS_INFO_RETRANS_MESSAGES       10004
-#define RDS_INFO_RECV_MESSAGES          10005
-#define RDS_INFO_SOCKETS                10006
-#define RDS_INFO_TCP_SOCKETS            10007
-#define RDS_INFO_IB_CONNECTIONS		10008
-#define RDS_INFO_CONNECTION_STATS	10009
-
-struct rds_info_counter {
-	u_int8_t	name[32];
-	u_int64_t	value;
-} __attribute__((packed));
-
-#define RDS_INFO_CONNECTION_FLAG_SENDING	0x01
-#define RDS_INFO_CONNECTION_FLAG_CONNECTING	0x02
-#define RDS_INFO_CONNECTION_FLAG_CONNECTED	0x04
-
-struct rds_info_connection {
-	u_int64_t	next_tx_seq;
-	u_int64_t	next_rx_seq;
-	__be32		laddr;
-	__be32		faddr;
-	u_int8_t	transport[15];		/* null term ascii */
-	u_int8_t	flags;
-} __attribute__((packed));
-
-struct rds_info_flow {
-	__be32		laddr;
-	__be32		faddr;
-	u_int32_t	bytes;
-	__be16		lport;
-	__be16		fport;
-} __attribute__((packed));
-
-#define RDS_INFO_MESSAGE_FLAG_ACK               0x01
-#define RDS_INFO_MESSAGE_FLAG_FAST_ACK          0x02
-
-struct rds_info_message {
-	u_int64_t	seq;
-	u_int32_t	len;
-	__be32		laddr;
-	__be32		faddr;
-	__be16		lport;
-	__be16		fport;
-	u_int8_t	flags;
-} __attribute__((packed));
-
-struct rds_info_socket {
-	u_int32_t	sndbuf;
-	__be32		bound_addr;
-	__be32		connected_addr;
-	__be16		bound_port;
-	__be16		connected_port;
-	u_int32_t	rcvbuf;
-	u_int64_t	inum;
-} __attribute__((packed));
-
-struct rds_info_tcp_socket {
-	__be32		local_addr;
-	__be16		local_port;
-	__be32		peer_addr;
-	__be16		peer_port;
-	u_int64_t	hdr_rem;
-	u_int64_t	data_rem;
-	u_int32_t	last_sent_nxt;
-	u_int32_t	last_expected_una;
-	u_int32_t	last_seen_una;
-} __attribute__((packed));
-
-#define RDS_IB_GID_LEN	16
-struct rds_info_ib_connection {
-	__be32		src_addr;
-	__be32		dst_addr;
-	uint8_t		src_gid[RDS_IB_GID_LEN];
-	uint8_t		dst_gid[RDS_IB_GID_LEN];
-
-	uint32_t	max_send_wr;
-	uint32_t	max_recv_wr;
-	uint32_t	max_send_sge;
-	uint32_t	rdma_fmr_max;
-	uint32_t	rdma_fmr_size;
-};
-
-/*
- * Congestion monitoring.
- * Congestion control in RDS happens at the host connection
- * level by exchanging a bitmap marking congested ports.
- * By default, a process sleeping in poll() is always woken
- * up when the congestion map is updated.
- * With explicit monitoring, an application can have more
- * fine-grained control.
- * The application installs a 64bit mask value in the socket,
- * where each bit corresponds to a group of ports.
- * When a congestion update arrives, RDS checks the set of
- * ports that are now uncongested against the list bit mask
- * installed in the socket, and if they overlap, we queue a
- * cong_notification on the socket.
- *
- * To install the congestion monitor bitmask, use RDS_CONG_MONITOR
- * with the 64bit mask.
- * Congestion updates are received via RDS_CMSG_CONG_UPDATE
- * control messages.
- *
- * The correspondence between bits and ports is
- *	1 << (portnum % 64)
- */
-#define RDS_CONG_MONITOR_SIZE	64
-#define RDS_CONG_MONITOR_BIT(port)  (((unsigned int) port) % RDS_CONG_MONITOR_SIZE)
-#define RDS_CONG_MONITOR_MASK(port) (1ULL << RDS_CONG_MONITOR_BIT(port))
-
-/*
- * RDMA related types
- */
-
-/*
- * This encapsulates a remote memory location.
- * In the current implementation, it contains the R_Key
- * of the remote memory region, and the offset into it
- * (so that the application does not have to worry about
- * alignment).
- */
-typedef u_int64_t	rds_rdma_cookie_t;
-
-struct rds_iovec {
-	u_int64_t	addr;
-	u_int64_t	bytes;
-};
-
-struct rds_get_mr_args {
-	struct rds_iovec vec;
-	u_int64_t	cookie_addr;
-	uint64_t	flags;
-};
-
-struct rds_free_mr_args {
-	rds_rdma_cookie_t cookie;
-	u_int64_t	flags;
-};
-
-struct rds_rdma_args {
-	rds_rdma_cookie_t cookie;
-	struct rds_iovec remote_vec;
-	u_int64_t	local_vec_addr;
-	u_int64_t	nr_local;
-	u_int64_t	flags;
-	u_int64_t	user_token;
-};
-
-struct rds_rdma_notify {
-	u_int64_t	user_token;
-	int32_t		status;
-};
-
-#define RDS_RDMA_SUCCESS	0
-#define RDS_RDMA_REMOTE_ERROR	1
-#define RDS_RDMA_CANCELED	2
-#define RDS_RDMA_DROPPED	3
-#define RDS_RDMA_OTHER_ERROR	4
-
-/*
- * Common set of flags for all RDMA related structs
- */
-#define RDS_RDMA_READWRITE	0x0001
-#define RDS_RDMA_FENCE		0x0002	/* use FENCE for immediate send */
-#define RDS_RDMA_INVALIDATE	0x0004	/* invalidate R_Key after freeing MR */
-#define RDS_RDMA_USE_ONCE	0x0008	/* free MR after use */
-#define RDS_RDMA_DONTWAIT	0x0010	/* Don't wait in SET_BARRIER */
-#define RDS_RDMA_NOTIFY_ME	0x0020	/* Notify when operation completes */
-
-#endif /* IB_RDS_H */
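
The header deleted above documents, among other things, the congestion-monitor port-to-bit mapping: each port corresponds to one bit of a 64-bit mask, so ports 64 apart share a bit. A trivial standalone C illustration of that arithmetic, reusing the removed macro definitions verbatim:

#include <stdio.h>

#define RDS_CONG_MONITOR_SIZE	64
#define RDS_CONG_MONITOR_BIT(port)  (((unsigned int) port) % RDS_CONG_MONITOR_SIZE)
#define RDS_CONG_MONITOR_MASK(port) (1ULL << RDS_CONG_MONITOR_BIT(port))

int main(void)
{
	unsigned int ports[] = { 1, 63, 64, 65, 4000 };

	for (unsigned int i = 0; i < sizeof(ports) / sizeof(ports[0]); i++)
		printf("port %5u -> bit %2u, mask 0x%016llx\n",
		       ports[i], RDS_CONG_MONITOR_BIT(ports[i]),
		       RDS_CONG_MONITOR_MASK(ports[i]));
	return 0;
}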

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_recv.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_recv.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_recv.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -83,7 +83,7 @@
 	struct rds_ib_recv_work *recv;
 	u32 i;
 
-	for(i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
+	for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
 		struct ib_sge *sge;
 
 		recv->r_ibinc = NULL;
@@ -97,17 +97,17 @@
 		sge = rds_ib_data_sge(ic, recv->r_sge);
 		sge->addr = 0;
 		sge->length = RDS_FRAG_SIZE;
-		sge->lkey = rds_ib_local_dma_lkey(ic);
+		sge->lkey = ic->i_mr->lkey;
 
 		sge = rds_ib_header_sge(ic, recv->r_sge);
 		sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
 		sge->length = sizeof(struct rds_header);
-		sge->lkey = rds_ib_local_dma_lkey(ic);
+		sge->lkey = ic->i_mr->lkey;
 	}
 }
 
 static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
-			          struct rds_ib_recv_work *recv)
+				  struct rds_ib_recv_work *recv)
 {
 	if (recv->r_ibinc) {
 		rds_inc_put(&recv->r_ibinc->ii_inc);
@@ -126,14 +126,14 @@
 {
 	u32 i;
 
-	for(i = 0; i < ic->i_recv_ring.w_nr; i++)
+	for (i = 0; i < ic->i_recv_ring.w_nr; i++)
 		rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
 
 	if (ic->i_frag.f_page)
 		rds_ib_frag_drop_page(&ic->i_frag);
 }
 
-static int rds_ib_recv_refill_one(struct rds_connection *conn, 
+static int rds_ib_recv_refill_one(struct rds_connection *conn,
 				  struct rds_ib_recv_work *recv,
 				  gfp_t kptr_gfp, gfp_t page_gfp)
 {
@@ -276,7 +276,7 @@
 	struct rds_ib_incoming *ibinc;
 	struct rds_page_frag *frag;
 	struct rds_page_frag *pos;
-		
+
 	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
 	rdsdebug("purging ibinc %p inc %p\n", ibinc, inc);
 
@@ -290,7 +290,7 @@
 void rds_ib_inc_free(struct rds_incoming *inc)
 {
 	struct rds_ib_incoming *ibinc;
-		
+
 	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
 
 	rds_ib_inc_purge(inc);
@@ -305,40 +305,40 @@
 			    size_t size)
 {
 	struct rds_ib_incoming *ibinc;
- 	struct rds_page_frag *frag;
+	struct rds_page_frag *frag;
 	struct iovec *iov = first_iov;
 	unsigned long to_copy;
- 	unsigned long frag_off = 0;
+	unsigned long frag_off = 0;
 	unsigned long iov_off = 0;
 	int copied = 0;
 	int ret;
 	u32 len;
 
 	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
- 	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
+	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
 	len = be32_to_cpu(inc->i_hdr.h_len);
 
 	while (copied < size && copied < len) {
- 		if (frag_off == RDS_FRAG_SIZE) {
- 			frag = list_entry(frag->f_item.next,
- 					  struct rds_page_frag, f_item);
- 			frag_off = 0;
+		if (frag_off == RDS_FRAG_SIZE) {
+			frag = list_entry(frag->f_item.next,
+					  struct rds_page_frag, f_item);
+			frag_off = 0;
 		}
 		while (iov_off == iov->iov_len) {
 			iov_off = 0;
 			iov++;
 		}
 
- 		to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off);
+		to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off);
 		to_copy = min_t(size_t, to_copy, size - copied);
 		to_copy = min_t(unsigned long, to_copy, len - copied);
 
 		rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
-			 "[%p, %lu] + %lu\n", 
+			 "[%p, %lu] + %lu\n",
 			 to_copy, iov->iov_base, iov->iov_len, iov_off,
 			 frag->f_page, frag->f_offset, frag_off);
 
-		/* XXX needs + offset for multiple recvs per page */ 
+		/* XXX needs + offset for multiple recvs per page */
 		ret = rds_page_copy_to_user(frag->f_page,
 					    frag->f_offset + frag_off,
 					    iov->iov_base + iov_off,
@@ -349,7 +349,7 @@
 		}
 
 		iov_off += to_copy;
- 		frag_off += to_copy;
+		frag_off += to_copy;
 		copied += to_copy;
 	}
 
@@ -364,7 +364,7 @@
 
 	sge->addr = ic->i_ack_dma;
 	sge->length = sizeof(struct rds_header);
-	sge->lkey = rds_ib_local_dma_lkey(ic);
+	sge->lkey = ic->i_mr->lkey;
 
 	wr->sg_list = sge;
 	wr->num_sge = 1;
@@ -466,7 +466,7 @@
 		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
 		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 
- 		rds_ib_stats_inc(s_ib_ack_send_failure);
+		rds_ib_stats_inc(s_ib_ack_send_failure);
 		/* Need to finesse this later. */
 		BUG();
 	} else
@@ -524,7 +524,7 @@
 	}
 
 	/* Can we get a send credit? */
-	if (!rds_ib_send_grab_credits(ic, 1, &adv_credits)) {
+	if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
 		rds_ib_stats_inc(s_ib_tx_throttle);
 		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
 		return;
@@ -555,6 +555,47 @@
 	return rds_ib_get_ack(ic);
 }
 
+static struct rds_header *rds_ib_get_header(struct rds_connection *conn,
+					    struct rds_ib_recv_work *recv,
+					    u32 data_len)
+{
+	struct rds_ib_connection *ic = conn->c_transport_data;
+	void *hdr_buff = &ic->i_recv_hdrs[recv - ic->i_recvs];
+	void *addr;
+	u32 misplaced_hdr_bytes;
+
+	/*
+	 * Support header at the front (RDS 3.1+) as well as header-at-end.
+	 *
+	 * Cases:
+	 * 1) header all in header buff (great!)
+	 * 2) header all in data page (copy all to header buff)
+	 * 3) header split across hdr buf + data page
+	 *    (move bit in hdr buff to end before copying other bit from data page)
+	 */
+	if (conn->c_version > RDS_PROTOCOL_3_0 || data_len == RDS_FRAG_SIZE)
+	        return hdr_buff;
+
+	if (data_len <= (RDS_FRAG_SIZE - sizeof(struct rds_header))) {
+		addr = kmap_atomic(recv->r_frag->f_page, KM_SOFTIRQ0);
+		memcpy(hdr_buff,
+		       addr + recv->r_frag->f_offset + data_len,
+		       sizeof(struct rds_header));
+		kunmap_atomic(addr, KM_SOFTIRQ0);
+		return hdr_buff;
+	}
+
+	misplaced_hdr_bytes = (sizeof(struct rds_header) - (RDS_FRAG_SIZE - data_len));
+
+	memmove(hdr_buff + misplaced_hdr_bytes, hdr_buff, misplaced_hdr_bytes);
+
+	addr = kmap_atomic(recv->r_frag->f_page, KM_SOFTIRQ0);
+	memcpy(hdr_buff, addr + recv->r_frag->f_offset + data_len,
+	       sizeof(struct rds_header) - misplaced_hdr_bytes);
+	kunmap_atomic(addr, KM_SOFTIRQ0);
+	return hdr_buff;
+}
+
 /*
  * It's kind of lame that we're copying from the posted receive pages into
  * long-lived bitmaps.  We could have posted the bitmaps and rdma written into
@@ -569,8 +610,8 @@
 	struct rds_cong_map *map;
 	unsigned int map_off;
 	unsigned int map_page;
- 	struct rds_page_frag *frag;
- 	unsigned long frag_off;
+	struct rds_page_frag *frag;
+	unsigned long frag_off;
 	unsigned long to_copy;
 	unsigned long copied;
 	uint64_t uncongested = 0;
@@ -584,7 +625,7 @@
 	map_page = 0;
 	map_off = 0;
 
- 	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
+	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
 	frag_off = 0;
 
 	copied = 0;
@@ -593,7 +634,7 @@
 		uint64_t *src, *dst;
 		unsigned int k;
 
- 		to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
+		to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
 		BUG_ON(to_copy & 7); /* Must be 64bit aligned. */
 
 		addr = kmap_atomic(frag->f_page, KM_SOFTIRQ0);
@@ -617,10 +658,10 @@
 		}
 
 		frag_off += to_copy;
- 		if (frag_off == RDS_FRAG_SIZE) {
- 			frag = list_entry(frag->f_item.next,
- 					  struct rds_page_frag, f_item);
- 			frag_off = 0;
+		if (frag_off == RDS_FRAG_SIZE) {
+			frag = list_entry(frag->f_item.next,
+					  struct rds_page_frag, f_item);
+			frag_off = 0;
 		}
 	}
 
@@ -637,14 +678,15 @@
  * garbage, and we can tell a small 8 byte fragment from an ACK frame.
  */
 struct rds_ib_ack_state {
-	u64	ack_next;
-	u64	ack_recv;
-	int	ack_required : 1,
-		ack_next_valid : 1,
-		ack_recv_valid : 1;
+	u64		ack_next;
+	u64		ack_recv;
+	unsigned int	ack_required:1;
+	unsigned int	ack_next_valid:1;
+	unsigned int	ack_recv_valid:1;
 };
+
 static void rds_ib_process_recv(struct rds_connection *conn,
-				struct rds_ib_recv_work *recv, u32 byte_len,
+				struct rds_ib_recv_work *recv, u32 data_len,
 				struct rds_ib_ack_state *state)
 {
 	struct rds_ib_connection *ic = conn->c_transport_data;
@@ -654,9 +696,9 @@
 	/* XXX shut down the connection if port 0,0 are seen? */
 
 	rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
-		 byte_len);
+		 data_len);
 
-	if (byte_len < sizeof(struct rds_header)) {
+	if (data_len < sizeof(struct rds_header)) {
 		rds_ib_conn_error(conn, "incoming message "
 		       "from %u.%u.%u.%u didn't inclue a "
 		       "header, disconnecting and "
@@ -664,9 +706,9 @@
 		       NIPQUAD(conn->c_faddr));
 		return;
 	}
-	byte_len -= sizeof(struct rds_header);
+	data_len -= sizeof(struct rds_header);
 
-	ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];
+	ihdr = rds_ib_get_header(conn, recv, data_len);
 
 	/* Validate the checksum. */
 	if (!rds_message_verify_checksum(ihdr)) {
@@ -686,7 +728,7 @@
 	if (ihdr->h_credit)
 		rds_ib_send_add_credits(conn, ihdr->h_credit);
 
-	if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && byte_len == 0) {
+	if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) {
 		/* This is an ACK-only packet. The fact that it gets
 		 * special treatment here is that historically, ACKs
 		 * were rather special beasts.
@@ -712,7 +754,7 @@
 	 * fragment has a header and starts a message.. copy its header
 	 * into the inc and save the inc so we can hang upcoming fragments
 	 * off its list.
-	 */ 
+	 */
 	if (ibinc == NULL) {
 		ibinc = recv->r_ibinc;
 		recv->r_ibinc = NULL;
@@ -785,7 +827,6 @@
 	struct ib_wc wc;
 	struct rds_ib_ack_state state = { 0, };
 	struct rds_ib_recv_work *recv;
-	int ret = 0;
 
 	rdsdebug("conn %p cq %p\n", conn, cq);
 
@@ -800,18 +841,15 @@
 		rds_ib_stats_inc(s_ib_rx_cq_event);
 
 		recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
-#if 0
-		if (recv->r_sge[1].addr == 0)
-			printk("recv CQ ring: alloc ptr=%u/ctr=%u free ptr=%u/ctr=%u\n",
-				ic->i_recv_ring.w_alloc_ptr,
-				ic->i_recv_ring.w_alloc_ctr,
-				ic->i_recv_ring.w_free_ptr,
-				atomic_read(&ic->i_recv_ring.w_free_ctr));
-#endif
 
 		rds_ib_recv_unmap_page(ic, recv);
 
-		if (rds_conn_up(conn)) {
+		/*
+		 * Also process recvs in connecting state because it is possible
+		 * to get a recv completion _before_ the rdmacm ESTABLISHED
+		 * event is processed.
+		 */
+		if (rds_conn_up(conn) || rds_conn_connecting(conn)) {
 			/* We expect errors as the qp is drained during shutdown */
 			if (wc.status == IB_WC_SUCCESS) {
 				rds_ib_process_recv(conn, recv, wc.byte_len, &state);
@@ -835,38 +873,16 @@
 	if (rds_conn_up(conn))
 		rds_ib_attempt_ack(ic);
 
-	/* 
-	 * XXX atomic is bad as it drains reserve pools, we should really
-	 * do some non-blocking alloc that doesn't touch the pools but
-	 * will fail.  Then leave it to the thread to get to reclaim
-	 * and alloc.
-	 */
-	
-	/* 
-	 * If we fail to refill we assume it's a allocation failure
-	 * from our use of GFP_ATOMIC and we want the thread to try again
-	 * immediately.  Similarly, if the thread is already trying to
-	 * refill we want it to try again immediately as it may have missed
-	 * the ring entry we just completed before it released the
-	 * i_recv_mutex.
-	 */
 	/* If we ever end up with a really empty receive ring, we're
 	 * in deep trouble, as the sender will definitely see RNR
 	 * timeouts. */
 	if (rds_ib_ring_empty(&ic->i_recv_ring))
 		rds_ib_stats_inc(s_ib_rx_ring_empty);
 
-	if (mutex_trylock(&ic->i_recv_mutex)) {
-		if (rds_ib_recv_refill(conn, GFP_ATOMIC,
-					 GFP_ATOMIC | __GFP_HIGHMEM, 0))
-			ret = -EAGAIN;
-		else
-			rds_ib_stats_inc(s_ib_rx_refill_from_cq);
-		mutex_unlock(&ic->i_recv_mutex);
-	} else 
-		ret = -EAGAIN;
-
-	if (ret)
+	/*
+	 * If the ring is running low, then schedule the thread to refill.
+	 */
+	if (rds_ib_ring_low(&ic->i_recv_ring))
 		queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
 }
 
@@ -888,6 +904,9 @@
 		rds_ib_stats_inc(s_ib_rx_refill_from_thread);
 	mutex_unlock(&ic->i_recv_mutex);
 
+	if (rds_conn_up(conn))
+		rds_ib_attempt_ack(ic);
+
 	return ret;
 }
 

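The rds_ib_get_header() helper added above has to reconcile the old RDS 3.0 header-at-end layout with the newer header-in-its-own-SGE layout: for 3.1+ peers (or a completely full fragment) the header is already in the header buffer, otherwise it either sits entirely in the data page or straddles the page and the header buffer. A rough user-space sketch of that case selection follows; the 4096-byte fragment and 48-byte header are example values only, not taken from this tree.

#include <stdio.h>

/* Example sizes only; the real ones come from RDS_FRAG_SIZE and
 * sizeof(struct rds_header). */
#define FRAG_SIZE	4096u
#define HDR_SIZE	48u

/* data_len is the payload length with the header already subtracted,
 * as in rds_ib_process_recv(). */
static void classify(unsigned int data_len)
{
	if (data_len == FRAG_SIZE)
		printf("%4u: header landed entirely in the header buffer\n",
		       data_len);
	else if (data_len <= FRAG_SIZE - HDR_SIZE)
		printf("%4u: header entirely in the data page at offset %u\n",
		       data_len, data_len);
	else
		printf("%4u: header split, %u byte(s) spilled into the header buffer\n",
		       data_len, HDR_SIZE - (FRAG_SIZE - data_len));
}

int main(void)
{
	classify(4096);		/* case 1 */
	classify(1024);		/* case 2 */
	classify(4080);		/* case 3: 48 - 16 = 32 misplaced bytes */
	return 0;
}

For RDS 3.1+ connections the first branch is taken unconditionally, mirroring the c_version check in the kernel code.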
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_ring.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_ring.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_ring.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -32,7 +32,6 @@
  */
 #include <linux/kernel.h>
 
-#define DEBUG
 #include "rds.h"
 #include "ib.h"
 
@@ -136,6 +135,11 @@
 	return __rds_ib_ring_empty(ring);
 }
 
+int rds_ib_ring_low(struct rds_ib_work_ring *ring)
+{
+	return __rds_ib_ring_used(ring) <= (ring->w_nr >> 1);
+}
+
 /*
  * returns the oldest alloced ring entry.  This will be the next one
  * freed.  This can't be called if there are none allocated.

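The new rds_ib_ring_low() predicate is what lets the receive completion handler above stop doing GFP_ATOMIC refills from the CQ path: the refill worker is only queued once half or more of the posted receives have been consumed. A small user-space model of that policy, with the ring bookkeeping collapsed into a single used counter and all names hypothetical:

#include <stdbool.h>
#include <stdio.h>

struct work_ring {
	unsigned int nr;	/* total entries */
	unsigned int used;	/* receives still posted */
};

/* Mirrors rds_ib_ring_low(): at most half of the ring is still in use. */
static bool ring_low(const struct work_ring *ring)
{
	return ring->used <= (ring->nr >> 1);
}

int main(void)
{
	struct work_ring ring = { .nr = 1024, .used = 514 };

	for (int i = 0; i < 3; i++) {
		ring.used--;		/* one receive completion polled */
		printf("%u/%u used: %s\n", ring.used, ring.nr,
		       ring_low(&ring) ? "queue the refill worker"
					: "leave refilling alone");
	}
	return 0;
}

With 1024 entries the worker is first queued once no more than 512 receives remain posted, so refilling moves out of interrupt context without letting the ring run dry.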
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_send.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_send.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_send.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -69,13 +69,13 @@
 	if (op->r_mapped) {
 		ib_dma_unmap_sg(ic->i_cm_id->device,
 			op->r_sg, op->r_nents,
-			op->r_write?  DMA_TO_DEVICE : DMA_FROM_DEVICE);
+			op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 		op->r_mapped = 0;
 	}
 }
 
-void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
-		          struct rds_ib_send_work *send,
+static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
+			  struct rds_ib_send_work *send,
 			  int wc_status)
 {
 	struct rds_message *rm = send->s_rm;
@@ -130,7 +130,7 @@
 	struct rds_ib_send_work *send;
 	u32 i;
 
-	for(i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
+	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
 		struct ib_sge *sge;
 
 		send->s_rm = NULL;
@@ -144,12 +144,12 @@
 		send->s_wr.ex.imm_data = 0;
 
 		sge = rds_ib_data_sge(ic, send->s_sge);
-		sge->lkey = rds_ib_local_dma_lkey(ic);
+		sge->lkey = ic->i_mr->lkey;
 
 		sge = rds_ib_header_sge(ic, send->s_sge);
 		sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
 		sge->length = sizeof(struct rds_header);
-		sge->lkey = rds_ib_local_dma_lkey(ic);
+		sge->lkey = ic->i_mr->lkey;
 	}
 }
 
@@ -158,7 +158,7 @@
 	struct rds_ib_send_work *send;
 	u32 i;
 
-	for(i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
+	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
 		if (send->s_wr.opcode == 0xdead)
 			continue;
 		if (send->s_rm)
@@ -169,7 +169,7 @@
 }
 
 /*
- * The _oldest/_free ring operations here race cleanly with the alloc/unalloc 
+ * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
  * operations performed in the send path.  As the sender allocs and potentially
  * unallocs the next free entry in the ring it doesn't alter which is
  * the next to be freed, which is what this is concerned with.
@@ -188,11 +188,10 @@
 	rdsdebug("cq %p conn %p\n", cq, conn);
 	rds_ib_stats_inc(s_ib_tx_cq_call);
 	ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
-	if (ret) {
+	if (ret)
 		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
-	}
 
-	while (ib_poll_cq(cq, 1, &wc) > 0 ) {
+	while (ib_poll_cq(cq, 1, &wc) > 0) {
 		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
 			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
 			 be32_to_cpu(wc.ex.imm_data));
@@ -227,7 +226,7 @@
 				if (printk_ratelimit())
 					printk(KERN_NOTICE
 						"RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
-						__FUNCTION__, send->s_wr.opcode);
+						__func__, send->s_wr.opcode);
 				break;
 			}
 
@@ -243,7 +242,8 @@
 			if (unlikely(wc.status == IB_WC_REM_ACCESS_ERR && send->s_op)) {
 				struct rds_message *rm;
 
-				if ((rm = rds_send_get_message(conn, send->s_op)) != NULL)
+				rm = rds_send_get_message(conn, send->s_op);
+				if (rm)
 					rds_ib_send_rdma_complete(rm, wc.status);
 			}
 
@@ -290,7 +290,7 @@
  * credits (see rds_ib_send_add_credits below).
  *
  * The RDS send code is essentially single-threaded; rds_send_xmit
- * grabs c_send_sem to ensure exclusive access to the send ring.
+ * grabs c_send_lock to ensure exclusive access to the send ring.
  * However, the ACK sending code is independent and can race with
  * message SENDs.
  *
@@ -311,7 +311,7 @@
  * and using atomic_cmpxchg when updating the two counters.
  */
 int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
-			     u32 wanted, u32 *adv_credits)
+			     u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
 {
 	unsigned int avail, posted, got = 0, advertise;
 	long oldval, newval;
@@ -329,7 +329,7 @@
 	rdsdebug("rds_ib_send_grab_credits(%u): credits=%u posted=%u\n",
 			wanted, avail, posted);
 
-	/* The last credit must be used to send a credit updated. */
+	/* The last credit must be used to send a credit update. */
 	if (avail && !posted)
 		avail--;
 
@@ -345,8 +345,13 @@
 	}
 	newval -= IB_SET_SEND_CREDITS(got);
 
-	if (got && posted) {
-		advertise = min_t(unsigned int, posted, RDS_MAX_ADV_CREDIT);
+	/*
+	 * If need_posted is non-zero, then the caller wants
+	 * the posted credits advertised regardless of whether any send
+	 * credits are available.
+	 */
+	if (posted && (got || need_posted)) {
+		advertise = min_t(unsigned int, posted, max_posted);
 		newval -= IB_SET_POST_CREDITS(advertise);
 	}
 
@@ -368,7 +373,7 @@
 	rdsdebug("rds_ib_send_add_credits(%u): current=%u%s\n",
 			credits,
 			IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
-			test_bit(RDS_LL_SEND_FULL, &conn->c_flags)? ", ll_send_full" : "");
+			test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");
 
 	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
 	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
@@ -425,7 +430,7 @@
 		sge = rds_ib_data_sge(ic, send->s_sge);
 		sge->addr = buffer;
 		sge->length = length;
-		sge->lkey = rds_ib_local_dma_lkey(ic);
+		sge->lkey = ic->i_mr->lkey;
 
 		sge = rds_ib_header_sge(ic, send->s_sge);
 	} else {
@@ -437,7 +442,7 @@
 
 	sge->addr = ic->i_send_hdrs_dma + (pos * sizeof(struct rds_header));
 	sge->length = sizeof(struct rds_header);
-	sge->lkey = rds_ib_local_dma_lkey(ic);
+	sge->lkey = ic->i_mr->lkey;
 }
 
 /*
@@ -447,14 +452,14 @@
  * in the IB work requests.  We translate the scatterlist into a series
  * of work requests that fragment the message.  These work requests complete
  * in order so we pass ownership of the message to the completion handler
- * once we send the final fragment. 
+ * once we send the final fragment.
  *
- * The RDS core uses the c_send_sem to only enter this function once
+ * The RDS core uses the c_send_lock to only enter this function once
  * per connection.  This makes sure that the tx ring alloc/unalloc pairs
  * don't get out of sync and confuse the ring.
  */
 int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
-	        unsigned int hdr_off, unsigned int sg, unsigned int off)
+		unsigned int hdr_off, unsigned int sg, unsigned int off)
 {
 	struct rds_ib_connection *ic = conn->c_transport_data;
 	struct ib_device *dev = ic->i_cm_id->device;
@@ -467,10 +472,12 @@
 	u32 i;
 	u32 work_alloc;
 	u32 credit_alloc;
+	u32 posted;
 	u32 adv_credits = 0;
 	int send_flags = 0;
 	int sent;
 	int ret;
+	int flow_controlled = 0;
 
 	BUG_ON(off % RDS_FRAG_SIZE);
 	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));
@@ -491,13 +498,15 @@
 
 	credit_alloc = work_alloc;
 	if (ic->i_flowctl) {
-		credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &adv_credits);
+		credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
+		adv_credits += posted;
 		if (credit_alloc < work_alloc) {
 			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
 			work_alloc = credit_alloc;
+			flow_controlled++;
 		}
 		if (work_alloc == 0) {
-			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
+			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
 			rds_ib_stats_inc(s_ib_tx_throttle);
 			ret = -ENOMEM;
 			goto out;
@@ -558,6 +567,13 @@
 		 * should call rds_ib_ring_alloc first. */
 		rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
 		rds_message_make_checksum(&rm->m_inc.i_hdr);
+
+		/*
+		 * Update adv_credits since we reset the ACK_REQUIRED bit.
+		 */
+		rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
+		adv_credits += posted;
+		BUG_ON(adv_credits > 255);
 	} else if (ic->i_rm != rm)
 		BUG();
 
@@ -593,7 +609,7 @@
 	}
 
 	/* if there's data reference it with a chain of work reqs */
-	for(; i < work_alloc && scat != &rm->m_sg[rm->m_count]; i++) {
+	for (; i < work_alloc && scat != &rm->m_sg[rm->m_count]; i++) {
 		unsigned int len;
 
 		send = &ic->i_sends[pos];
@@ -603,10 +619,11 @@
 				ib_sg_dma_address(dev, scat) + off, len,
 				send_flags);
 
-                /* 
-                 * We want to delay signaling completions just enough to get 
-                 * the batching benefits but not so much that we create dead time on the wire. 
-                 */
+		/*
+		 * We want to delay signaling completions just enough to get
+		 * the batching benefits but not so much that we create dead time
+		 * on the wire.
+		 */
 		if (ic->i_unsignaled_wrs-- == 0) {
 			ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
 			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
@@ -618,6 +635,13 @@
 			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
 		}
 
+		/*
+		 * Always signal the last one if we're stopping due to flow control.
+		 */
+		if (flow_controlled && i == (work_alloc-1)) {
+			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
+		}
+
 		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
 			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);
 
@@ -680,8 +704,8 @@
 	/* XXX need to worry about failed_wr and partial sends. */
 	failed_wr = &first->s_wr;
 	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
-	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic, 
-		 first, &first->s_wr, ret, failed_wr); 
+	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
+		 first, &first->s_wr, ret, failed_wr);
 	BUG_ON(failed_wr != &first->s_wr);
 	if (ret) {
 		printk(KERN_WARNING "RDS/IB: ib_post_send to %u.%u.%u.%u "
@@ -759,7 +783,7 @@
 	sent = 0;
 	num_sge = op->r_count;
 
-	for ( i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++ ) {
+	for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) {
 		send->s_wr.send_flags = 0;
 		send->s_queued = jiffies;
 		/*
@@ -779,9 +803,9 @@
 		if (num_sge > rds_ibdev->max_sge) {
 			send->s_wr.num_sge = rds_ibdev->max_sge;
 			num_sge -= rds_ibdev->max_sge;
+		} else {
+			send->s_wr.num_sge = num_sge;
 		}
-		else
-			send->s_wr.num_sge = num_sge;
 
 		send->s_wr.next = NULL;
 
@@ -793,7 +817,7 @@
 			send->s_sge[j].addr =
 				 ib_sg_dma_address(ic->i_cm_id->device, scat);
 			send->s_sge[j].length = len;
-			send->s_sge[j].lkey = rds_ib_local_dma_lkey(ic);
+			send->s_sge[j].lkey = ic->i_mr->lkey;
 
 			sent += len;
 			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);
@@ -811,9 +835,8 @@
 	}
 
 	/* if we finished the message then send completion owns it */
-	if (scat == &op->r_sg[op->r_count]) {
+	if (scat == &op->r_sg[op->r_count])
 		prev->s_wr.send_flags = IB_SEND_SIGNALED;
-	}
 
 	if (i < work_alloc) {
 		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);

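The reworked credit handling above keeps the two counters described in the comments - send credits we may consume and freshly posted receive credits we still owe the peer - packed into a single atomic word, 16 bits each, updated with a compare-and-swap retry loop (the IB_GET/SET_*_CREDITS helpers appear in iw.h further down). A self-contained user-space sketch of that scheme, with the need_posted and last-credit corner cases left out:

#include <stdatomic.h>
#include <stdio.h>

#define GET_SEND_CREDITS(v)	((v) & 0xffff)
#define GET_POST_CREDITS(v)	((v) >> 16)
#define SET_SEND_CREDITS(v)	((v) & 0xffff)
#define SET_POST_CREDITS(v)	((v) << 16)

static _Atomic unsigned int credits;

/* Take up to 'wanted' send credits and up to 'max_posted' receive credits
 * to advertise, in one atomic update; returns the send credits taken. */
static unsigned int grab_credits(unsigned int wanted, unsigned int *adv,
				 unsigned int max_posted)
{
	unsigned int oldval, newval, got, advertise;

	do {
		oldval = newval = atomic_load(&credits);
		got = GET_SEND_CREDITS(oldval);
		if (got > wanted)
			got = wanted;
		advertise = GET_POST_CREDITS(oldval);
		if (advertise > max_posted)
			advertise = max_posted;
		newval -= SET_SEND_CREDITS(got) + SET_POST_CREDITS(advertise);
	} while (!atomic_compare_exchange_weak(&credits, &oldval, newval));

	*adv = advertise;
	return got;
}

int main(void)
{
	unsigned int adv, got;

	/* 7 send credits available, 3 posted receives not yet advertised. */
	atomic_store(&credits, SET_SEND_CREDITS(7) | SET_POST_CREDITS(3));

	got = grab_credits(4, &adv, 255);
	printf("got %u, advertising %u, word now 0x%08x\n",
	       got, adv, atomic_load(&credits));	/* got 4, adv 3, 0x00000003 */
	return 0;
}

Because both counters live in one word, the independent ACK path and the data send path can never double-spend a send credit or lose a posted-credit advertisement, which is the race the atomic_cmpxchg comment is guarding against.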
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_stats.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_stats.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_stats.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -37,7 +37,7 @@
 #include "rds.h"
 #include "ib.h"
 
-RDS_DEFINE_PER_CPU(struct rds_ib_statistics, rds_ib_stats) 
+RDS_DEFINE_PER_CPU(struct rds_ib_statistics, rds_ib_stats)
 	____cacheline_aligned;
 
 static char *rds_ib_stat_names[] = {

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_sysctl.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_sysctl.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/ib_sysctl.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -53,7 +53,17 @@
 static unsigned long rds_ib_sysctl_max_unsig_bytes_min = 1;
 static unsigned long rds_ib_sysctl_max_unsig_bytes_max = ~0UL;
 
-unsigned int rds_ib_sysctl_flow_control = 1;
+/*
+ * This sysctl does nothing.
+ *
+ * Backwards compatibility with RDS 3.0 wire protocol
+ * disables initial FC credit exchange.
+ * If it's ever possible to drop 3.0 support,
+ * setting this to 1 and moving init/refill of send/recv
+ * rings from ib_cm_connect_complete() back into ib_setup_qp()
+ * will cause credits to be added before protocol negotiation.
+ */
+unsigned int rds_ib_sysctl_flow_control = 0;
 
 ctl_table rds_ib_sysctl_table[] = {
 	{

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/info.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/info.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/info.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -65,7 +65,7 @@
 	unsigned long offset;
 };
 
-static spinlock_t rds_info_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(rds_info_lock);
 static rds_info_func rds_info_funcs[RDS_INFO_LAST - RDS_INFO_FIRST + 1];
 
 void rds_info_register_func(int optname, rds_info_func func)
@@ -79,6 +79,7 @@
 	rds_info_funcs[offset] = func;
 	spin_unlock(&rds_info_lock);
 }
+EXPORT_SYMBOL_GPL(rds_info_register_func);
 
 void rds_info_deregister_func(int optname, rds_info_func func)
 {
@@ -91,6 +92,7 @@
 	rds_info_funcs[offset] = NULL;
 	spin_unlock(&rds_info_lock);
 }
+EXPORT_SYMBOL_GPL(rds_info_deregister_func);
 
 /*
  * Typically we hold an atomic kmap across multiple rds_info_copy() calls
@@ -183,7 +185,7 @@
 	nr_pages = (PAGE_ALIGN(start + len) - (start & PAGE_MASK))
 			>> PAGE_SHIFT;
 
-	pages = kmalloc(nr_pages * sizeof (struct page *), GFP_KERNEL);
+	pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
 	if (pages == NULL) {
 		ret = -ENOMEM;
 		goto out;

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/info.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/info.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/info.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,19 +1,6 @@
 #ifndef _RDS_INFO_H
-#define _RDS_INFO_H 
+#define _RDS_INFO_H
 
-/* FIXME remove these */
-#define RDS_INFO_COUNTERS		10000
-#define RDS_INFO_CONNECTIONS		10001
-#define RDS_INFO_FLOWS			10002
-#define RDS_INFO_SEND_MESSAGES		10003
-#define RDS_INFO_RETRANS_MESSAGES	10004
-#define RDS_INFO_RECV_MESSAGES		10005
-#define RDS_INFO_SOCKETS		10006
-#define RDS_INFO_TCP_SOCKETS		10007
-
-#define RDS_INFO_FIRST		RDS_INFO_COUNTERS
-#define RDS_INFO_LAST		RDS_INFO_CONNECTION_STATS
-
 struct rds_info_lengths {
 	unsigned int	nr;
 	unsigned int	each;
@@ -26,7 +13,7 @@
  * of the available info source.  If the snapshot fits in @len then it
  * should be copied using @iter.  The caller will deduce if it was copied
  * or not by comparing the lengths.
- */ 
+ */
 typedef void (*rds_info_func)(struct socket *sock, unsigned int len,
 			      struct rds_info_iterator *iter,
 			      struct rds_info_lengths *lens);

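The rds_info_func contract spelled out in the comment above is: the handler always reports the full snapshot size through the lengths structure, only copies the data when the caller's buffer is big enough, and the caller tells the two outcomes apart by comparing lengths (rds_iw_ic_info added below follows this pattern via rds_for_each_conn_info). A user-space model of that handshake, with the record type and names purely hypothetical:

#include <stdio.h>
#include <string.h>

struct info_lengths {
	unsigned int nr;	/* records available */
	unsigned int each;	/* size of one record */
};

struct counter_rec { char name[16]; unsigned long value; };

/* Handler: always fill lens, copy the snapshot only if it fits in len. */
static void counters_info(void *buf, unsigned int len, struct info_lengths *lens)
{
	static const struct counter_rec snapshot[2] = {
		{ "tx_cq_event", 10 }, { "rx_cq_event", 12 },
	};

	lens->nr = 2;
	lens->each = sizeof(struct counter_rec);
	if ((unsigned long)lens->nr * lens->each <= len)
		memcpy(buf, snapshot, sizeof(snapshot));
}

int main(void)
{
	struct counter_rec buf[4];
	struct info_lengths lens;

	counters_info(buf, sizeof(buf), &lens);
	if ((unsigned long)lens.nr * lens.each <= sizeof(buf))
		printf("copied %u records of %u bytes each\n", lens.nr, lens.each);
	else
		printf("need %lu bytes, caller retries with a bigger buffer\n",
		       (unsigned long)lens.nr * lens.each);
	return 0;
}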
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw.c	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2006 Oracle.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/in.h>
+#include <linux/if.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/if_arp.h>
+#include <linux/delay.h>
+
+#include "rds.h"
+#include "iw.h"
+
+unsigned int fastreg_pool_size = RDS_FASTREG_POOL_SIZE;
+unsigned int fastreg_message_size = RDS_FASTREG_SIZE + 1; /* +1 allows for unaligned MRs */
+
+module_param(fastreg_pool_size, int, 0444);
+MODULE_PARM_DESC(fastreg_pool_size, " Max number of fastreg MRs per device");
+module_param(fastreg_message_size, int, 0444);
+MODULE_PARM_DESC(fastreg_message_size, " Max size of a RDMA transfer (fastreg MRs)");
+
+struct list_head rds_iw_devices;
+
+/* NOTE: if also grabbing iwdev lock, grab this first */
+DEFINE_SPINLOCK(iw_nodev_conns_lock);
+LIST_HEAD(iw_nodev_conns);
+
+void rds_iw_add_one(struct ib_device *device)
+{
+	struct rds_iw_device *rds_iwdev;
+	struct ib_device_attr *dev_attr;
+
+	/* Only handle iwarp devices */
+	if (device->node_type != RDMA_NODE_RNIC)
+		return;
+
+	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
+	if (!dev_attr)
+		return;
+
+	if (ib_query_device(device, dev_attr)) {
+		rdsdebug("Query device failed for %s\n", device->name);
+		goto free_attr;
+	}
+
+	rds_iwdev = kmalloc(sizeof *rds_iwdev, GFP_KERNEL);
+	if (!rds_iwdev)
+		goto free_attr;
+
+	spin_lock_init(&rds_iwdev->spinlock);
+
+	rds_iwdev->dma_local_lkey = !!(dev_attr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY);
+	rds_iwdev->max_wrs = dev_attr->max_qp_wr;
+	rds_iwdev->max_sge = min(dev_attr->max_sge, RDS_IW_MAX_SGE);
+
+	rds_iwdev->page_shift = max(PAGE_SHIFT, ffs(dev_attr->page_size_cap) - 1);
+
+	rds_iwdev->dev = device;
+	rds_iwdev->pd = ib_alloc_pd(device);
+	if (IS_ERR(rds_iwdev->pd))
+		goto free_dev;
+
+	if (!rds_iwdev->dma_local_lkey) {
+		if (device->node_type != RDMA_NODE_RNIC) {
+			rds_iwdev->mr = ib_get_dma_mr(rds_iwdev->pd,
+						IB_ACCESS_LOCAL_WRITE);
+		} else {
+			rds_iwdev->mr = ib_get_dma_mr(rds_iwdev->pd,
+						IB_ACCESS_REMOTE_READ |
+						IB_ACCESS_REMOTE_WRITE |
+						IB_ACCESS_LOCAL_WRITE);
+		}
+		if (IS_ERR(rds_iwdev->mr))
+			goto err_pd;
+	} else
+		rds_iwdev->mr = NULL;
+
+	rds_iwdev->mr_pool = rds_iw_create_mr_pool(rds_iwdev);
+	if (IS_ERR(rds_iwdev->mr_pool)) {
+		rds_iwdev->mr_pool = NULL;
+		goto err_mr;
+	}
+
+	INIT_LIST_HEAD(&rds_iwdev->cm_id_list);
+	INIT_LIST_HEAD(&rds_iwdev->conn_list);
+	list_add_tail(&rds_iwdev->list, &rds_iw_devices);
+
+	ib_set_client_data(device, &rds_iw_client, rds_iwdev);
+
+	goto free_attr;
+
+err_mr:
+	if (rds_iwdev->mr)
+		ib_dereg_mr(rds_iwdev->mr);
+err_pd:
+	ib_dealloc_pd(rds_iwdev->pd);
+free_dev:
+	kfree(rds_iwdev);
+free_attr:
+	kfree(dev_attr);
+}
+
+void rds_iw_remove_one(struct ib_device *device)
+{
+	struct rds_iw_device *rds_iwdev;
+	struct rds_iw_cm_id *i_cm_id, *next;
+
+	rds_iwdev = ib_get_client_data(device, &rds_iw_client);
+	if (!rds_iwdev)
+		return;
+
+	spin_lock_irq(&rds_iwdev->spinlock);
+	list_for_each_entry_safe(i_cm_id, next, &rds_iwdev->cm_id_list, list) {
+		list_del(&i_cm_id->list);
+		kfree(i_cm_id);
+	}
+	spin_unlock_irq(&rds_iwdev->spinlock);
+
+	rds_iw_destroy_conns(rds_iwdev);
+
+	if (rds_iwdev->mr_pool)
+		rds_iw_destroy_mr_pool(rds_iwdev->mr_pool);
+
+	if (rds_iwdev->mr)
+		ib_dereg_mr(rds_iwdev->mr);
+
+	while (ib_dealloc_pd(rds_iwdev->pd)) {
+		rdsdebug("%s-%d Failed to dealloc pd %p\n", __func__, __LINE__, rds_iwdev->pd);
+		msleep(1);
+	}
+
+	list_del(&rds_iwdev->list);
+	kfree(rds_iwdev);
+}
+
+struct ib_client rds_iw_client = {
+	.name   = "rds_iw",
+	.add    = rds_iw_add_one,
+	.remove = rds_iw_remove_one
+};
+
+static int rds_iw_conn_info_visitor(struct rds_connection *conn,
+				    void *buffer)
+{
+	struct rds_info_rdma_connection *iinfo = buffer;
+	struct rds_iw_connection *ic;
+
+	/* We will only ever look at IB transports */
+	if (conn->c_trans != &rds_iw_transport)
+		return 0;
+
+	iinfo->src_addr = conn->c_laddr;
+	iinfo->dst_addr = conn->c_faddr;
+
+	memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid));
+	memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid));
+	if (rds_conn_state(conn) == RDS_CONN_UP) {
+		struct rds_iw_device *rds_iwdev;
+		struct rdma_dev_addr *dev_addr;
+
+		ic = conn->c_transport_data;
+		dev_addr = &ic->i_cm_id->route.addr.dev_addr;
+
+		ib_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
+		ib_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);
+
+		rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
+		iinfo->max_send_wr = ic->i_send_ring.w_nr;
+		iinfo->max_recv_wr = ic->i_recv_ring.w_nr;
+		iinfo->max_send_sge = rds_iwdev->max_sge;
+		rds_iw_get_mr_info(rds_iwdev, iinfo);
+	}
+	return 1;
+}
+
+static void rds_iw_ic_info(struct socket *sock, unsigned int len,
+			   struct rds_info_iterator *iter,
+			   struct rds_info_lengths *lens)
+{
+	rds_for_each_conn_info(sock, len, iter, lens,
+				rds_iw_conn_info_visitor,
+				sizeof(struct rds_info_rdma_connection));
+}
+
+
+/*
+ * Early RDS/IB was built to only bind to an address if there is an IPoIB
+ * device with that address set.
+ *
+ * If it were me, I'd advocate for something more flexible.  Sending and
+ * receiving should be device-agnostic.  Transports would try and maintain
+ * connections between peers who have messages queued.  Userspace would be
+ * allowed to influence which paths have priority.  We could call userspace
+ * asserting this policy "routing".
+ */
+static int rds_iw_laddr_check(__be32 addr)
+{
+	int ret;
+	struct rdma_cm_id *cm_id;
+	struct sockaddr_in sin;
+
+	/* Create a CMA ID and try to bind it. This catches both
+	 * IB and iWARP capable NICs.
+	 */
+	cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP);
+	if (IS_ERR(cm_id))
+		return PTR_ERR(cm_id);
+
+	memset(&sin, 0, sizeof(sin));
+	sin.sin_family = AF_INET;
+	sin.sin_addr.s_addr = addr;
+
+	/* rdma_bind_addr will only succeed for IB & iWARP devices */
+	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
+	/* due to this, we will claim to support IB devices unless we
+	   check node_type. */
+	if (ret || cm_id->device->node_type != RDMA_NODE_RNIC)
+		ret = -EADDRNOTAVAIL;
+
+	rdsdebug("addr %u.%u.%u.%u ret %d node type %d\n",
+		NIPQUAD(addr), ret,
+		cm_id->device ? cm_id->device->node_type : -1);
+
+	rdma_destroy_id(cm_id);
+
+	return ret;
+}
+
+void rds_iw_exit(void)
+{
+	rds_info_deregister_func(RDS_INFO_IWARP_CONNECTIONS, rds_iw_ic_info);
+	rds_iw_destroy_nodev_conns();
+	ib_unregister_client(&rds_iw_client);
+	rds_iw_sysctl_exit();
+	rds_iw_recv_exit();
+	rds_trans_unregister(&rds_iw_transport);
+}
+
+struct rds_transport rds_iw_transport = {
+	.laddr_check		= rds_iw_laddr_check,
+	.xmit_complete		= rds_iw_xmit_complete,
+	.xmit			= rds_iw_xmit,
+	.xmit_cong_map		= NULL,
+	.xmit_rdma		= rds_iw_xmit_rdma,
+	.recv			= rds_iw_recv,
+	.conn_alloc		= rds_iw_conn_alloc,
+	.conn_free		= rds_iw_conn_free,
+	.conn_connect		= rds_iw_conn_connect,
+	.conn_shutdown		= rds_iw_conn_shutdown,
+	.inc_copy_to_user	= rds_iw_inc_copy_to_user,
+	.inc_purge		= rds_iw_inc_purge,
+	.inc_free		= rds_iw_inc_free,
+	.cm_initiate_connect	= rds_iw_cm_initiate_connect,
+	.cm_handle_connect	= rds_iw_cm_handle_connect,
+	.cm_connect_complete	= rds_iw_cm_connect_complete,
+	.stats_info_copy	= rds_iw_stats_info_copy,
+	.exit			= rds_iw_exit,
+	.get_mr			= rds_iw_get_mr,
+	.sync_mr		= rds_iw_sync_mr,
+	.free_mr		= rds_iw_free_mr,
+	.flush_mrs		= rds_iw_flush_mrs,
+	.t_owner		= THIS_MODULE,
+	.t_name			= "iwarp",
+	.t_prefer_loopback	= 1,
+};
+
+int __init rds_iw_init(void)
+{
+	int ret;
+
+	INIT_LIST_HEAD(&rds_iw_devices);
+
+	ret = ib_register_client(&rds_iw_client);
+	if (ret)
+		goto out;
+
+	ret = rds_iw_sysctl_init();
+	if (ret)
+		goto out_ibreg;
+
+	ret = rds_iw_recv_init();
+	if (ret)
+		goto out_sysctl;
+
+	ret = rds_trans_register(&rds_iw_transport);
+	if (ret)
+		goto out_recv;
+
+	rds_info_register_func(RDS_INFO_IWARP_CONNECTIONS, rds_iw_ic_info);
+
+	goto out;
+
+out_recv:
+	rds_iw_recv_exit();
+out_sysctl:
+	rds_iw_sysctl_exit();
+out_ibreg:
+	ib_unregister_client(&rds_iw_client);
+out:
+	return ret;
+}
+
+MODULE_LICENSE("GPL");
+

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,399 @@
+#ifndef _RDS_IW_H
+#define _RDS_IW_H
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_cm.h>
+#include "rds.h"
+#include "rdma_transport.h"
+
+#define RDS_FASTREG_SIZE		20
+#define RDS_FASTREG_POOL_SIZE		2048
+
+#define RDS_IW_MAX_SGE			8
+#define RDS_IW_RECV_SGE 		2
+
+#define RDS_IW_DEFAULT_RECV_WR		1024
+#define RDS_IW_DEFAULT_SEND_WR		256
+
+#define RDS_IW_SUPPORTED_PROTOCOLS	0x00000003	/* minor versions supported */
+
+extern struct list_head rds_iw_devices;
+
+/*
+ * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
+ * try to minimize the amount of memory tied up in both the device and
+ * socket receive queues.
+ */
+/* page offset of the final full frag that fits in the page */
+#define RDS_PAGE_LAST_OFF (((PAGE_SIZE  / RDS_FRAG_SIZE) - 1) * RDS_FRAG_SIZE)
+struct rds_page_frag {
+	struct list_head	f_item;
+	struct page		*f_page;
+	unsigned long		f_offset;
+	dma_addr_t 		f_mapped;
+};
+
+struct rds_iw_incoming {
+	struct list_head	ii_frags;
+	struct rds_incoming	ii_inc;
+};
+
+struct rds_iw_connect_private {
+	/* Add new fields at the end, and don't permute existing fields. */
+	__be32			dp_saddr;
+	__be32			dp_daddr;
+	u8			dp_protocol_major;
+	u8			dp_protocol_minor;
+	__be16			dp_protocol_minor_mask; /* bitmask */
+	__be32			dp_reserved1;
+	__be64			dp_ack_seq;
+	__be32			dp_credit;		/* non-zero enables flow ctl */
+};
+
+struct rds_iw_scatterlist {
+	struct scatterlist	*list;
+	unsigned int		len;
+	int			dma_len;
+	unsigned int		dma_npages;
+	unsigned int		bytes;
+};
+
+struct rds_iw_mapping {
+	spinlock_t		m_lock;	/* protect the mapping struct */
+	struct list_head	m_list;
+	struct rds_iw_mr	*m_mr;
+	uint32_t		m_rkey;
+	struct rds_iw_scatterlist m_sg;
+};
+
+struct rds_iw_send_work {
+	struct rds_message	*s_rm;
+
+	/* We should really put these into a union: */
+	struct rds_rdma_op	*s_op;
+	struct rds_iw_mapping	*s_mapping;
+	struct ib_mr		*s_mr;
+	struct ib_fast_reg_page_list *s_page_list;
+	unsigned char		s_remap_count;
+
+	struct ib_send_wr	s_wr;
+	struct ib_sge		s_sge[RDS_IW_MAX_SGE];
+	unsigned long		s_queued;
+};
+
+struct rds_iw_recv_work {
+	struct rds_iw_incoming 	*r_iwinc;
+	struct rds_page_frag	*r_frag;
+	struct ib_recv_wr	r_wr;
+	struct ib_sge		r_sge[2];
+};
+
+struct rds_iw_work_ring {
+	u32		w_nr;
+	u32		w_alloc_ptr;
+	u32		w_alloc_ctr;
+	u32		w_free_ptr;
+	atomic_t	w_free_ctr;
+};
+
+struct rds_iw_device;
+
+struct rds_iw_connection {
+
+	struct list_head	iw_node;
+	struct rds_iw_device 	*rds_iwdev;
+	struct rds_connection	*conn;
+
+	/* alphabet soup, IBTA style */
+	struct rdma_cm_id	*i_cm_id;
+	struct ib_pd		*i_pd;
+	struct ib_mr		*i_mr;
+	struct ib_cq		*i_send_cq;
+	struct ib_cq		*i_recv_cq;
+
+	/* tx */
+	struct rds_iw_work_ring	i_send_ring;
+	struct rds_message	*i_rm;
+	struct rds_header	*i_send_hdrs;
+	u64			i_send_hdrs_dma;
+	struct rds_iw_send_work *i_sends;
+
+	/* rx */
+	struct mutex		i_recv_mutex;
+	struct rds_iw_work_ring	i_recv_ring;
+	struct rds_iw_incoming	*i_iwinc;
+	u32			i_recv_data_rem;
+	struct rds_header	*i_recv_hdrs;
+	u64			i_recv_hdrs_dma;
+	struct rds_iw_recv_work *i_recvs;
+	struct rds_page_frag	i_frag;
+	u64			i_ack_recv;	/* last ACK received */
+
+	/* sending acks */
+	unsigned long		i_ack_flags;
+#ifndef KERNEL_HAS_ATOMIC64
+	spinlock_t		i_ack_lock;	/* protect i_ack_next */
+	u64			i_ack_next;	/* next ACK to send */
+#else
+	atomic64_t		i_ack_next;	/* next ACK to send */
+#endif
+	struct rds_header	*i_ack;
+	struct ib_send_wr	i_ack_wr;
+	struct ib_sge		i_ack_sge;
+	u64			i_ack_dma;
+	unsigned long		i_ack_queued;
+
+	/* Flow control related information
+	 *
+	 * Our algorithm uses a pair of variables that we need to access
+	 * atomically - one for the send credits, and one for the posted
+	 * recv credits we need to transfer to the remote.
+	 * Rather than protect them using a slow spinlock, we put both into
+	 * a single atomic_t and update it using cmpxchg
+	 */
+	atomic_t		i_credits;
+
+	/* Protocol version specific information */
+	unsigned int		i_flowctl:1;	/* enable/disable flow ctl */
+	unsigned int		i_dma_local_lkey:1;
+	unsigned int		i_fastreg_posted:1; /* fastreg posted on this connection */
+	/* Batched completions */
+	unsigned int		i_unsignaled_wrs;
+	long			i_unsignaled_bytes;
+};
+
+/* This assumes that atomic_t is at least 32 bits */
+#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
+#define IB_GET_POST_CREDITS(v)	((v) >> 16)
+#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
+#define IB_SET_POST_CREDITS(v)	((v) << 16)
+
+struct rds_iw_cm_id {
+	struct list_head	list;
+	struct rdma_cm_id	*cm_id;
+};
+
+struct rds_iw_device {
+	struct list_head	list;
+	struct list_head	cm_id_list;
+	struct list_head	conn_list;
+	struct ib_device	*dev;
+	struct ib_pd		*pd;
+	struct ib_mr		*mr;
+	struct rds_iw_mr_pool	*mr_pool;
+	int			page_shift;
+	int			max_sge;
+	unsigned int		max_wrs;
+	unsigned int		dma_local_lkey:1;
+	spinlock_t		spinlock;	/* protect the above */
+};
+
+/* bits for i_ack_flags */
+#define IB_ACK_IN_FLIGHT	0
+#define IB_ACK_REQUESTED	1
+
+/* Magic WR_ID for ACKs */
+#define RDS_IW_ACK_WR_ID	((u64)0xffffffffffffffffULL)
+#define RDS_IW_FAST_REG_WR_ID	((u64)0xefefefefefefefefULL)
+#define RDS_IW_LOCAL_INV_WR_ID	((u64)0xdfdfdfdfdfdfdfdfULL)
+
+struct rds_iw_statistics {
+	uint64_t	s_iw_connect_raced;
+	uint64_t	s_iw_listen_closed_stale;
+	uint64_t	s_iw_tx_cq_call;
+	uint64_t	s_iw_tx_cq_event;
+	uint64_t	s_iw_tx_ring_full;
+	uint64_t	s_iw_tx_throttle;
+	uint64_t	s_iw_tx_sg_mapping_failure;
+	uint64_t	s_iw_tx_stalled;
+	uint64_t	s_iw_tx_credit_updates;
+	uint64_t	s_iw_rx_cq_call;
+	uint64_t	s_iw_rx_cq_event;
+	uint64_t	s_iw_rx_ring_empty;
+	uint64_t	s_iw_rx_refill_from_cq;
+	uint64_t	s_iw_rx_refill_from_thread;
+	uint64_t	s_iw_rx_alloc_limit;
+	uint64_t	s_iw_rx_credit_updates;
+	uint64_t	s_iw_ack_sent;
+	uint64_t	s_iw_ack_send_failure;
+	uint64_t	s_iw_ack_send_delayed;
+	uint64_t	s_iw_ack_send_piggybacked;
+	uint64_t	s_iw_ack_received;
+	uint64_t	s_iw_rdma_mr_alloc;
+	uint64_t	s_iw_rdma_mr_free;
+	uint64_t	s_iw_rdma_mr_used;
+	uint64_t	s_iw_rdma_mr_pool_flush;
+	uint64_t	s_iw_rdma_mr_pool_wait;
+	uint64_t	s_iw_rdma_mr_pool_depleted;
+};
+
+extern struct workqueue_struct *rds_iw_wq;
+
+/*
+ * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
+ * doesn't define it.
+ */
+static inline void rds_iw_dma_sync_sg_for_cpu(struct ib_device *dev,
+		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
+{
+	unsigned int i;
+
+	for (i = 0; i < sg_dma_len; ++i) {
+		ib_dma_sync_single_for_cpu(dev,
+				ib_sg_dma_address(dev, &sg[i]),
+				ib_sg_dma_len(dev, &sg[i]),
+				direction);
+	}
+}
+#define ib_dma_sync_sg_for_cpu	rds_iw_dma_sync_sg_for_cpu
+
+static inline void rds_iw_dma_sync_sg_for_device(struct ib_device *dev,
+		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
+{
+	unsigned int i;
+
+	for (i = 0; i < sg_dma_len; ++i) {
+		ib_dma_sync_single_for_device(dev,
+				ib_sg_dma_address(dev, &sg[i]),
+				ib_sg_dma_len(dev, &sg[i]),
+				direction);
+	}
+}
+#define ib_dma_sync_sg_for_device	rds_iw_dma_sync_sg_for_device
+
+static inline u32 rds_iw_local_dma_lkey(struct rds_iw_connection *ic)
+{
+	return ic->i_dma_local_lkey ? ic->i_cm_id->device->local_dma_lkey : ic->i_mr->lkey;
+}
+
+/* ib.c */
+extern struct rds_transport rds_iw_transport;
+extern void rds_iw_add_one(struct ib_device *device);
+extern void rds_iw_remove_one(struct ib_device *device);
+extern struct ib_client rds_iw_client;
+
+extern unsigned int fastreg_pool_size;
+extern unsigned int fastreg_message_size;
+
+extern spinlock_t iw_nodev_conns_lock;
+extern struct list_head iw_nodev_conns;
+
+/* ib_cm.c */
+int rds_iw_conn_alloc(struct rds_connection *conn, gfp_t gfp);
+void rds_iw_conn_free(void *arg);
+int rds_iw_conn_connect(struct rds_connection *conn);
+void rds_iw_conn_shutdown(struct rds_connection *conn);
+void rds_iw_state_change(struct sock *sk);
+int __init rds_iw_listen_init(void);
+void rds_iw_listen_stop(void);
+void __rds_iw_conn_error(struct rds_connection *conn, const char *, ...);
+int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
+			     struct rdma_cm_event *event);
+int rds_iw_cm_initiate_connect(struct rdma_cm_id *cm_id);
+void rds_iw_cm_connect_complete(struct rds_connection *conn,
+				struct rdma_cm_event *event);
+
+
+#define rds_iw_conn_error(conn, fmt...) \
+	__rds_iw_conn_error(conn, KERN_WARNING "RDS/IW: " fmt)
+
+/* ib_rdma.c */
+int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id);
+void rds_iw_add_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn);
+void rds_iw_remove_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn);
+void __rds_iw_destroy_conns(struct list_head *list, spinlock_t *list_lock);
+static inline void rds_iw_destroy_nodev_conns(void)
+{
+	__rds_iw_destroy_conns(&iw_nodev_conns, &iw_nodev_conns_lock);
+}
+static inline void rds_iw_destroy_conns(struct rds_iw_device *rds_iwdev)
+{
+	__rds_iw_destroy_conns(&rds_iwdev->conn_list, &rds_iwdev->spinlock);
+}
+struct rds_iw_mr_pool *rds_iw_create_mr_pool(struct rds_iw_device *);
+void rds_iw_get_mr_info(struct rds_iw_device *rds_iwdev, struct rds_info_rdma_connection *iinfo);
+void rds_iw_destroy_mr_pool(struct rds_iw_mr_pool *);
+void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
+		    struct rds_sock *rs, u32 *key_ret);
+void rds_iw_sync_mr(void *trans_private, int dir);
+void rds_iw_free_mr(void *trans_private, int invalidate);
+void rds_iw_flush_mrs(void);
+void rds_iw_remove_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id);
+
+/* ib_recv.c */
+int __init rds_iw_recv_init(void);
+void rds_iw_recv_exit(void);
+int rds_iw_recv(struct rds_connection *conn);
+int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
+		       gfp_t page_gfp, int prefill);
+void rds_iw_inc_purge(struct rds_incoming *inc);
+void rds_iw_inc_free(struct rds_incoming *inc);
+int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
+			     size_t size);
+void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context);
+void rds_iw_recv_init_ring(struct rds_iw_connection *ic);
+void rds_iw_recv_clear_ring(struct rds_iw_connection *ic);
+void rds_iw_recv_init_ack(struct rds_iw_connection *ic);
+void rds_iw_attempt_ack(struct rds_iw_connection *ic);
+void rds_iw_ack_send_complete(struct rds_iw_connection *ic);
+u64 rds_iw_piggyb_ack(struct rds_iw_connection *ic);
+
+/* ib_ring.c */
+void rds_iw_ring_init(struct rds_iw_work_ring *ring, u32 nr);
+void rds_iw_ring_resize(struct rds_iw_work_ring *ring, u32 nr);
+u32 rds_iw_ring_alloc(struct rds_iw_work_ring *ring, u32 val, u32 *pos);
+void rds_iw_ring_free(struct rds_iw_work_ring *ring, u32 val);
+void rds_iw_ring_unalloc(struct rds_iw_work_ring *ring, u32 val);
+int rds_iw_ring_empty(struct rds_iw_work_ring *ring);
+int rds_iw_ring_low(struct rds_iw_work_ring *ring);
+u32 rds_iw_ring_oldest(struct rds_iw_work_ring *ring);
+u32 rds_iw_ring_completed(struct rds_iw_work_ring *ring, u32 wr_id, u32 oldest);
+extern wait_queue_head_t rds_iw_ring_empty_wait;
+
+/* ib_send.c */
+void rds_iw_xmit_complete(struct rds_connection *conn);
+int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
+		unsigned int hdr_off, unsigned int sg, unsigned int off);
+void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context);
+void rds_iw_send_init_ring(struct rds_iw_connection *ic);
+void rds_iw_send_clear_ring(struct rds_iw_connection *ic);
+int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op);
+void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits);
+void rds_iw_advertise_credits(struct rds_connection *conn, unsigned int posted);
+int rds_iw_send_grab_credits(struct rds_iw_connection *ic, u32 wanted,
+			     u32 *adv_credits, int need_posted, int max_posted);
+
+/* ib_stats.c */
+RDS_DECLARE_PER_CPU(struct rds_iw_statistics, rds_iw_stats);
+#define rds_iw_stats_inc(member) rds_stats_inc_which(rds_iw_stats, member)
+unsigned int rds_iw_stats_info_copy(struct rds_info_iterator *iter,
+				    unsigned int avail);
+
+/* ib_sysctl.c */
+int __init rds_iw_sysctl_init(void);
+void rds_iw_sysctl_exit(void);
+extern unsigned long rds_iw_sysctl_max_send_wr;
+extern unsigned long rds_iw_sysctl_max_recv_wr;
+extern unsigned long rds_iw_sysctl_max_unsig_wrs;
+extern unsigned long rds_iw_sysctl_max_unsig_bytes;
+extern unsigned long rds_iw_sysctl_max_recv_allocation;
+extern unsigned int rds_iw_sysctl_flow_control;
+extern ctl_table rds_iw_sysctl_table[];
+
+/*
+ * Helper functions for getting/setting the header and data SGEs in
+ * RDS packets (not RDMA)
+ */
+static inline struct ib_sge *
+rds_iw_header_sge(struct rds_iw_connection *ic, struct ib_sge *sge)
+{
+	return &sge[0];
+}
+
+static inline struct ib_sge *
+rds_iw_data_sge(struct rds_iw_connection *ic, struct ib_sge *sge)
+{
+	return &sge[1];
+}
+
+#endif

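RDS_IW_SUPPORTED_PROTOCOLS above is a bitmask of the 3.x minor versions this transport speaks (bit 0 for 3.0, bit 1 for 3.1, as the negotiation loop in iw_cm.c below reads it); rds_iw_protocol_compatible() ANDs it with the peer's advertised mask and walks the result to pick the highest common minor. A worked user-space sketch of that walk, returning just the minor number instead of the kernel's encoded version value:

#include <stdio.h>

#define SUPPORTED_MINORS	0x0003u	/* bit 0 = minor 0, bit 1 = minor 1 */

/* Highest minor version shared with a peer advertising 'peer_mask',
 * or -1 if there is no overlap. */
static int common_minor(unsigned int peer_mask)
{
	unsigned int common = peer_mask & SUPPORTED_MINORS;
	int minor = 0;

	if (!common)
		return -1;
	while ((common >>= 1) != 0)
		minor++;
	return minor;
}

int main(void)
{
	printf("peer 0x0003 -> 3.%d\n", common_minor(0x0003));	/* 3.1 */
	printf("peer 0x0001 -> 3.%d\n", common_minor(0x0001));	/* 3.0 */
	printf("peer 0x0004 -> %d\n", common_minor(0x0004));	/* -1, incompatible */
	return 0;
}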
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_cm.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_cm.c	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_cm.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,760 @@
+/*
+ * Copyright (c) 2006 Oracle.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/in.h>
+#include <linux/vmalloc.h>
+
+#include "rds.h"
+#include "iw.h"
+
+/*
+ * Set the selected protocol version
+ */
+static void rds_iw_set_protocol(struct rds_connection *conn, unsigned int version)
+{
+	conn->c_version = version;
+}
+
+/*
+ * Set up flow control
+ */
+static void rds_iw_set_flow_control(struct rds_connection *conn, u32 credits)
+{
+	struct rds_iw_connection *ic = conn->c_transport_data;
+
+	if (rds_iw_sysctl_flow_control && credits != 0) {
+		/* We're doing flow control */
+		ic->i_flowctl = 1;
+		rds_iw_send_add_credits(conn, credits);
+	} else {
+		ic->i_flowctl = 0;
+	}
+}
+
+/*
+ * Connection established.
+ * We get here for both outgoing and incoming connection.
+ */
+void rds_iw_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event)
+{
+	const struct rds_iw_connect_private *dp = NULL;
+	struct rds_iw_connection *ic = conn->c_transport_data;
+	struct rds_iw_device *rds_iwdev;
+	int err;
+
+	if (event->param.conn.private_data_len) {
+		dp = event->param.conn.private_data;
+
+		rds_iw_set_protocol(conn,
+				RDS_PROTOCOL(dp->dp_protocol_major,
+					dp->dp_protocol_minor));
+		rds_iw_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
+	}
+
+	/* update ib_device with this local ipaddr & conn */
+	rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
+	err = rds_iw_update_cm_id(rds_iwdev, ic->i_cm_id);
+	if (err)
+		printk(KERN_ERR "rds_iw_update_ipaddr failed (%d)\n", err);
+	rds_iw_add_conn(rds_iwdev, conn);
+
+	/* If the peer gave us the last packet it saw, process this as if
+	 * we had received a regular ACK. */
+	if (dp && dp->dp_ack_seq)
+		rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);
+
+	printk(KERN_NOTICE "RDS/IW: connected to %u.%u.%u.%u<->%u.%u.%u.%u version %u.%u%s\n",
+			NIPQUAD(conn->c_laddr), NIPQUAD(conn->c_faddr),
+			RDS_PROTOCOL_MAJOR(conn->c_version),
+			RDS_PROTOCOL_MINOR(conn->c_version),
+			ic->i_flowctl ? ", flow control" : "");
+
+	rds_connect_complete(conn);
+}
+
+static void rds_iw_cm_fill_conn_param(struct rds_connection *conn,
+			struct rdma_conn_param *conn_param,
+			struct rds_iw_connect_private *dp,
+			u32 protocol_version)
+{
+	struct rds_iw_connection *ic = conn->c_transport_data;
+
+	memset(conn_param, 0, sizeof(struct rdma_conn_param));
+	/* XXX tune these? */
+	conn_param->responder_resources = 1;
+	conn_param->initiator_depth = 1;
+
+	if (dp) {
+		memset(dp, 0, sizeof(*dp));
+		dp->dp_saddr = conn->c_laddr;
+		dp->dp_daddr = conn->c_faddr;
+		dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
+		dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
+		dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IW_SUPPORTED_PROTOCOLS);
+		dp->dp_ack_seq = rds_iw_piggyb_ack(ic);
+
+		/* Advertise flow control */
+		if (ic->i_flowctl) {
+			unsigned int credits;
+
+			credits = IB_GET_POST_CREDITS(atomic_read(&ic->i_credits));
+			dp->dp_credit = cpu_to_be32(credits);
+			atomic_sub(IB_SET_POST_CREDITS(credits), &ic->i_credits);
+		}
+
+		conn_param->private_data = dp;
+		conn_param->private_data_len = sizeof(*dp);
+	}
+}
+
+static void rds_iw_cq_event_handler(struct ib_event *event, void *data)
+{
+	rdsdebug("event %u data %p\n", event->event, data);
+}
+
+static void rds_iw_qp_event_handler(struct ib_event *event, void *data)
+{
+	struct rds_connection *conn = data;
+	struct rds_iw_connection *ic = conn->c_transport_data;
+
+	rdsdebug("conn %p ic %p event %u\n", conn, ic, event->event);
+
+	switch (event->event) {
+	case IB_EVENT_COMM_EST:
+		rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
+		break;
+	case IB_EVENT_QP_REQ_ERR:
+	case IB_EVENT_QP_FATAL:
+	default:
+		rds_iw_conn_error(conn, "RDS/IW: Fatal QP Event %u - connection %u.%u.%u.%u->%u.%u.%u.%u...reconnecting\n",
+			event->event, NIPQUAD(conn->c_laddr),
+			NIPQUAD(conn->c_faddr));
+		break;
+	}
+}
+
+/*
+ * Create a QP
+ */
+static int rds_iw_init_qp_attrs(struct ib_qp_init_attr *attr,
+		struct rds_iw_device *rds_iwdev,
+		struct rds_iw_work_ring *send_ring,
+		void (*send_cq_handler)(struct ib_cq *, void *),
+		struct rds_iw_work_ring *recv_ring,
+		void (*recv_cq_handler)(struct ib_cq *, void *),
+		void *context)
+{
+	struct ib_device *dev = rds_iwdev->dev;
+	unsigned int send_size, recv_size;
+	int ret;
+
+	/* The offset of 1 is to accommodate the additional ACK WR. */
+	send_size = min_t(unsigned int, rds_iwdev->max_wrs, rds_iw_sysctl_max_send_wr + 1);
+	recv_size = min_t(unsigned int, rds_iwdev->max_wrs, rds_iw_sysctl_max_recv_wr + 1);
+	rds_iw_ring_resize(send_ring, send_size - 1);
+	rds_iw_ring_resize(recv_ring, recv_size - 1);
+
+	memset(attr, 0, sizeof(*attr));
+	attr->event_handler = rds_iw_qp_event_handler;
+	attr->qp_context = context;
+	attr->cap.max_send_wr = send_size;
+	attr->cap.max_recv_wr = recv_size;
+	attr->cap.max_send_sge = rds_iwdev->max_sge;
+	attr->cap.max_recv_sge = RDS_IW_RECV_SGE;
+	attr->sq_sig_type = IB_SIGNAL_REQ_WR;
+	attr->qp_type = IB_QPT_RC;
+
+	attr->send_cq = ib_create_cq(dev, send_cq_handler,
+				     rds_iw_cq_event_handler,
+				     context, send_size, 0);
+	if (IS_ERR(attr->send_cq)) {
+		ret = PTR_ERR(attr->send_cq);
+		attr->send_cq = NULL;
+		rdsdebug("ib_create_cq send failed: %d\n", ret);
+		goto out;
+	}
+
+	attr->recv_cq = ib_create_cq(dev, recv_cq_handler,
+				     rds_iw_cq_event_handler,
+				     context, recv_size, 0);
+	if (IS_ERR(attr->recv_cq)) {
+		ret = PTR_ERR(attr->recv_cq);
+		attr->recv_cq = NULL;
+		rdsdebug("ib_create_cq send failed: %d\n", ret);
+		goto out;
+	}
+
+	ret = ib_req_notify_cq(attr->send_cq, IB_CQ_NEXT_COMP);
+	if (ret) {
+		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
+		goto out;
+	}
+
+	ret = ib_req_notify_cq(attr->recv_cq, IB_CQ_SOLICITED);
+	if (ret) {
+		rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
+		goto out;
+	}
+
+out:
+	if (ret) {
+		if (attr->send_cq)
+			ib_destroy_cq(attr->send_cq);
+		if (attr->recv_cq)
+			ib_destroy_cq(attr->recv_cq);
+	}
+	return ret;
+}
+
+/*
+ * This needs to be very careful to not leave IS_ERR pointers around for
+ * cleanup to trip over.
+ */
+static int rds_iw_setup_qp(struct rds_connection *conn)
+{
+	struct rds_iw_connection *ic = conn->c_transport_data;
+	struct ib_device *dev = ic->i_cm_id->device;
+	struct ib_qp_init_attr attr;
+	struct rds_iw_device *rds_iwdev;
+	int ret;
+
+	/* rds_iw_add_one creates a rds_iw_device object per IB device,
+	 * and allocates a protection domain, memory range and MR pool
+	 * for each.  If that fails for any reason, it will not register
+	 * the rds_iwdev at all.
+	 */
+	rds_iwdev = ib_get_client_data(dev, &rds_iw_client);
+	if (rds_iwdev == NULL) {
+		if (printk_ratelimit())
+			printk(KERN_NOTICE "RDS/IW: No client_data for device %s\n",
+					dev->name);
+		return -EOPNOTSUPP;
+	}
+
+	/* Protection domain and memory range */
+	ic->i_pd = rds_iwdev->pd;
+	ic->i_mr = rds_iwdev->mr;
+
+	ret = rds_iw_init_qp_attrs(&attr, rds_iwdev,
+			&ic->i_send_ring, rds_iw_send_cq_comp_handler,
+			&ic->i_recv_ring, rds_iw_recv_cq_comp_handler,
+			conn);
+	if (ret < 0)
+		goto out;
+
+	ic->i_send_cq = attr.send_cq;
+	ic->i_recv_cq = attr.recv_cq;
+
+	/*
+	 * XXX this can fail if max_*_wr is too large?  Are we supposed
+	 * to back off until we get a value that the hardware can support?
+	 */
+	ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
+	if (ret) {
+		rdsdebug("rdma_create_qp failed: %d\n", ret);
+		goto out;
+	}
+
+	ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
+					   ic->i_send_ring.w_nr *
+						sizeof(struct rds_header),
+					   &ic->i_send_hdrs_dma, GFP_KERNEL);
+	if (ic->i_send_hdrs == NULL) {
+		ret = -ENOMEM;
+		rdsdebug("ib_dma_alloc_coherent send failed\n");
+		goto out;
+	}
+
+	ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
+					   ic->i_recv_ring.w_nr *
+						sizeof(struct rds_header),
+					   &ic->i_recv_hdrs_dma, GFP_KERNEL);
+	if (ic->i_recv_hdrs == NULL) {
+		ret = -ENOMEM;
+		rdsdebug("ib_dma_alloc_coherent recv failed\n");
+		goto out;
+	}
+
+	ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
+				       &ic->i_ack_dma, GFP_KERNEL);
+	if (ic->i_ack == NULL) {
+		ret = -ENOMEM;
+		rdsdebug("ib_dma_alloc_coherent ack failed\n");
+		goto out;
+	}
+
+	ic->i_sends = vmalloc(ic->i_send_ring.w_nr * sizeof(struct rds_iw_send_work));
+	if (ic->i_sends == NULL) {
+		ret = -ENOMEM;
+		rdsdebug("send allocation failed\n");
+		goto out;
+	}
+	rds_iw_send_init_ring(ic);
+
+	ic->i_recvs = vmalloc(ic->i_recv_ring.w_nr * sizeof(struct rds_iw_recv_work));
+	if (ic->i_recvs == NULL) {
+		ret = -ENOMEM;
+		rdsdebug("recv allocation failed\n");
+		goto out;
+	}
+
+	rds_iw_recv_init_ring(ic);
+	rds_iw_recv_init_ack(ic);
+
+	/* Post receive buffers - as a side effect, this will update
+	 * the posted credit count. */
+	rds_iw_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 1);
+
+	rdsdebug("conn %p pd %p mr %p cq %p %p\n", conn, ic->i_pd, ic->i_mr,
+		 ic->i_send_cq, ic->i_recv_cq);
+
+out:
+	return ret;
+}
+
+static u32 rds_iw_protocol_compatible(const struct rds_iw_connect_private *dp)
+{
+	u16 common;
+	u32 version = 0;
+
+	/* rdma_cm private data is odd - when there is any private data in the
+	 * request, we will be given a pretty large buffer without telling us the
+	 * original size. The only way to tell the difference is by looking at
+	 * the contents, which are initialized to zero.
+	 * If the protocol version fields aren't set, this is a connection attempt
+	 * from an older version. This could be 3.0 or 2.0 - we can't tell.
+	 * We really should have changed this for OFED 1.3 :-( */
+	if (dp->dp_protocol_major == 0)
+		return RDS_PROTOCOL_3_0;
+
+	common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IW_SUPPORTED_PROTOCOLS;
+	if (dp->dp_protocol_major == 3 && common) {
+		version = RDS_PROTOCOL_3_0;
+		while ((common >>= 1) != 0)
+			version++;
+	} else if (printk_ratelimit()) {
+		printk(KERN_NOTICE "RDS: Connection from %u.%u.%u.%u using "
+			"incompatible protocol version %u.%u\n",
+			NIPQUAD(dp->dp_saddr),
+			dp->dp_protocol_major,
+			dp->dp_protocol_minor);
+	}
+	return version;
+}
+
+int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
+				    struct rdma_cm_event *event)
+{
+	const struct rds_iw_connect_private *dp = event->param.conn.private_data;
+	struct rds_iw_connect_private dp_rep;
+	struct rds_connection *conn = NULL;
+	struct rds_iw_connection *ic = NULL;
+	struct rdma_conn_param conn_param;
+	struct rds_iw_device *rds_iwdev;
+	u32 version;
+	int err, destroy = 1;
+
+	/* Check whether the remote protocol version matches ours. */
+	version = rds_iw_protocol_compatible(dp);
+	if (!version)
+		goto out;
+
+	rdsdebug("saddr %u.%u.%u.%u daddr %u.%u.%u.%u RDSv%u.%u\n",
+		 NIPQUAD(dp->dp_saddr), NIPQUAD(dp->dp_daddr),
+		 RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version));
+
+	conn = rds_conn_create(dp->dp_daddr, dp->dp_saddr, &rds_iw_transport,
+			       GFP_KERNEL);
+	if (IS_ERR(conn)) {
+		rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
+		conn = NULL;
+		goto out;
+	}
+
+	/*
+	 * The connection request may occur while the
+	 * previous connection exists, e.g. in case of failover.
+	 * But as connections may be initiated simultaneously
+	 * by both hosts, we have a random backoff mechanism -
+	 * see the comment above rds_queue_reconnect()
+	 */
+	mutex_lock(&conn->c_cm_lock);
+	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
+		if (rds_conn_state(conn) == RDS_CONN_UP) {
+			rdsdebug("incoming connect while connecting\n");
+			rds_conn_drop(conn);
+			rds_iw_stats_inc(s_iw_listen_closed_stale);
+		} else
+		if (rds_conn_state(conn) == RDS_CONN_CONNECTING) {
+			/* Wait and see - our connect may still be succeeding */
+			rds_iw_stats_inc(s_iw_connect_raced);
+		}
+		mutex_unlock(&conn->c_cm_lock);
+		goto out;
+	}
+
+	ic = conn->c_transport_data;
+
+	rds_iw_set_protocol(conn, version);
+	rds_iw_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
+
+	/* If the peer gave us the last packet it saw, process this as if
+	 * we had received a regular ACK. */
+	if (dp->dp_ack_seq)
+		rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);
+
+	BUG_ON(cm_id->context);
+	BUG_ON(ic->i_cm_id);
+
+	ic->i_cm_id = cm_id;
+	cm_id->context = conn;
+
+	rds_iwdev = ib_get_client_data(cm_id->device, &rds_iw_client);
+	ic->i_dma_local_lkey = rds_iwdev->dma_local_lkey;
+
+	/* We got halfway through setting up the ib_connection; if we
+	 * fail now, we have to take the long route out of this mess. */
+	destroy = 0;
+
+	err = rds_iw_setup_qp(conn);
+	if (err) {
+		rds_iw_conn_error(conn, "rds_iw_setup_qp failed (%d)\n", err);
+		goto out;
+	}
+
+	rds_iw_cm_fill_conn_param(conn, &conn_param, &dp_rep, version);
+
+	/* rdma_accept() calls rdma_reject() internally if it fails */
+	err = rdma_accept(cm_id, &conn_param);
+	mutex_unlock(&conn->c_cm_lock);
+	if (err) {
+		rds_iw_conn_error(conn, "rdma_accept failed (%d)\n", err);
+		goto out;
+	}
+
+	return 0;
+
+out:
+	rdma_reject(cm_id, NULL, 0);
+	return destroy;
+}
+
+
+int rds_iw_cm_initiate_connect(struct rdma_cm_id *cm_id)
+{
+	struct rds_connection *conn = cm_id->context;
+	struct rds_iw_connection *ic = conn->c_transport_data;
+	struct rdma_conn_param conn_param;
+	struct rds_iw_connect_private dp;
+	int ret;
+
+	/* If the peer doesn't do protocol negotiation, we must
+	 * default to RDSv3.0 */
+	rds_iw_set_protocol(conn, RDS_PROTOCOL_3_0);
+	ic->i_flowctl = rds_iw_sysctl_flow_control;	/* advertise flow control */
+
+	ret = rds_iw_setup_qp(conn);
+	if (ret) {
+		rds_iw_conn_error(conn, "rds_iw_setup_qp failed (%d)\n", ret);
+		goto out;
+	}
+
+	rds_iw_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION);
+
+	ret = rdma_connect(cm_id, &conn_param);
+	if (ret)
+		rds_iw_conn_error(conn, "rdma_connect failed (%d)\n", ret);
+
+out:
+	/* Beware - returning non-zero tells the rdma_cm to destroy
+	 * the cm_id. We should certainly not do it as long as we still
+	 * "own" the cm_id. */
+	if (ret) {
+		struct rds_iw_connection *ic = conn->c_transport_data;
+
+		if (ic->i_cm_id == cm_id)
+			ret = 0;
+	}
+	return ret;
+}
+
+int rds_iw_conn_connect(struct rds_connection *conn)
+{
+	struct rds_iw_connection *ic = conn->c_transport_data;
+	struct rds_iw_device *rds_iwdev;
+	struct sockaddr_in src, dest;
+	int ret;
+
+	/* XXX I wonder what effect the port space has */
+	/* delegate cm event handler to rdma_transport */
+	ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn,
+				     RDMA_PS_TCP);
+	if (IS_ERR(ic->i_cm_id)) {
+		ret = PTR_ERR(ic->i_cm_id);
+		ic->i_cm_id = NULL;
+		rdsdebug("rdma_create_id() failed: %d\n", ret);
+		goto out;
+	}
+
+	rdsdebug("created cm id %p for conn %p\n", ic->i_cm_id, conn);
+
+	src.sin_family = AF_INET;
+	src.sin_addr.s_addr = (__force u32)conn->c_laddr;
+	src.sin_port = (__force u16)htons(0);
+
+	/* First, bind to the local address and device. */
+	ret = rdma_bind_addr(ic->i_cm_id, (struct sockaddr *) &src);
+	if (ret) {
+		rdsdebug("rdma_bind_addr(%u.%u.%u.%u) failed: %d\n",
+				NIPQUAD(conn->c_laddr), ret);
+		rdma_destroy_id(ic->i_cm_id);
+		ic->i_cm_id = NULL;
+		goto out;
+	}
+
+	rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
+	ic->i_dma_local_lkey = rds_iwdev->dma_local_lkey;
+
+	dest.sin_family = AF_INET;
+	dest.sin_addr.s_addr = (__force u32)conn->c_faddr;
+	dest.sin_port = (__force u16)htons(RDS_PORT);
+
+	ret = rdma_resolve_addr(ic->i_cm_id, (struct sockaddr *)&src,
+				(struct sockaddr *)&dest,
+				RDS_RDMA_RESOLVE_TIMEOUT_MS);
+	if (ret) {
+		rdsdebug("addr resolve failed for cm id %p: %d\n", ic->i_cm_id,
+			 ret);
+		rdma_destroy_id(ic->i_cm_id);
+		ic->i_cm_id = NULL;
+	}
+
+out:
+	return ret;
+}
+
+/*
+ * This is so careful about only cleaning up resources that were built up
+ * so that it can be called at any point during startup.  In fact it
+ * can be called multiple times for a given connection.
+ */
+void rds_iw_conn_shutdown(struct rds_connection *conn)
+{
+	struct rds_iw_connection *ic = conn->c_transport_data;
+	int err = 0;
+	struct ib_qp_attr qp_attr;
+
+	rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id,
+		 ic->i_pd, ic->i_send_cq, ic->i_recv_cq,
+		 ic->i_cm_id ? ic->i_cm_id->qp : NULL);
+
+	if (ic->i_cm_id) {
+		struct ib_device *dev = ic->i_cm_id->device;
+
+		rdsdebug("disconnecting cm %p\n", ic->i_cm_id);
+		err = rdma_disconnect(ic->i_cm_id);
+		if (err) {
+			/* Actually this may happen quite frequently, when
+			 * an outgoing connect raced with an incoming connect.
+			 */
+			rdsdebug("rds_iw_conn_shutdown: failed to disconnect,"
+				   " cm: %p err %d\n", ic->i_cm_id, err);
+		}
+
+		if (ic->i_cm_id->qp) {
+			qp_attr.qp_state = IB_QPS_ERR;
+			ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE);
+		}
+
+		wait_event(rds_iw_ring_empty_wait,
+			rds_iw_ring_empty(&ic->i_send_ring) &&
+			rds_iw_ring_empty(&ic->i_recv_ring));
+
+		if (ic->i_send_hdrs)
+			ib_dma_free_coherent(dev,
+					   ic->i_send_ring.w_nr *
+						sizeof(struct rds_header),
+					   ic->i_send_hdrs,
+					   ic->i_send_hdrs_dma);
+
+		if (ic->i_recv_hdrs)
+			ib_dma_free_coherent(dev,
+					   ic->i_recv_ring.w_nr *
+						sizeof(struct rds_header),
+					   ic->i_recv_hdrs,
+					   ic->i_recv_hdrs_dma);
+
+		if (ic->i_ack)
+			ib_dma_free_coherent(dev, sizeof(struct rds_header),
+					     ic->i_ack, ic->i_ack_dma);
+
+		if (ic->i_sends)
+			rds_iw_send_clear_ring(ic);
+		if (ic->i_recvs)
+			rds_iw_recv_clear_ring(ic);
+
+		if (ic->i_cm_id->qp)
+			rdma_destroy_qp(ic->i_cm_id);
+		if (ic->i_send_cq)
+			ib_destroy_cq(ic->i_send_cq);
+		if (ic->i_recv_cq)
+			ib_destroy_cq(ic->i_recv_cq);
+
+		/*
+		 * If associated with an rds_iw_device:
+		 * 	Move connection back to the nodev list.
+		 * 	Remove cm_id from the device cm_id list.
+		 */
+		if (ic->rds_iwdev)
+			rds_iw_remove_conn(ic->rds_iwdev, conn);
+
+		rdma_destroy_id(ic->i_cm_id);
+
+		ic->i_cm_id = NULL;
+		ic->i_pd = NULL;
+		ic->i_mr = NULL;
+		ic->i_send_cq = NULL;
+		ic->i_recv_cq = NULL;
+		ic->i_send_hdrs = NULL;
+		ic->i_recv_hdrs = NULL;
+		ic->i_ack = NULL;
+	}
+	BUG_ON(ic->rds_iwdev);
+
+	/* Clear pending transmit */
+	if (ic->i_rm) {
+		rds_message_put(ic->i_rm);
+		ic->i_rm = NULL;
+	}
+
+	/* Clear the ACK state */
+	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
+#ifdef KERNEL_HAS_ATOMIC64
+	atomic64_set(&ic->i_ack_next, 0);
+#else
+	ic->i_ack_next = 0;
+#endif
+	ic->i_ack_recv = 0;
+
+	/* Clear flow control state */
+	ic->i_flowctl = 0;
+	atomic_set(&ic->i_credits, 0);
+
+	rds_iw_ring_init(&ic->i_send_ring, rds_iw_sysctl_max_send_wr);
+	rds_iw_ring_init(&ic->i_recv_ring, rds_iw_sysctl_max_recv_wr);
+
+	if (ic->i_iwinc) {
+		rds_inc_put(&ic->i_iwinc->ii_inc);
+		ic->i_iwinc = NULL;
+	}
+
+	vfree(ic->i_sends);
+	ic->i_sends = NULL;
+	vfree(ic->i_recvs);
+	ic->i_recvs = NULL;
+	rdsdebug("shutdown complete\n");
+}
+
+int rds_iw_conn_alloc(struct rds_connection *conn, gfp_t gfp)
+{
+	struct rds_iw_connection *ic;
+	unsigned long flags;
+
+	/* XXX too lazy? */
+	ic = kzalloc(sizeof(struct rds_iw_connection), GFP_KERNEL);
+	if (ic == NULL)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&ic->iw_node);
+	mutex_init(&ic->i_recv_mutex);
+#ifndef KERNEL_HAS_ATOMIC64
+	spin_lock_init(&ic->i_ack_lock);
+#endif
+
+	/*
+	 * rds_iw_conn_shutdown() waits for these to be emptied so they
+	 * must be initialized before it can be called.
+	 */
+	rds_iw_ring_init(&ic->i_send_ring, rds_iw_sysctl_max_send_wr);
+	rds_iw_ring_init(&ic->i_recv_ring, rds_iw_sysctl_max_recv_wr);
+
+	ic->conn = conn;
+	conn->c_transport_data = ic;
+
+	spin_lock_irqsave(&iw_nodev_conns_lock, flags);
+	list_add_tail(&ic->iw_node, &iw_nodev_conns);
+	spin_unlock_irqrestore(&iw_nodev_conns_lock, flags);
+
+
+	rdsdebug("conn %p conn ic %p\n", conn, conn->c_transport_data);
+	return 0;
+}
+
+/*
+ * Free a connection. Connection must be shut down and not set for reconnect.
+ */
+void rds_iw_conn_free(void *arg)
+{
+	struct rds_iw_connection *ic = arg;
+	spinlock_t	*lock_ptr;
+
+	rdsdebug("ic %p\n", ic);
+
+	/*
+	 * Conn is either on a dev's list or on the nodev list.
+	 * A race with shutdown() or connect() would cause problems
+	 * (since rds_iwdev would change) but that should never happen.
+	 */
+	lock_ptr = ic->rds_iwdev ? &ic->rds_iwdev->spinlock : &iw_nodev_conns_lock;
+
+	spin_lock_irq(lock_ptr);
+	list_del(&ic->iw_node);
+	spin_unlock_irq(lock_ptr);
+
+	kfree(ic);
+}
+
+/*
+ * An error occurred on the connection
+ */
+void
+__rds_iw_conn_error(struct rds_connection *conn, const char *fmt, ...)
+{
+	va_list ap;
+
+	rds_conn_drop(conn);
+
+	va_start(ap, fmt);
+	vprintk(fmt, ap);
+	va_end(ap);
+}

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_rdma.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_rdma.c	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_rdma.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,889 @@
+/*
+ * Copyright (c) 2006 Oracle.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/kernel.h>
+
+#include "rds.h"
+#include "rdma.h"
+#include "iw.h"
+
+
+/*
+ * This is stored as mr->r_trans_private.
+ */
+struct rds_iw_mr {
+	struct rds_iw_device	*device;
+	struct rds_iw_mr_pool	*pool;
+	struct rdma_cm_id	*cm_id;
+
+	struct ib_mr	*mr;
+	struct ib_fast_reg_page_list *page_list;
+
+	struct rds_iw_mapping	mapping;
+	unsigned char		remap_count;
+};
+
+/*
+ * Our own little MR pool
+ */
+struct rds_iw_mr_pool {
+	struct rds_iw_device	*device;		/* back ptr to the device that owns us */
+
+	struct mutex		flush_lock;		/* serialize fmr invalidate */
+	struct work_struct	flush_worker;		/* flush worker */
+
+	spinlock_t		list_lock;		/* protect variables below */
+	atomic_t		item_count;		/* total # of MRs */
+	atomic_t		dirty_count;		/* # of dirty MRs */
+	struct list_head	dirty_list;		/* dirty mappings */
+	struct list_head	clean_list;		/* unused & unmapped MRs */
+	atomic_t		free_pinned;		/* memory pinned by free MRs */
+	unsigned long		max_message_size;	/* in pages */
+	unsigned long		max_items;
+	unsigned long		max_items_soft;
+	unsigned long		max_free_pinned;
+	int			max_pages;
+};
+
+static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all);
+static void rds_iw_mr_pool_flush_worker(struct work_struct *work);
+static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
+static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
+			  struct rds_iw_mr *ibmr,
+			  struct scatterlist *sg, unsigned int nents);
+static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
+static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
+			struct list_head *unmap_list,
+			struct list_head *kill_list);
+static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
+
+static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id)
+{
+	struct rds_iw_device *iwdev;
+	struct rds_iw_cm_id *i_cm_id;
+
+	*rds_iwdev = NULL;
+	*cm_id = NULL;
+
+	list_for_each_entry(iwdev, &rds_iw_devices, list) {
+		spin_lock_irq(&iwdev->spinlock);
+		list_for_each_entry(i_cm_id, &iwdev->cm_id_list, list) {
+			struct sockaddr_in *src_addr, *dst_addr;
+
+			src_addr = (struct sockaddr_in *)&i_cm_id->cm_id->route.addr.src_addr;
+			dst_addr = (struct sockaddr_in *)&i_cm_id->cm_id->route.addr.dst_addr;
+
+			rdsdebug("%s: local ipaddr = %x port %d, remote ipaddr = %x port %d"
+				 "....looking for %x port %d, remote ipaddr = %x port %d\n",
+				 __func__,
+				src_addr->sin_addr.s_addr,
+				src_addr->sin_port,
+				dst_addr->sin_addr.s_addr,
+				dst_addr->sin_port,
+				rs->rs_bound_addr,
+				rs->rs_bound_port,
+				rs->rs_conn_addr,
+				rs->rs_conn_port);
+#ifdef WORKING_TUPLE_DETECTION
+			if (src_addr->sin_addr.s_addr == rs->rs_bound_addr &&
+			    src_addr->sin_port == rs->rs_bound_port &&
+			    dst_addr->sin_addr.s_addr == rs->rs_conn_addr &&
+			    dst_addr->sin_port == rs->rs_conn_port) {
+#else
+			/* FIXME - needs to compare the local and remote ipaddr/port tuple, but the
+			 * ipaddr is the only available information in the rds_sock (as the rest are
+			 * zeroed).  It doesn't appear to be properly populated during connection
+			 * setup...
+			 */
+			if (src_addr->sin_addr.s_addr == rs->rs_bound_addr) {
+#endif
+				spin_unlock_irq(&iwdev->spinlock);
+				*rds_iwdev = iwdev;
+				*cm_id = i_cm_id->cm_id;
+				return 0;
+			}
+		}
+		spin_unlock_irq(&iwdev->spinlock);
+	}
+
+	return 1;
+}
+
+static int rds_iw_add_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id)
+{
+	struct rds_iw_cm_id *i_cm_id;
+
+	i_cm_id = kmalloc(sizeof *i_cm_id, GFP_KERNEL);
+	if (!i_cm_id)
+		return -ENOMEM;
+
+	i_cm_id->cm_id = cm_id;
+
+	spin_lock_irq(&rds_iwdev->spinlock);
+	list_add_tail(&i_cm_id->list, &rds_iwdev->cm_id_list);
+	spin_unlock_irq(&rds_iwdev->spinlock);
+
+	return 0;
+}
+
+void rds_iw_remove_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id)
+{
+	struct rds_iw_cm_id *i_cm_id;
+
+	spin_lock_irq(&rds_iwdev->spinlock);
+	list_for_each_entry(i_cm_id, &rds_iwdev->cm_id_list, list) {
+		if (i_cm_id->cm_id == cm_id) {
+			list_del(&i_cm_id->list);
+			kfree(i_cm_id);
+			break;
+		}
+	}
+	spin_unlock_irq(&rds_iwdev->spinlock);
+}
+
+
+int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id)
+{
+	struct sockaddr_in *src_addr, *dst_addr;
+	struct rds_iw_device *rds_iwdev_old;
+	struct rds_sock rs;
+	struct rdma_cm_id *pcm_id;
+	int rc;
+
+	src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
+	dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
+
+	rs.rs_bound_addr = src_addr->sin_addr.s_addr;
+	rs.rs_bound_port = src_addr->sin_port;
+	rs.rs_conn_addr = dst_addr->sin_addr.s_addr;
+	rs.rs_conn_port = dst_addr->sin_port;
+
+	rc = rds_iw_get_device(&rs, &rds_iwdev_old, &pcm_id);
+	if (rc)
+		rds_iw_remove_cm_id(rds_iwdev, cm_id);
+
+	return rds_iw_add_cm_id(rds_iwdev, cm_id);
+}
+
+void rds_iw_add_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn)
+{
+	struct rds_iw_connection *ic = conn->c_transport_data;
+
+	/* conn was previously on the nodev_conns_list */
+	spin_lock_irq(&iw_nodev_conns_lock);
+	BUG_ON(list_empty(&iw_nodev_conns));
+	BUG_ON(list_empty(&ic->iw_node));
+	list_del(&ic->iw_node);
+
+	spin_lock_irq(&rds_iwdev->spinlock);
+	list_add_tail(&ic->iw_node, &rds_iwdev->conn_list);
+	spin_unlock_irq(&rds_iwdev->spinlock);
+	spin_unlock_irq(&iw_nodev_conns_lock);
+
+	ic->rds_iwdev = rds_iwdev;
+}
+
+void rds_iw_remove_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn)
+{
+	struct rds_iw_connection *ic = conn->c_transport_data;
+
+	/* place conn on nodev_conns_list */
+	spin_lock(&iw_nodev_conns_lock);
+
+	spin_lock_irq(&rds_iwdev->spinlock);
+	BUG_ON(list_empty(&ic->iw_node));
+	list_del(&ic->iw_node);
+	spin_unlock_irq(&rds_iwdev->spinlock);
+
+	list_add_tail(&ic->iw_node, &iw_nodev_conns);
+
+	spin_unlock(&iw_nodev_conns_lock);
+
+	rds_iw_remove_cm_id(ic->rds_iwdev, ic->i_cm_id);
+	ic->rds_iwdev = NULL;
+}
+
+void __rds_iw_destroy_conns(struct list_head *list, spinlock_t *list_lock)
+{
+	struct rds_iw_connection *ic, *_ic;
+	LIST_HEAD(tmp_list);
+
+	/* avoid calling conn_destroy with irqs off */
+	spin_lock_irq(list_lock);
+	list_splice(list, &tmp_list);
+	INIT_LIST_HEAD(list);
+	spin_unlock_irq(list_lock);
+
+	list_for_each_entry_safe(ic, _ic, &tmp_list, iw_node) {
+		if (ic->conn->c_passive)
+			rds_conn_destroy(ic->conn->c_passive);
+		rds_conn_destroy(ic->conn);
+	}
+}
+
+static void rds_iw_set_scatterlist(struct rds_iw_scatterlist *sg,
+		struct scatterlist *list, unsigned int sg_len)
+{
+	sg->list = list;
+	sg->len = sg_len;
+	sg->dma_len = 0;
+	sg->dma_npages = 0;
+	sg->bytes = 0;
+}
+
+static u64 *rds_iw_map_scatterlist(struct rds_iw_device *rds_iwdev,
+			struct rds_iw_scatterlist *sg,
+			unsigned int dma_page_shift)
+{
+	struct ib_device *dev = rds_iwdev->dev;
+	u64 *dma_pages = NULL;
+	u64 dma_mask;
+	unsigned int dma_page_size;
+	int i, j, ret;
+
+	dma_page_size = 1 << dma_page_shift;
+	dma_mask = dma_page_size - 1;
+
+	WARN_ON(sg->dma_len);
+
+	sg->dma_len = ib_dma_map_sg(dev, sg->list, sg->len, DMA_BIDIRECTIONAL);
+	if (unlikely(!sg->dma_len)) {
+		printk(KERN_WARNING "RDS/IW: dma_map_sg failed!\n");
+		return ERR_PTR(-EBUSY);
+	}
+
+	sg->bytes = 0;
+	sg->dma_npages = 0;
+
+	ret = -EINVAL;
+	for (i = 0; i < sg->dma_len; ++i) {
+		unsigned int dma_len = ib_sg_dma_len(dev, &sg->list[i]);
+		u64 dma_addr = ib_sg_dma_address(dev, &sg->list[i]);
+		u64 end_addr;
+
+		sg->bytes += dma_len;
+
+		end_addr = dma_addr + dma_len;
+		if (dma_addr & dma_mask) {
+			if (i > 0)
+				goto out_unmap;
+			dma_addr &= ~dma_mask;
+		}
+		if (end_addr & dma_mask) {
+			if (i < sg->dma_len - 1)
+				goto out_unmap;
+			end_addr = (end_addr + dma_mask) & ~dma_mask;
+		}
+
+		sg->dma_npages += (end_addr - dma_addr) >> dma_page_shift;
+	}
+
+	/* Now gather the dma addrs into one list */
+	if (sg->dma_npages > fastreg_message_size)
+		goto out_unmap;
+
+	dma_pages = kmalloc(sizeof(u64) * sg->dma_npages, GFP_ATOMIC);
+	if (!dma_pages) {
+		ret = -ENOMEM;
+		goto out_unmap;
+	}
+
+	for (i = j = 0; i < sg->dma_len; ++i) {
+		unsigned int dma_len = ib_sg_dma_len(dev, &sg->list[i]);
+		u64 dma_addr = ib_sg_dma_address(dev, &sg->list[i]);
+		u64 end_addr;
+
+		end_addr = dma_addr + dma_len;
+		dma_addr &= ~dma_mask;
+		for (; dma_addr < end_addr; dma_addr += dma_page_size)
+			dma_pages[j++] = dma_addr;
+		BUG_ON(j > sg->dma_npages);
+	}
+
+	return dma_pages;
+
+out_unmap:
+	ib_dma_unmap_sg(rds_iwdev->dev, sg->list, sg->len, DMA_BIDIRECTIONAL);
+	sg->dma_len = 0;
+	kfree(dma_pages);
+	return ERR_PTR(ret);
+}
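
A small standalone sketch of the rounding and page-count arithmetic used in rds_iw_map_scatterlist() above, applied to one contiguous DMA region. The 4 KiB page shift is only an assumption for the example:

#include <stdio.h>
#include <stdint.h>

static unsigned long npages(uint64_t dma_addr, unsigned int dma_len,
			    unsigned int dma_page_shift)
{
	uint64_t dma_mask = (1ULL << dma_page_shift) - 1;
	uint64_t end_addr = dma_addr + dma_len;

	dma_addr &= ~dma_mask;				/* round start down */
	end_addr = (end_addr + dma_mask) & ~dma_mask;	/* round end up */
	return (end_addr - dma_addr) >> dma_page_shift;
}

int main(void)
{
	/* 0x10800..0x12fff with 4KiB pages spans pages 0x10000..0x12000 -> 3 */
	printf("%lu\n", npages(0x10800, 0x2800, 12));
	return 0;
}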
+
+
+struct rds_iw_mr_pool *rds_iw_create_mr_pool(struct rds_iw_device *rds_iwdev)
+{
+	struct rds_iw_mr_pool *pool;
+
+	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+	if (!pool) {
+		printk(KERN_WARNING "RDS/IW: rds_iw_create_mr_pool alloc error\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pool->device = rds_iwdev;
+	INIT_LIST_HEAD(&pool->dirty_list);
+	INIT_LIST_HEAD(&pool->clean_list);
+	mutex_init(&pool->flush_lock);
+	spin_lock_init(&pool->list_lock);
+	INIT_WORK(&pool->flush_worker, rds_iw_mr_pool_flush_worker);
+
+	pool->max_message_size = fastreg_message_size;
+	pool->max_items = fastreg_pool_size;
+	pool->max_free_pinned = pool->max_items * pool->max_message_size / 4;
+	pool->max_pages = fastreg_message_size;
+
+	/* We never allow more than max_items MRs to be allocated.
+	 * When we exceed more than max_items_soft, we start freeing
+	 * items more aggressively.
+	 * Make sure that max_items > max_items_soft > max_items / 2
+	 */
+	pool->max_items_soft = pool->max_items * 3 / 4;
+
+	return pool;
+}
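
The sizing rules above, worked through with made-up numbers; fastreg_pool_size and fastreg_message_size are module parameters whose defaults are not shown in this hunk, so the values here are assumptions:

#include <stdio.h>

int main(void)
{
	unsigned long max_items = 2048;		/* assumed fastreg_pool_size */
	unsigned long max_message_size = 256;	/* assumed, in pages */

	printf("max_free_pinned = %lu pages\n",
	       max_items * max_message_size / 4);	/* 131072 */
	printf("max_items_soft  = %lu MRs\n",
	       max_items * 3 / 4);			/* 1536 */
	return 0;
}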
+
+void rds_iw_get_mr_info(struct rds_iw_device *rds_iwdev, struct rds_info_rdma_connection *iinfo)
+{
+	struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;
+
+	iinfo->rdma_mr_max = pool->max_items;
+	iinfo->rdma_mr_size = pool->max_pages;
+}
+
+void rds_iw_destroy_mr_pool(struct rds_iw_mr_pool *pool)
+{
+	flush_workqueue(rds_wq);
+	rds_iw_flush_mr_pool(pool, 1);
+	BUG_ON(atomic_read(&pool->item_count));
+	BUG_ON(atomic_read(&pool->free_pinned));
+	kfree(pool);
+}
+
+static inline struct rds_iw_mr *rds_iw_reuse_fmr(struct rds_iw_mr_pool *pool)
+{
+	struct rds_iw_mr *ibmr = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pool->list_lock, flags);
+	if (!list_empty(&pool->clean_list)) {
+		ibmr = list_entry(pool->clean_list.next, struct rds_iw_mr, mapping.m_list);
+		list_del_init(&ibmr->mapping.m_list);
+	}
+	spin_unlock_irqrestore(&pool->list_lock, flags);
+
+	return ibmr;
+}
+
+static struct rds_iw_mr *rds_iw_alloc_mr(struct rds_iw_device *rds_iwdev)
+{
+	struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;
+	struct rds_iw_mr *ibmr = NULL;
+	int err = 0, iter = 0;
+
+	while (1) {
+		if ((ibmr = rds_iw_reuse_fmr(pool)) != NULL)
+			return ibmr;
+
+		/* No clean MRs - now we have the choice of either
+		 * allocating a fresh MR up to the limit imposed by the
+		 * driver, or flushing any dirty unused MRs.
+		 * We try to avoid stalling in the send path if possible,
+		 * so we allocate as long as we're allowed to.
+		 *
+		 * We're fussy with enforcing the FMR limit, though. If the driver
+		 * tells us we can't use more than N fmrs, we shouldn't start
+		 * arguing with it */
+		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
+			break;
+
+		atomic_dec(&pool->item_count);
+
+		if (++iter > 2) {
+			rds_iw_stats_inc(s_iw_rdma_mr_pool_depleted);
+			return ERR_PTR(-EAGAIN);
+		}
+
+		/* We do have some empty MRs. Flush them out. */
+		rds_iw_stats_inc(s_iw_rdma_mr_pool_wait);
+		rds_iw_flush_mr_pool(pool, 0);
+	}
+
+	ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
+	if (!ibmr) {
+		err = -ENOMEM;
+		goto out_no_cigar;
+	}
+
+	spin_lock_init(&ibmr->mapping.m_lock);
+	INIT_LIST_HEAD(&ibmr->mapping.m_list);
+	ibmr->mapping.m_mr = ibmr;
+
+	err = rds_iw_init_fastreg(pool, ibmr);
+	if (err)
+		goto out_no_cigar;
+
+	rds_iw_stats_inc(s_iw_rdma_mr_alloc);
+	return ibmr;
+
+out_no_cigar:
+	if (ibmr) {
+		rds_iw_destroy_fastreg(pool, ibmr);
+		kfree(ibmr);
+	}
+	atomic_dec(&pool->item_count);
+	return ERR_PTR(err);
+}
+
+void rds_iw_sync_mr(void *trans_private, int direction)
+{
+	struct rds_iw_mr *ibmr = trans_private;
+	struct rds_iw_device *rds_iwdev = ibmr->device;
+
+	switch (direction) {
+	case DMA_FROM_DEVICE:
+		ib_dma_sync_sg_for_cpu(rds_iwdev->dev, ibmr->mapping.m_sg.list,
+			ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
+		break;
+	case DMA_TO_DEVICE:
+		ib_dma_sync_sg_for_device(rds_iwdev->dev, ibmr->mapping.m_sg.list,
+			ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
+		break;
+	}
+}
+
+static inline unsigned int rds_iw_flush_goal(struct rds_iw_mr_pool *pool, int free_all)
+{
+	unsigned int item_count;
+
+	item_count = atomic_read(&pool->item_count);
+	if (free_all)
+		return item_count;
+
+	return 0;
+}
+
+/*
+ * Flush our pool of MRs.
+ * At a minimum, all currently unused MRs are unmapped.
+ * If the number of MRs allocated exceeds the limit, we also try
+ * to free as many MRs as needed to get back to this limit.
+ */
+static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
+{
+	struct rds_iw_mr *ibmr, *next;
+	LIST_HEAD(unmap_list);
+	LIST_HEAD(kill_list);
+	unsigned long flags;
+	unsigned int nfreed = 0, ncleaned = 0, free_goal;
+	int ret = 0;
+
+	rds_iw_stats_inc(s_iw_rdma_mr_pool_flush);
+
+	mutex_lock(&pool->flush_lock);
+
+	spin_lock_irqsave(&pool->list_lock, flags);
+	/* Get the list of all mappings to be destroyed */
+	list_splice_init(&pool->dirty_list, &unmap_list);
+	if (free_all)
+		list_splice_init(&pool->clean_list, &kill_list);
+	spin_unlock_irqrestore(&pool->list_lock, flags);
+
+	free_goal = rds_iw_flush_goal(pool, free_all);
+
+	/* Batched invalidate of dirty MRs.
+	 * For FMR based MRs, the mappings on the unmap list are
+	 * actually members of an ibmr (ibmr->mapping). They either
+	 * migrate to the kill_list, or have been cleaned and should be
+	 * moved to the clean_list.
+	 * For fastregs, they will be dynamically allocated, and
+	 * will be destroyed by the unmap function.
+	 */
+	if (!list_empty(&unmap_list)) {
+		ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list, &kill_list);
+		/* If we've been asked to destroy all MRs, move those
+		 * that were simply cleaned to the kill list */
+		if (free_all)
+			list_splice_init(&unmap_list, &kill_list);
+	}
+
+	/* Destroy any MRs that are past their best before date */
+	list_for_each_entry_safe(ibmr, next, &kill_list, mapping.m_list) {
+		rds_iw_stats_inc(s_iw_rdma_mr_free);
+		list_del(&ibmr->mapping.m_list);
+		rds_iw_destroy_fastreg(pool, ibmr);
+		kfree(ibmr);
+		nfreed++;
+	}
+
+	/* Any mappings that remain are laundered ibmrs, which we can add
+	 * back to the clean list. */
+	if (!list_empty(&unmap_list)) {
+		spin_lock_irqsave(&pool->list_lock, flags);
+		list_splice(&unmap_list, &pool->clean_list);
+		spin_unlock_irqrestore(&pool->list_lock, flags);
+	}
+
+	atomic_sub(ncleaned, &pool->dirty_count);
+	atomic_sub(nfreed, &pool->item_count);
+
+	mutex_unlock(&pool->flush_lock);
+	return ret;
+}
+
+static void rds_iw_mr_pool_flush_worker(struct work_struct *work)
+{
+	struct rds_iw_mr_pool *pool = container_of(work, struct rds_iw_mr_pool, flush_worker);
+
+	rds_iw_flush_mr_pool(pool, 0);
+}
+
+void rds_iw_free_mr(void *trans_private, int invalidate)
+{
+	struct rds_iw_mr *ibmr = trans_private;
+	struct rds_iw_mr_pool *pool = ibmr->device->mr_pool;
+
+	rdsdebug("RDS/IW: free_mr nents %u\n", ibmr->mapping.m_sg.len);
+	if (!pool)
+		return;
+
+	/* Return it to the pool's free list */
+	rds_iw_free_fastreg(pool, ibmr);
+
+	/* If we've pinned too many pages, request a flush */
+	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned
+	 || atomic_read(&pool->dirty_count) >= pool->max_items / 10)
+		queue_work(rds_wq, &pool->flush_worker);
+
+	if (invalidate) {
+		if (likely(!in_interrupt())) {
+			rds_iw_flush_mr_pool(pool, 0);
+		} else {
+			/* We get here if the user created a MR marked
+			 * as use_once and invalidate at the same time. */
+			queue_work(rds_wq, &pool->flush_worker);
+		}
+	}
+}
+
+void rds_iw_flush_mrs(void)
+{
+	struct rds_iw_device *rds_iwdev;
+
+	list_for_each_entry(rds_iwdev, &rds_iw_devices, list) {
+		struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;
+
+		if (pool)
+			rds_iw_flush_mr_pool(pool, 0);
+	}
+}
+
+void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
+		    struct rds_sock *rs, u32 *key_ret)
+{
+	struct rds_iw_device *rds_iwdev;
+	struct rds_iw_mr *ibmr = NULL;
+	struct rdma_cm_id *cm_id;
+	int ret;
+
+	ret = rds_iw_get_device(rs, &rds_iwdev, &cm_id);
+	if (ret || !cm_id) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	if (!rds_iwdev->mr_pool) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	ibmr = rds_iw_alloc_mr(rds_iwdev);
+	if (IS_ERR(ibmr))
+		return ibmr;
+
+	ibmr->cm_id = cm_id;
+	ibmr->device = rds_iwdev;
+
+	ret = rds_iw_map_fastreg(rds_iwdev->mr_pool, ibmr, sg, nents);
+	if (ret == 0)
+		*key_ret = ibmr->mr->rkey;
+	else
+		printk(KERN_WARNING "RDS/IW: failed to map mr (errno=%d)\n", ret);
+
+out:
+	if (ret) {
+		if (ibmr)
+			rds_iw_free_mr(ibmr, 0);
+		ibmr = ERR_PTR(ret);
+	}
+	return ibmr;
+}
+
+/*
+ * iWARP fastreg handling
+ *
+ * The life cycle of a fastreg registration is a bit different from
+ * FMRs.
+ * The idea behind fastreg is to have one MR, to which we bind different
+ * mappings over time. To avoid stalling on the expensive map and invalidate
+ * operations, these operations are pipelined on the same send queue on
+ * which we want to send the message containing the r_key.
+ *
+ * This creates a bit of a problem for us, as we do not have the destination
+ * IP in GET_MR, so the connection must be set up prior to the GET_MR call for
+ * RDMA to be set up correctly.  If a fastreg request is present, rds_iw_xmit
+ * will try to queue a LOCAL_INV (if needed) and a FAST_REG_MR work request
+ * before queuing the SEND. When completions for these arrive, the MR is
+ * marked with a bit showing that RDMA can be performed.
+ *
+ * There is another interesting aspect that's related to invalidation.
+ * The application can request that a mapping is invalidated in FREE_MR.
+ * The expectation there is that this invalidation step includes ALL
+ * PREVIOUSLY FREED MRs.
+ */
+static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool,
+				struct rds_iw_mr *ibmr)
+{
+	struct rds_iw_device *rds_iwdev = pool->device;
+	struct ib_fast_reg_page_list *page_list = NULL;
+	struct ib_mr *mr;
+	int err;
+
+	mr = ib_alloc_fast_reg_mr(rds_iwdev->pd, pool->max_message_size);
+	if (IS_ERR(mr)) {
+		err = PTR_ERR(mr);
+
+		printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_mr failed (err=%d)\n", err);
+		return err;
+	}
+
+	/* FIXME - this is overkill, but mapping->m_sg.dma_len/mapping->m_sg.dma_npages
+	 * is not filled in.
+	 */
+	page_list = ib_alloc_fast_reg_page_list(rds_iwdev->dev, pool->max_message_size);
+	if (IS_ERR(page_list)) {
+		err = PTR_ERR(page_list);
+
+		printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_page_list failed (err=%d)\n", err);
+		ib_dereg_mr(mr);
+		return err;
+	}
+
+	ibmr->page_list = page_list;
+	ibmr->mr = mr;
+	return 0;
+}
+
+static int rds_iw_rdma_build_fastreg(struct rds_iw_mapping *mapping)
+{
+	struct rds_iw_mr *ibmr = mapping->m_mr;
+	struct ib_send_wr f_wr, *failed_wr;
+	int ret;
+
+	/*
+	 * Perform a WR for the fast_reg_mr. Each individual page
+	 * in the sg list is added to the fast reg page list and placed
+	 * inside the fast_reg_mr WR.  The key used is a rolling 8bit
+	 * counter, which should guarantee uniqueness.
+	 */
+	ib_update_fast_reg_key(ibmr->mr, ibmr->remap_count++);
+	mapping->m_rkey = ibmr->mr->rkey;
+
+	memset(&f_wr, 0, sizeof(f_wr));
+	f_wr.wr_id = RDS_IW_FAST_REG_WR_ID;
+	f_wr.opcode = IB_WR_FAST_REG_MR;
+	f_wr.wr.fast_reg.length = mapping->m_sg.bytes;
+	f_wr.wr.fast_reg.rkey = mapping->m_rkey;
+	f_wr.wr.fast_reg.page_list = ibmr->page_list;
+	f_wr.wr.fast_reg.page_list_len = mapping->m_sg.dma_len;
+	f_wr.wr.fast_reg.page_shift = ibmr->device->page_shift;
+	f_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE |
+				IB_ACCESS_REMOTE_READ |
+				IB_ACCESS_REMOTE_WRITE;
+	f_wr.wr.fast_reg.iova_start = 0;
+	f_wr.send_flags = IB_SEND_SIGNALED;
+
+	failed_wr = &f_wr;
+	ret = ib_post_send(ibmr->cm_id->qp, &f_wr, &failed_wr);
+	BUG_ON(failed_wr != &f_wr);
+	if (ret) {
+		printk(KERN_WARNING "RDS/IW: %s %d ib_post_send returned %d\n",
+			__func__, __LINE__, ret);
+		goto out;
+	}
+
+out:
+	return ret;
+}
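
A hedged sketch of the 8-bit "key rolling" mentioned in the comment above. It assumes ib_update_fast_reg_key() replaces only the low byte of the rkey with the new key value, so each remap of the same MR hands out a distinct r_key:

#include <stdio.h>
#include <stdint.h>

static uint32_t roll_key(uint32_t rkey, uint8_t remap_count)
{
	return (rkey & 0xffffff00u) | remap_count;
}

int main(void)
{
	uint32_t rkey = 0x00421500;
	uint8_t remap_count = 0;

	/* three successive remaps of one MR yield three distinct rkeys */
	for (int i = 0; i < 3; i++)
		printf("0x%08x\n",
		       (unsigned)(rkey = roll_key(rkey, remap_count++)));
	return 0;
}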
+
+static int rds_iw_rdma_fastreg_inv(struct rds_iw_mr *ibmr)
+{
+	struct ib_send_wr s_wr, *failed_wr;
+	int ret = 0;
+
+	if (!ibmr->cm_id->qp || !ibmr->mr)
+		goto out;
+
+	memset(&s_wr, 0, sizeof(s_wr));
+	s_wr.wr_id = RDS_IW_LOCAL_INV_WR_ID;
+	s_wr.opcode = IB_WR_LOCAL_INV;
+	s_wr.ex.invalidate_rkey = ibmr->mr->rkey;
+	s_wr.send_flags = IB_SEND_SIGNALED;
+
+	failed_wr = &s_wr;
+	ret = ib_post_send(ibmr->cm_id->qp, &s_wr, &failed_wr);
+	if (ret) {
+		printk(KERN_WARNING "RDS/IW: %s %d ib_post_send returned %d\n",
+			__func__, __LINE__, ret);
+		goto out;
+	}
+out:
+	return ret;
+}
+
+static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
+			struct rds_iw_mr *ibmr,
+			struct scatterlist *sg,
+			unsigned int sg_len)
+{
+	struct rds_iw_device *rds_iwdev = pool->device;
+	struct rds_iw_mapping *mapping = &ibmr->mapping;
+	u64 *dma_pages;
+	int i, ret = 0;
+
+	rds_iw_set_scatterlist(&mapping->m_sg, sg, sg_len);
+
+	dma_pages = rds_iw_map_scatterlist(rds_iwdev,
+				&mapping->m_sg,
+				rds_iwdev->page_shift);
+	if (IS_ERR(dma_pages)) {
+		ret = PTR_ERR(dma_pages);
+		dma_pages = NULL;
+		goto out;
+	}
+
+	if (mapping->m_sg.dma_len > pool->max_message_size) {
+		ret = -EMSGSIZE;
+		goto out;
+	}
+
+	for (i = 0; i < mapping->m_sg.dma_npages; ++i)
+		ibmr->page_list->page_list[i] = dma_pages[i];
+
+	ret = rds_iw_rdma_build_fastreg(mapping);
+	if (ret)
+		goto out;
+
+	rds_iw_stats_inc(s_iw_rdma_mr_used);
+
+out:
+	kfree(dma_pages);
+
+	return ret;
+}
+
+/*
+ * "Free" a fastreg MR.
+ */
+static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool,
+		struct rds_iw_mr *ibmr)
+{
+	unsigned long flags;
+	int ret;
+
+	if (!ibmr->mapping.m_sg.dma_len)
+		return;
+
+	ret = rds_iw_rdma_fastreg_inv(ibmr);
+	if (ret)
+		return;
+
+	/* Try to post the LOCAL_INV WR to the queue. */
+	spin_lock_irqsave(&pool->list_lock, flags);
+
+	list_add_tail(&ibmr->mapping.m_list, &pool->dirty_list);
+	atomic_add(ibmr->mapping.m_sg.len, &pool->free_pinned);
+	atomic_inc(&pool->dirty_count);
+
+	spin_unlock_irqrestore(&pool->list_lock, flags);
+}
+
+static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
+				struct list_head *unmap_list,
+				struct list_head *kill_list)
+{
+	struct rds_iw_mapping *mapping, *next;
+	unsigned int ncleaned = 0;
+	LIST_HEAD(laundered);
+
+	/* Batched invalidation of fastreg MRs.
+	 * Why do we do it this way, even though we could pipeline unmap
+	 * and remap? The reason is the application semantics - when the
+	 * application requests an invalidation of MRs, it expects all
+	 * previously released R_Keys to become invalid.
+	 *
+	 * If we implement MR reuse naively, we risk memory corruption
+	 * (this has actually been observed). So the default behavior
+	 * requires that a MR goes through an explicit unmap operation before
+	 * we can reuse it again.
+	 *
+	 * We could probably improve on this a little, by allowing immediate
+	 * reuse of an MR on the same socket (e.g. you could add a small
+	 * cache of unused MRs to struct rds_socket - GET_MR could grab one
+	 * of these without requiring an explicit invalidate).
+	 */
+	while (!list_empty(unmap_list)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&pool->list_lock, flags);
+		list_for_each_entry_safe(mapping, next, unmap_list, m_list) {
+			list_move(&mapping->m_list, &laundered);
+			ncleaned++;
+		}
+		spin_unlock_irqrestore(&pool->list_lock, flags);
+	}
+
+	/* Move all laundered mappings back to the unmap list.
+	 * We do not kill any WRs right now - it doesn't seem the
+	 * fastreg API has a max_remap limit. */
+	list_splice_init(&laundered, unmap_list);
+
+	return ncleaned;
+}
+
+static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool,
+		struct rds_iw_mr *ibmr)
+{
+	if (ibmr->page_list)
+		ib_free_fast_reg_page_list(ibmr->page_list);
+	if (ibmr->mr)
+		ib_dereg_mr(ibmr->mr);
+}

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_recv.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_recv.c	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_recv.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,902 @@
+/*
+ * Copyright (c) 2006 Oracle.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <rdma/rdma_cm.h>
+
+#include "rds.h"
+#include "iw.h"
+
+static struct kmem_cache *rds_iw_incoming_slab;
+static struct kmem_cache *rds_iw_frag_slab;
+static atomic_t	rds_iw_allocation = ATOMIC_INIT(0);
+
+static void rds_iw_frag_drop_page(struct rds_page_frag *frag)
+{
+	rdsdebug("frag %p page %p\n", frag, frag->f_page);
+	__free_page(frag->f_page);
+	frag->f_page = NULL;
+}
+
+static void rds_iw_frag_free(struct rds_page_frag *frag)
+{
+	rdsdebug("frag %p page %p\n", frag, frag->f_page);
+	BUG_ON(frag->f_page != NULL);
+	kmem_cache_free(rds_iw_frag_slab, frag);
+}
+
+/*
+ * We map a page at a time.  Its fragments are posted in order.  This
+ * is called in fragment order as the fragments get receive completion events.
+ * Only the last frag in the page performs the unmapping.
+ *
+ * It's OK for ring cleanup to call this in whatever order it likes because
+ * DMA is not in flight and so we can unmap while other ring entries still
+ * hold page references in their frags.
+ */
+static void rds_iw_recv_unmap_page(struct rds_iw_connection *ic,
+				   struct rds_iw_recv_work *recv)
+{
+	struct rds_page_frag *frag = recv->r_frag;
+
+	rdsdebug("recv %p frag %p page %p\n", recv, frag, frag->f_page);
+	if (frag->f_mapped)
+		ib_dma_unmap_page(ic->i_cm_id->device,
+			       frag->f_mapped,
+			       RDS_FRAG_SIZE, DMA_FROM_DEVICE);
+	frag->f_mapped = 0;
+}
+
+void rds_iw_recv_init_ring(struct rds_iw_connection *ic)
+{
+	struct rds_iw_recv_work *recv;
+	u32 i;
+
+	for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
+		struct ib_sge *sge;
+
+		recv->r_iwinc = NULL;
+		recv->r_frag = NULL;
+
+		recv->r_wr.next = NULL;
+		recv->r_wr.wr_id = i;
+		recv->r_wr.sg_list = recv->r_sge;
+		recv->r_wr.num_sge = RDS_IW_RECV_SGE;
+
+		sge = rds_iw_data_sge(ic, recv->r_sge);
+		sge->addr = 0;
+		sge->length = RDS_FRAG_SIZE;
+		sge->lkey = 0;
+
+		sge = rds_iw_header_sge(ic, recv->r_sge);
+		sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
+		sge->length = sizeof(struct rds_header);
+		sge->lkey = 0;
+	}
+}
+
+static void rds_iw_recv_clear_one(struct rds_iw_connection *ic,
+				  struct rds_iw_recv_work *recv)
+{
+	if (recv->r_iwinc) {
+		rds_inc_put(&recv->r_iwinc->ii_inc);
+		recv->r_iwinc = NULL;
+	}
+	if (recv->r_frag) {
+		rds_iw_recv_unmap_page(ic, recv);
+		if (recv->r_frag->f_page)
+			rds_iw_frag_drop_page(recv->r_frag);
+		rds_iw_frag_free(recv->r_frag);
+		recv->r_frag = NULL;
+	}
+}
+
+void rds_iw_recv_clear_ring(struct rds_iw_connection *ic)
+{
+	u32 i;
+
+	for (i = 0; i < ic->i_recv_ring.w_nr; i++)
+		rds_iw_recv_clear_one(ic, &ic->i_recvs[i]);
+
+	if (ic->i_frag.f_page)
+		rds_iw_frag_drop_page(&ic->i_frag);
+}
+
+static int rds_iw_recv_refill_one(struct rds_connection *conn,
+				  struct rds_iw_recv_work *recv,
+				  gfp_t kptr_gfp, gfp_t page_gfp)
+{
+	struct rds_iw_connection *ic = conn->c_transport_data;
+	dma_addr_t dma_addr;
+	struct ib_sge *sge;
+	int ret = -ENOMEM;
+
+	if (recv->r_iwinc == NULL) {
+		if (atomic_read(&rds_iw_allocation) >= rds_iw_sysctl_max_recv_allocation) {
+			rds_iw_stats_inc(s_iw_rx_alloc_limit);
+			goto out;
+		}
+		recv->r_iwinc = kmem_cache_alloc(rds_iw_incoming_slab,
+						 kptr_gfp);
+		if (recv->r_iwinc == NULL)
+			goto out;
+		atomic_inc(&rds_iw_allocation);
+		INIT_LIST_HEAD(&recv->r_iwinc->ii_frags);
+		rds_inc_init(&recv->r_iwinc->ii_inc, conn, conn->c_faddr);
+	}
+
+	if (recv->r_frag == NULL) {
+		recv->r_frag = kmem_cache_alloc(rds_iw_frag_slab, kptr_gfp);
+		if (recv->r_frag == NULL)
+			goto out;
+		INIT_LIST_HEAD(&recv->r_frag->f_item);
+		recv->r_frag->f_page = NULL;
+	}
+
+	if (ic->i_frag.f_page == NULL) {
+		ic->i_frag.f_page = alloc_page(page_gfp);
+		if (ic->i_frag.f_page == NULL)
+			goto out;
+		ic->i_frag.f_offset = 0;
+	}
+
+	dma_addr = ib_dma_map_page(ic->i_cm_id->device,
+				  ic->i_frag.f_page,
+				  ic->i_frag.f_offset,
+				  RDS_FRAG_SIZE,
+				  DMA_FROM_DEVICE);
+	if (ib_dma_mapping_error(ic->i_cm_id->device, dma_addr))
+		goto out;
+
+	/*
+	 * Once we get the RDS_PAGE_LAST_OFF frag then rds_iw_frag_unmap()
+	 * must be called on this recv.  This happens as completions hit
+	 * in order or on connection shutdown.
+	 */
+	recv->r_frag->f_page = ic->i_frag.f_page;
+	recv->r_frag->f_offset = ic->i_frag.f_offset;
+	recv->r_frag->f_mapped = dma_addr;
+
+	sge = rds_iw_data_sge(ic, recv->r_sge);
+	sge->addr = dma_addr;
+	sge->length = RDS_FRAG_SIZE;
+
+	sge = rds_iw_header_sge(ic, recv->r_sge);
+	sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
+	sge->length = sizeof(struct rds_header);
+
+	get_page(recv->r_frag->f_page);
+
+	if (ic->i_frag.f_offset < RDS_PAGE_LAST_OFF) {
+		ic->i_frag.f_offset += RDS_FRAG_SIZE;
+	} else {
+		put_page(ic->i_frag.f_page);
+		ic->i_frag.f_page = NULL;
+		ic->i_frag.f_offset = 0;
+	}
+
+	ret = 0;
+out:
+	return ret;
+}
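
A toy sketch of how ic->i_frag.f_offset advances through a page in the refill path above. The 4096/2048 byte sizes are illustrative assumptions; the real RDS_FRAG_SIZE and RDS_PAGE_LAST_OFF come from rds.h:

#include <stdio.h>

#define PAGE_BYTES	4096	/* assumed page size */
#define FRAG_BYTES	2048	/* assumed fragment size */
#define LAST_OFF	(PAGE_BYTES - FRAG_BYTES)

int main(void)
{
	unsigned int f_offset = 0;

	for (int recv = 0; recv < 5; recv++) {
		printf("recv %d uses offset %u\n", recv, f_offset);
		if (f_offset < LAST_OFF) {
			f_offset += FRAG_BYTES;
		} else {
			/* last frag of this page: the real code drops the
			 * page ref and allocates a fresh page next time */
			f_offset = 0;
		}
	}
	return 0;
}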
+
+/*
+ * This tries to allocate and post unused work requests after making sure that
+ * they have all the allocations they need to queue received fragments into
+ * sockets.  The i_recv_mutex is held here so that ring_alloc and _unalloc
+ * pairs don't go unmatched.
+ *
+ * -1 is returned if posting fails due to temporary resource exhaustion.
+ */
+int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
+		       gfp_t page_gfp, int prefill)
+{
+	struct rds_iw_connection *ic = conn->c_transport_data;
+	struct rds_iw_recv_work *recv;
+	struct ib_recv_wr *failed_wr;
+	unsigned int posted = 0;
+	int ret = 0;
+	u32 pos;
+
+	while ((prefill || rds_conn_up(conn))
+			&& rds_iw_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
+		if (pos >= ic->i_recv_ring.w_nr) {
+			printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
+					pos);
+			ret = -EINVAL;
+			break;
+		}
+
+		recv = &ic->i_recvs[pos];
+		ret = rds_iw_recv_refill_one(conn, recv, kptr_gfp, page_gfp);
+		if (ret) {
+			ret = -1;
+			break;
+		}
+
+		/* XXX when can this fail? */
+		ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
+		rdsdebug("recv %p iwinc %p page %p addr %lu ret %d\n", recv,
+			 recv->r_iwinc, recv->r_frag->f_page,
+			 (long) recv->r_frag->f_mapped, ret);
+		if (ret) {
+			rds_iw_conn_error(conn, "recv post on "
+			       "%u.%u.%u.%u returned %d, disconnecting and "
+			       "reconnecting\n", NIPQUAD(conn->c_faddr),
+			       ret);
+			ret = -1;
+			break;
+		}
+
+		posted++;
+	}
+
+	/* We're doing flow control - update the window. */
+	if (ic->i_flowctl && posted)
+		rds_iw_advertise_credits(conn, posted);
+
+	if (ret)
+		rds_iw_ring_unalloc(&ic->i_recv_ring, 1);
+	return ret;
+}
+
+void rds_iw_inc_purge(struct rds_incoming *inc)
+{
+	struct rds_iw_incoming *iwinc;
+	struct rds_page_frag *frag;
+	struct rds_page_frag *pos;
+
+	iwinc = container_of(inc, struct rds_iw_incoming, ii_inc);
+	rdsdebug("purging iwinc %p inc %p\n", iwinc, inc);
+
+	list_for_each_entry_safe(frag, pos, &iwinc->ii_frags, f_item) {
+		list_del_init(&frag->f_item);
+		rds_iw_frag_drop_page(frag);
+		rds_iw_frag_free(frag);
+	}
+}
+
+void rds_iw_inc_free(struct rds_incoming *inc)
+{
+	struct rds_iw_incoming *iwinc;
+
+	iwinc = container_of(inc, struct rds_iw_incoming, ii_inc);
+
+	rds_iw_inc_purge(inc);
+	rdsdebug("freeing iwinc %p inc %p\n", iwinc, inc);
+	BUG_ON(!list_empty(&iwinc->ii_frags));
+	kmem_cache_free(rds_iw_incoming_slab, iwinc);
+	atomic_dec(&rds_iw_allocation);
+	BUG_ON(atomic_read(&rds_iw_allocation) < 0);
+}
+
+int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
+			    size_t size)
+{
+	struct rds_iw_incoming *iwinc;
+	struct rds_page_frag *frag;
+	struct iovec *iov = first_iov;
+	unsigned long to_copy;
+	unsigned long frag_off = 0;
+	unsigned long iov_off = 0;
+	int copied = 0;
+	int ret;
+	u32 len;
+
+	iwinc = container_of(inc, struct rds_iw_incoming, ii_inc);
+	frag = list_entry(iwinc->ii_frags.next, struct rds_page_frag, f_item);
+	len = be32_to_cpu(inc->i_hdr.h_len);
+
+	while (copied < size && copied < len) {
+		if (frag_off == RDS_FRAG_SIZE) {
+			frag = list_entry(frag->f_item.next,
+					  struct rds_page_frag, f_item);
+			frag_off = 0;
+		}
+		while (iov_off == iov->iov_len) {
+			iov_off = 0;
+			iov++;
+		}
+
+		to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off);
+		to_copy = min_t(size_t, to_copy, size - copied);
+		to_copy = min_t(unsigned long, to_copy, len - copied);
+
+		rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
+			 "[%p, %lu] + %lu\n",
+			 to_copy, iov->iov_base, iov->iov_len, iov_off,
+			 frag->f_page, frag->f_offset, frag_off);
+
+		/* XXX needs + offset for multiple recvs per page */
+		ret = rds_page_copy_to_user(frag->f_page,
+					    frag->f_offset + frag_off,
+					    iov->iov_base + iov_off,
+					    to_copy);
+		if (ret) {
+			copied = ret;
+			break;
+		}
+
+		iov_off += to_copy;
+		frag_off += to_copy;
+		copied += to_copy;
+	}
+
+	return copied;
+}
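
A compact sketch of the chunk-size computation in the copy loop above: each pass copies the minimum of what is left in the current iovec, the current fragment, the caller's buffer, and the message. The concrete numbers below are made up:

#include <stdio.h>

static unsigned long min4(unsigned long a, unsigned long b,
			  unsigned long c, unsigned long d)
{
	unsigned long m = a < b ? a : b;

	if (c < m)
		m = c;
	if (d < m)
		m = d;
	return m;
}

int main(void)
{
	unsigned long frag_size = 4096;			/* assumed RDS_FRAG_SIZE */
	unsigned long iov_len = 1500, iov_off = 0;	/* room left in this iovec */
	unsigned long frag_off = 3000;			/* consumed from this frag */
	unsigned long size = 8192, len = 6000, copied = 2000;

	/* limited by the fragment here: 1096 bytes are copied this round */
	printf("%lu\n", min4(iov_len - iov_off, frag_size - frag_off,
			     size - copied, len - copied));
	return 0;
}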
+
+/* ic starts out kzalloc()ed */
+void rds_iw_recv_init_ack(struct rds_iw_connection *ic)
+{
+	struct ib_send_wr *wr = &ic->i_ack_wr;
+	struct ib_sge *sge = &ic->i_ack_sge;
+
+	sge->addr = ic->i_ack_dma;
+	sge->length = sizeof(struct rds_header);
+	sge->lkey = rds_iw_local_dma_lkey(ic);
+
+	wr->sg_list = sge;
+	wr->num_sge = 1;
+	wr->opcode = IB_WR_SEND;
+	wr->wr_id = RDS_IW_ACK_WR_ID;
+	wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
+}
+
+/*
+ * You'd think that with reliable IB connections you wouldn't need to ack
+ * messages that have been received.  The problem is that IB hardware generates
+ * an ack message before it has DMAed the message into memory.  This creates a
+ * potential message loss if the HCA is disabled for any reason between when it
+ * sends the ack and when the message is DMAed and processed.  This is only a
+ * potential issue if another HCA is available for fail-over.
+ *
+ * When the remote host receives our ack they'll free the sent message from
+ * their send queue.  To decrease the latency of this we always send an ack
+ * immediately after we've received messages.
+ *
+ * For simplicity, we only have one ack in flight at a time.  This puts
+ * pressure on senders to have deep enough send queues to absorb the latency of
+ * a single ack frame being in flight.  This might not be good enough.
+ *
+ * This is implemented by having a long-lived send_wr and sge which point to a
+ * statically allocated ack frame.  This ack wr does not fall under the ring
+ * accounting that the tx and rx wrs do.  The QP attribute specifically makes
+ * room for it beyond the ring size.  Send completion notices its special
+ * wr_id and avoids working with the ring in that case.
+ */
+#ifndef KERNEL_HAS_ATOMIC64
+static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
+				int ack_required)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ic->i_ack_lock, flags);
+	ic->i_ack_next = seq;
+	if (ack_required)
+		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
+}
+
+static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
+{
+	unsigned long flags;
+	u64 seq;
+
+	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+
+	spin_lock_irqsave(&ic->i_ack_lock, flags);
+	seq = ic->i_ack_next;
+	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
+
+	return seq;
+}
+#else
+static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
+				int ack_required)
+{
+	atomic64_set(&ic->i_ack_next, seq);
+	if (ack_required) {
+		smp_mb__before_clear_bit();
+		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+	}
+}
+
+static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
+{
+	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+	smp_mb__after_clear_bit();
+
+	return atomic64_read(&ic->i_ack_next);
+}
+#endif
+
+
+static void rds_iw_send_ack(struct rds_iw_connection *ic, unsigned int adv_credits)
+{
+	struct rds_header *hdr = ic->i_ack;
+	struct ib_send_wr *failed_wr;
+	u64 seq;
+	int ret;
+
+	seq = rds_iw_get_ack(ic);
+
+	rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
+	rds_message_populate_header(hdr, 0, 0, 0);
+	hdr->h_ack = cpu_to_be64(seq);
+	hdr->h_credit = adv_credits;
+	rds_message_make_checksum(hdr);
+	ic->i_ack_queued = jiffies;
+
+	ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr);
+	if (unlikely(ret)) {
+		/* Failed to send. Release the WR, and
+		 * force another ACK.
+		 */
+		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
+		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+
+		rds_iw_stats_inc(s_iw_ack_send_failure);
+		/* Need to finesse this later. */
+		BUG();
+	} else
+		rds_iw_stats_inc(s_iw_ack_sent);
+}
+
+/*
+ * There are 3 ways of getting acknowledgements to the peer:
+ *  1.	We call rds_iw_attempt_ack from the recv completion handler
+ *	to send an ACK-only frame.
+ *	However, there can be only one such frame in the send queue
+ *	at any time, so we may have to postpone it.
+ *  2.	When another (data) packet is transmitted while there's
+ *	an ACK in the queue, we piggyback the ACK sequence number
+ *	on the data packet.
+ *  3.	If the ACK WR is done sending, we get called from the
+ *	send queue completion handler, and check whether there's
+ *	another ACK pending (postponed because the WR was on the
+ *	queue). If so, we transmit it.
+ *
+ * We maintain 2 variables:
+ *  -	i_ack_flags, which keeps track of whether the ACK WR
+ *	is currently in the send queue or not (IB_ACK_IN_FLIGHT)
+ *  -	i_ack_next, which is the last sequence number we received
+ *
+ * Potentially, send queue and receive queue handlers can run concurrently.
+ * It would be nice to not have to use a spinlock to synchronize things,
+ * but the one problem that rules this out is that 64bit updates are
+ * not atomic on all platforms. Things would be a lot simpler if
+ * we had atomic64 or maybe cmpxchg64 everywhere.
+ *
+ * Reconnecting complicates this picture just slightly. When we
+ * reconnect, we may be seeing duplicate packets. The peer
+ * is retransmitting them, because it hasn't seen an ACK for
+ * them. It is important that we ACK these.
+ *
+ * ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with
+ * this flag set *MUST* be acknowledged immediately.
+ */
+
+/*
+ * When we get here, we're called from the recv queue handler.
+ * Check whether we ought to transmit an ACK.
+ */
+void rds_iw_attempt_ack(struct rds_iw_connection *ic)
+{
+	unsigned int adv_credits;
+
+	if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
+		return;
+
+	if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
+		rds_iw_stats_inc(s_iw_ack_send_delayed);
+		return;
+	}
+
+	/* Can we get a send credit? */
+	if (!rds_iw_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
+		rds_iw_stats_inc(s_iw_tx_throttle);
+		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
+		return;
+	}
+
+	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+	rds_iw_send_ack(ic, adv_credits);
+}
+
+/*
+ * We get here from the send completion handler, when the
+ * adapter tells us the ACK frame was sent.
+ */
+void rds_iw_ack_send_complete(struct rds_iw_connection *ic)
+{
+	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
+	rds_iw_attempt_ack(ic);
+}
+
+/*
+ * This is called by the regular xmit code when it wants to piggyback
+ * an ACK on an outgoing frame.
+ */
+u64 rds_iw_piggyb_ack(struct rds_iw_connection *ic)
+{
+	if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
+		rds_iw_stats_inc(s_iw_ack_send_piggybacked);
+	return rds_iw_get_ack(ic);
+}
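
A self-contained toy model of the two flags and the three ACK delivery paths described in the long comment above. It is only a sketch of the control flow, using plain booleans where the driver uses atomic bitops on ic->i_ack_flags:

#include <stdbool.h>
#include <stdio.h>

static bool ack_requested, ack_in_flight;

static void recv_path(void)		/* path 1: explicit ACK frame */
{
	if (!ack_requested)
		return;
	if (ack_in_flight) {		/* only one ACK WR at a time */
		printf("ack delayed\n");
		return;
	}
	ack_in_flight = true;
	ack_requested = false;
	printf("ack frame posted\n");
}

static void xmit_path(void)		/* path 2: piggyback on data */
{
	if (ack_requested) {
		ack_requested = false;
		printf("ack piggybacked\n");
	}
}

static void ack_send_complete(void)	/* path 3: WR finished, retry */
{
	ack_in_flight = false;
	recv_path();
}

int main(void)
{
	ack_requested = true;
	recv_path();		/* posts an ACK frame */
	ack_requested = true;
	recv_path();		/* delayed: previous ACK still in flight */
	ack_send_complete();	/* completion lets the postponed ACK go out */
	xmit_path();		/* nothing pending now */
	return 0;
}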
+
+/*
+ * It's kind of lame that we're copying from the posted receive pages into
+ * long-lived bitmaps.  We could have posted the bitmaps and rdma written into
+ * them.  But receiving new congestion bitmaps should be a *rare* event, so
+ * hopefully we won't need to invest that complexity in making it more
+ * efficient.  By copying we can share a simpler core with TCP which has to
+ * copy.
+ */
+static void rds_iw_cong_recv(struct rds_connection *conn,
+			      struct rds_iw_incoming *iwinc)
+{
+	struct rds_cong_map *map;
+	unsigned int map_off;
+	unsigned int map_page;
+	struct rds_page_frag *frag;
+	unsigned long frag_off;
+	unsigned long to_copy;
+	unsigned long copied;
+	uint64_t uncongested = 0;
+	void *addr;
+
+	/* catch completely corrupt packets */
+	if (be32_to_cpu(iwinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
+		return;
+
+	map = conn->c_fcong;
+	map_page = 0;
+	map_off = 0;
+
+	frag = list_entry(iwinc->ii_frags.next, struct rds_page_frag, f_item);
+	frag_off = 0;
+
+	copied = 0;
+
+	while (copied < RDS_CONG_MAP_BYTES) {
+		uint64_t *src, *dst;
+		unsigned int k;
+
+		to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
+		BUG_ON(to_copy & 7); /* Must be 64bit aligned. */
+
+		addr = kmap_atomic(frag->f_page, KM_SOFTIRQ0);
+
+		src = addr + frag_off;
+		dst = (void *)map->m_page_addrs[map_page] + map_off;
+		for (k = 0; k < to_copy; k += 8) {
+			/* Record ports that became uncongested, i.e.
+			 * bits that changed from 1 to 0. */
+			uncongested |= ~(*src) & *dst;
+			*dst++ = *src++;
+		}
+		kunmap_atomic(addr, KM_SOFTIRQ0);
+
+		copied += to_copy;
+
+		map_off += to_copy;
+		if (map_off == PAGE_SIZE) {
+			map_off = 0;
+			map_page++;
+		}
+
+		frag_off += to_copy;
+		if (frag_off == RDS_FRAG_SIZE) {
+			frag = list_entry(frag->f_item.next,
+					  struct rds_page_frag, f_item);
+			frag_off = 0;
+		}
+	}
+
+	/* the congestion map is in little endian order */
+	uncongested = le64_to_cpu(uncongested);
+
+	rds_cong_map_updated(map, uncongested);
+}
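
A short sketch of the bit-diff in the copy loop above: ~incoming & current picks out map bits that were set in the stored copy but are clear in the update just received. The port numbering in the example is an assumption:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t current_map  = 0x00000000000000F0ULL; /* bits 4-7 set */
	uint64_t incoming_map = 0x0000000000000030ULL; /* bits 4-5 still set */
	uint64_t uncongested  = ~incoming_map & current_map;

	/* prints 0x00000000000000c0: bits 6 and 7 were just cleared */
	printf("newly cleared bits: 0x%016llx\n",
	       (unsigned long long)uncongested);
	return 0;
}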
+
+/*
+ * Rings are posted with all the allocations they'll need to queue the
+ * incoming message to the receiving socket so this can't fail.
+ * All fragments start with a header, so we can make sure we're not receiving
+ * garbage, and we can tell a small 8 byte fragment from an ACK frame.
+ */
+struct rds_iw_ack_state {
+	u64		ack_next;
+	u64		ack_recv;
+	unsigned int	ack_required:1;
+	unsigned int	ack_next_valid:1;
+	unsigned int	ack_recv_valid:1;
+};
+
+static void rds_iw_process_recv(struct rds_connection *conn,
+				struct rds_iw_recv_work *recv, u32 byte_len,
+				struct rds_iw_ack_state *state)
+{
+	struct rds_iw_connection *ic = conn->c_transport_data;
+	struct rds_iw_incoming *iwinc = ic->i_iwinc;
+	struct rds_header *ihdr, *hdr;
+
+	/* XXX shut down the connection if port 0,0 are seen? */
+
+	rdsdebug("ic %p iwinc %p recv %p byte len %u\n", ic, iwinc, recv,
+		 byte_len);
+
+	if (byte_len < sizeof(struct rds_header)) {
+		rds_iw_conn_error(conn, "incoming message "
+		       "from %u.%u.%u.%u didn't inclue a "
+		       "header, disconnecting and "
+		       "reconnecting\n",
+		       NIPQUAD(conn->c_faddr));
+		return;
+	}
+	byte_len -= sizeof(struct rds_header);
+
+	ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];
+
+	/* Validate the checksum. */
+	if (!rds_message_verify_checksum(ihdr)) {
+		rds_iw_conn_error(conn, "incoming message "
+		       "from %u.%u.%u.%u has corrupted header - "
+		       "forcing a reconnect\n",
+		       NIPQUAD(conn->c_faddr));
+		rds_stats_inc(s_recv_drop_bad_checksum);
+		return;
+	}
+
+	/* Process the ACK sequence which comes with every packet */
+	state->ack_recv = be64_to_cpu(ihdr->h_ack);
+	state->ack_recv_valid = 1;
+
+	/* Process the credits update if there was one */
+	if (ihdr->h_credit)
+		rds_iw_send_add_credits(conn, ihdr->h_credit);
+
+	if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && byte_len == 0) {
+		/* This is an ACK-only packet. It gets special
+		 * treatment here because, historically, ACKs
+		 * were rather special beasts.
+		 */
+		rds_iw_stats_inc(s_iw_ack_received);
+
+		/*
+		 * Usually the frags make their way on to incs and are then freed as
+		 * the inc is freed.  We don't go that route, so we have to drop the
+		 * page ref ourselves.  We can't just leave the page on the recv
+		 * because that confuses the dma mapping of pages and each recv's use
+		 * of a partial page.  We can leave the frag, though; it will be
+		 * reused.
+		 *
+		 * FIXME: Fold this into the code path below.
+		 */
+		rds_iw_frag_drop_page(recv->r_frag);
+		return;
+	}
+
+	/*
+	 * If we don't already have an inc on the connection then this
+	 * fragment has a header and starts a message.  Copy its header
+	 * into the inc and save the inc so we can hang upcoming fragments
+	 * off its list.
+	 */
+	if (iwinc == NULL) {
+		iwinc = recv->r_iwinc;
+		recv->r_iwinc = NULL;
+		ic->i_iwinc = iwinc;
+
+		hdr = &iwinc->ii_inc.i_hdr;
+		memcpy(hdr, ihdr, sizeof(*hdr));
+		ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);
+
+		rdsdebug("ic %p iwinc %p rem %u flag 0x%x\n", ic, iwinc,
+			 ic->i_recv_data_rem, hdr->h_flags);
+	} else {
+		hdr = &iwinc->ii_inc.i_hdr;
+		/* We can't just use memcmp here; fragments of a
+		 * single message may carry different ACKs */
+		if (hdr->h_sequence != ihdr->h_sequence
+		 || hdr->h_len != ihdr->h_len
+		 || hdr->h_sport != ihdr->h_sport
+		 || hdr->h_dport != ihdr->h_dport) {
+			rds_iw_conn_error(conn,
+				"fragment header mismatch; forcing reconnect\n");
+			return;
+		}
+	}
+
+	list_add_tail(&recv->r_frag->f_item, &iwinc->ii_frags);
+	recv->r_frag = NULL;
+
+	if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
+		ic->i_recv_data_rem -= RDS_FRAG_SIZE;
+	else {
+		ic->i_recv_data_rem = 0;
+		ic->i_iwinc = NULL;
+
+		if (iwinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
+			rds_iw_cong_recv(conn, iwinc);
+		else {
+			rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
+					  &iwinc->ii_inc, GFP_ATOMIC,
+					  KM_SOFTIRQ0);
+			state->ack_next = be64_to_cpu(hdr->h_sequence);
+			state->ack_next_valid = 1;
+		}
+
+		/* Evaluate the ACK_REQUIRED flag *after* we received
+		 * the complete frame, and after bumping the next_rx
+		 * sequence. */
+		if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
+			rds_stats_inc(s_recv_ack_required);
+			state->ack_required = 1;
+		}
+
+		rds_inc_put(&iwinc->ii_inc);
+	}
+}
+
+/*
+ * Plucking the oldest entry from the ring can be done concurrently with
+ * the thread refilling the ring.  Each ring operation is protected by
+ * spinlocks and the transient state of refilling doesn't change the
+ * recording of which entry is oldest.
+ *
+ * This relies on IB only calling one cq comp_handler for each cq so that
+ * there will only be one caller of rds_recv_incoming() per RDS connection.
+ */
+void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context)
+{
+	struct rds_connection *conn = context;
+	struct rds_iw_connection *ic = conn->c_transport_data;
+	struct ib_wc wc;
+	struct rds_iw_ack_state state = { 0, };
+	struct rds_iw_recv_work *recv;
+
+	rdsdebug("conn %p cq %p\n", conn, cq);
+
+	rds_iw_stats_inc(s_iw_rx_cq_call);
+
+	ib_req_notify_cq(cq, IB_CQ_SOLICITED);
+
+	while (ib_poll_cq(cq, 1, &wc) > 0) {
+		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
+			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
+			 be32_to_cpu(wc.ex.imm_data));
+		rds_iw_stats_inc(s_iw_rx_cq_event);
+
+		recv = &ic->i_recvs[rds_iw_ring_oldest(&ic->i_recv_ring)];
+
+		rds_iw_recv_unmap_page(ic, recv);
+
+		/*
+		 * Also process recvs in connecting state because it is possible
+		 * to get a recv completion _before_ the rdmacm ESTABLISHED
+		 * event is processed.
+		 */
+		if (rds_conn_up(conn) || rds_conn_connecting(conn)) {
+			/* We expect errors as the qp is drained during shutdown */
+			if (wc.status == IB_WC_SUCCESS) {
+				rds_iw_process_recv(conn, recv, wc.byte_len, &state);
+			} else {
+				rds_iw_conn_error(conn, "recv completion on "
+				       "%u.%u.%u.%u had status %u, disconnecting and "
+				       "reconnecting\n", NIPQUAD(conn->c_faddr),
+				       wc.status);
+			}
+		}
+
+		rds_iw_ring_free(&ic->i_recv_ring, 1);
+	}
+
+	if (state.ack_next_valid)
+		rds_iw_set_ack(ic, state.ack_next, state.ack_required);
+	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
+		rds_send_drop_acked(conn, state.ack_recv, NULL);
+		ic->i_ack_recv = state.ack_recv;
+	}
+	if (rds_conn_up(conn))
+		rds_iw_attempt_ack(ic);
+
+	/* If we ever end up with a really empty receive ring, we're
+	 * in deep trouble, as the sender will definitely see RNR
+	 * timeouts. */
+	if (rds_iw_ring_empty(&ic->i_recv_ring))
+		rds_iw_stats_inc(s_iw_rx_ring_empty);
+
+	/*
+	 * If the ring is running low, then schedule the thread to refill.
+	 */
+	if (rds_iw_ring_low(&ic->i_recv_ring))
+		queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
+}
+
+int rds_iw_recv(struct rds_connection *conn)
+{
+	struct rds_iw_connection *ic = conn->c_transport_data;
+	int ret = 0;
+
+	rdsdebug("conn %p\n", conn);
+
+	/*
+	 * If we get a temporary posting failure in this context then
+	 * we're really low and we want the caller to back off for a bit.
+	 */
+	mutex_lock(&ic->i_recv_mutex);
+	if (rds_iw_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 0))
+		ret = -ENOMEM;
+	else
+		rds_iw_stats_inc(s_iw_rx_refill_from_thread);
+	mutex_unlock(&ic->i_recv_mutex);
+
+	if (rds_conn_up(conn))
+		rds_iw_attempt_ack(ic);
+
+	return ret;
+}
+
+int __init rds_iw_recv_init(void)
+{
+	struct sysinfo si;
+	int ret = -ENOMEM;
+
+	/* Default to roughly a third of all available RAM for recv memory */
+	si_meminfo(&si);
+	rds_iw_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;
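+	/* For example, on a machine with 4 KB pages, 3 GiB of RAM (786432
+	 * pages) and the usual 4 KB RDS_FRAG_SIZE, this works out to roughly
+	 * 262144 fragments, i.e. about 1 GiB of receive buffers. */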
+
+	rds_iw_incoming_slab = kmem_cache_create("rds_iw_incoming",
+					sizeof(struct rds_iw_incoming),
+					0, 0, NULL);
+	if (rds_iw_incoming_slab == NULL)
+		goto out;
+
+	rds_iw_frag_slab = kmem_cache_create("rds_iw_frag",
+					sizeof(struct rds_page_frag),
+					0, 0, NULL);
+	if (rds_iw_frag_slab == NULL)
+		kmem_cache_destroy(rds_iw_incoming_slab);
+	else
+		ret = 0;
+out:
+	return ret;
+}
+
+void rds_iw_recv_exit(void)
+{
+	kmem_cache_destroy(rds_iw_incoming_slab);
+	kmem_cache_destroy(rds_iw_frag_slab);
+}

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_ring.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_ring.c	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_ring.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2006 Oracle.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/kernel.h>
+
+#include "rds.h"
+#include "iw.h"
+
+/*
+ * Locking for IB rings.
+ * We assume that allocation is always protected by a mutex
+ * in the caller (this is a valid assumption for the current
+ * implementation).
+ *
+ * Freeing always happens in an interrupt, and hence only
+ * races with allocations, but not with other free()s.
+ *
+ * The interaction between allocation and freeing is that
+ * the alloc code has to determine the number of free entries.
+ * To this end, we maintain two counters: an allocation counter
+ * and a free counter. Both are allowed to run freely, and wrap
+ * around.
+ * The number of used entries is always alloc_ctr - free_ctr (computed
+ * with unsigned wrap-around), and it never exceeds NR.
+ *
+ * The current implementation makes free_ctr atomic. When the
+ * caller finds an allocation fails, it should set an "alloc fail"
+ * bit and retry the allocation. The "alloc fail" bit essentially tells
+ * the CQ completion handlers to wake it up after freeing some
+ * more entries.
+ */
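+
+/*
+ * For example, in __rds_iw_ring_used() below, with w_nr == 256,
+ * w_alloc_ctr == 3 (having wrapped past zero) and w_free_ctr == 0xffffff83,
+ * the u32 subtraction 3 - 0xffffff83 == 128 still yields the number of
+ * entries in use.
+ */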
+
+/*
+ * This only happens on shutdown.
+ */
+DECLARE_WAIT_QUEUE_HEAD(rds_iw_ring_empty_wait);
+
+void rds_iw_ring_init(struct rds_iw_work_ring *ring, u32 nr)
+{
+	memset(ring, 0, sizeof(*ring));
+	ring->w_nr = nr;
+	rdsdebug("ring %p nr %u\n", ring, ring->w_nr);
+}
+
+static inline u32 __rds_iw_ring_used(struct rds_iw_work_ring *ring)
+{
+	u32 diff;
+
+	/* This assumes that atomic_t has at least as many bits as u32 */
+	diff = ring->w_alloc_ctr - (u32) atomic_read(&ring->w_free_ctr);
+	BUG_ON(diff > ring->w_nr);
+
+	return diff;
+}
+
+void rds_iw_ring_resize(struct rds_iw_work_ring *ring, u32 nr)
+{
+	/* We only ever get called from the connection setup code,
+	 * prior to creating the QP. */
+	BUG_ON(__rds_iw_ring_used(ring));
+	ring->w_nr = nr;
+}
+
+static int __rds_iw_ring_empty(struct rds_iw_work_ring *ring)
+{
+	return __rds_iw_ring_used(ring) == 0;
+}
+
+u32 rds_iw_ring_alloc(struct rds_iw_work_ring *ring, u32 val, u32 *pos)
+{
+	u32 ret = 0, avail;
+
+	avail = ring->w_nr - __rds_iw_ring_used(ring);
+
+	rdsdebug("ring %p val %u next %u free %u\n", ring, val,
+		 ring->w_alloc_ptr, avail);
+
+	if (val && avail) {
+		ret = min(val, avail);
+		*pos = ring->w_alloc_ptr;
+
+		ring->w_alloc_ptr = (ring->w_alloc_ptr + ret) % ring->w_nr;
+		ring->w_alloc_ctr += ret;
+	}
+
+	return ret;
+}
+
+void rds_iw_ring_free(struct rds_iw_work_ring *ring, u32 val)
+{
+	ring->w_free_ptr = (ring->w_free_ptr + val) % ring->w_nr;
+	atomic_add(val, &ring->w_free_ctr);
+
+	if (__rds_iw_ring_empty(ring) &&
+	    waitqueue_active(&rds_iw_ring_empty_wait))
+		wake_up(&rds_iw_ring_empty_wait);
+}
+
+void rds_iw_ring_unalloc(struct rds_iw_work_ring *ring, u32 val)
+{
+	ring->w_alloc_ptr = (ring->w_alloc_ptr - val) % ring->w_nr;
+	ring->w_alloc_ctr -= val;
+}
+
+int rds_iw_ring_empty(struct rds_iw_work_ring *ring)
+{
+	return __rds_iw_ring_empty(ring);
+}
+
+int rds_iw_ring_low(struct rds_iw_work_ring *ring)
+{
+	return __rds_iw_ring_used(ring) <= (ring->w_nr >> 1);
+}
+
+
+/*
+ * returns the oldest alloced ring entry.  This will be the next one
+ * freed.  This can't be called if there are none allocated.
+ */
+u32 rds_iw_ring_oldest(struct rds_iw_work_ring *ring)
+{
+	return ring->w_free_ptr;
+}
+
+/*
+ * returns the number of completed work requests.
+ */
+
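+/*
+ * For example, with a 256 entry ring, oldest == 254 and wr_id == 2, the
+ * completion covers entries 254, 255, 0, 1 and 2, so the wrap branch
+ * below returns 256 - 254 + 2 + 1 == 5.
+ */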
+u32 rds_iw_ring_completed(struct rds_iw_work_ring *ring, u32 wr_id, u32 oldest)
+{
+	u32 ret;
+
+	if (oldest <= (unsigned long long)wr_id)
+		ret = (unsigned long long)wr_id - oldest + 1;
+	else
+		ret = ring->w_nr - oldest + (unsigned long long)wr_id + 1;
+
+	rdsdebug("ring %p ret %u wr_id %u oldest %u\n", ring, ret,
+		 wr_id, oldest);
+	return ret;
+}

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_send.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_send.c	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_send.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,976 @@
+/*
+ * Copyright (c) 2006 Oracle.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/in.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+
+#include "rds.h"
+#include "rdma.h"
+#include "iw.h"
+
+static void rds_iw_send_rdma_complete(struct rds_message *rm,
+				      int wc_status)
+{
+	int notify_status;
+
+	switch (wc_status) {
+	case IB_WC_WR_FLUSH_ERR:
+		return;
+
+	case IB_WC_SUCCESS:
+		notify_status = RDS_RDMA_SUCCESS;
+		break;
+
+	case IB_WC_REM_ACCESS_ERR:
+		notify_status = RDS_RDMA_REMOTE_ERROR;
+		break;
+
+	default:
+		notify_status = RDS_RDMA_OTHER_ERROR;
+		break;
+	}
+	rds_rdma_send_complete(rm, notify_status);
+}
+
+static void rds_iw_send_unmap_rdma(struct rds_iw_connection *ic,
+				   struct rds_rdma_op *op)
+{
+	if (op->r_mapped) {
+		ib_dma_unmap_sg(ic->i_cm_id->device,
+			op->r_sg, op->r_nents,
+			op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		op->r_mapped = 0;
+	}
+}
+
+static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic,
+			  struct rds_iw_send_work *send,
+			  int wc_status)
+{
+	struct rds_message *rm = send->s_rm;
+
+	rdsdebug("ic %p send %p rm %p\n", ic, send, rm);
+
+	ib_dma_unmap_sg(ic->i_cm_id->device,
+		     rm->m_sg, rm->m_nents,
+		     DMA_TO_DEVICE);
+
+	if (rm->m_rdma_op != NULL) {
+		rds_iw_send_unmap_rdma(ic, rm->m_rdma_op);
+
+		/* If the user asked for a completion notification on this
+		 * message, we can implement three different semantics:
+		 *  1.	Notify when we received the ACK on the RDS message
+		 *	that was queued with the RDMA. This provides reliable
+		 *	notification of RDMA status at the expense of a one-way
+		 *	packet delay.
+		 *  2.	Notify when the IB stack gives us the completion event for
+		 *	the RDMA operation.
+		 *  3.	Notify when the IB stack gives us the completion event for
+		 *	the accompanying RDS messages.
+		 * Here, we implement approach #3. To implement approach #2,
+		 * call rds_rdma_send_complete from the cq_handler. To implement #1,
+		 * don't call rds_rdma_send_complete at all, and fall back to the notify
+		 * handling in the ACK processing code.
+		 *
+		 * Note: There's no need to explicitly sync any RDMA buffers using
+		 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
+		 * operation itself unmapped the RDMA buffers, which takes care
+		 * of synching.
+		 */
+		rds_iw_send_rdma_complete(rm, wc_status);
+
+		if (rm->m_rdma_op->r_write)
+			rds_stats_add(s_send_rdma_bytes, rm->m_rdma_op->r_bytes);
+		else
+			rds_stats_add(s_recv_rdma_bytes, rm->m_rdma_op->r_bytes);
+	}
+
+	/* If anyone waited for this message to get flushed out, wake
+	 * them up now */
+	rds_message_unmapped(rm);
+
+	rds_message_put(rm);
+	send->s_rm = NULL;
+}
+
+void rds_iw_send_init_ring(struct rds_iw_connection *ic)
+{
+	struct rds_iw_send_work *send;
+	u32 i;
+
+	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
+		struct ib_sge *sge;
+
+		send->s_rm = NULL;
+		send->s_op = NULL;
+		send->s_mapping = NULL;
+
+		send->s_wr.next = NULL;
+		send->s_wr.wr_id = i;
+		send->s_wr.sg_list = send->s_sge;
+		send->s_wr.num_sge = 1;
+		send->s_wr.opcode = IB_WR_SEND;
+		send->s_wr.send_flags = 0;
+		send->s_wr.ex.imm_data = 0;
+
+		sge = rds_iw_data_sge(ic, send->s_sge);
+		sge->lkey = 0;
+
+		sge = rds_iw_header_sge(ic, send->s_sge);
+		sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
+		sge->length = sizeof(struct rds_header);
+		sge->lkey = 0;
+
+		send->s_mr = ib_alloc_fast_reg_mr(ic->i_pd, fastreg_message_size);
+		if (IS_ERR(send->s_mr)) {
+			printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_mr failed\n");
+			break;
+		}
+
+		send->s_page_list = ib_alloc_fast_reg_page_list(
+			ic->i_cm_id->device, fastreg_message_size);
+		if (IS_ERR(send->s_page_list)) {
+			printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_page_list failed\n");
+			break;
+		}
+	}
+}
+
+void rds_iw_send_clear_ring(struct rds_iw_connection *ic)
+{
+	struct rds_iw_send_work *send;
+	u32 i;
+
+	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
+		BUG_ON(!send->s_mr);
+		ib_dereg_mr(send->s_mr);
+		BUG_ON(!send->s_page_list);
+		ib_free_fast_reg_page_list(send->s_page_list);
+		if (send->s_wr.opcode == 0xdead)
+			continue;
+		if (send->s_rm)
+			rds_iw_send_unmap_rm(ic, send, IB_WC_WR_FLUSH_ERR);
+		if (send->s_op)
+			rds_iw_send_unmap_rdma(ic, send->s_op);
+	}
+}
+
+/*
+ * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
+ * operations performed in the send path.  As the sender allocs and potentially
+ * unallocs the next free entry in the ring it doesn't alter which is
+ * the next to be freed, which is what this is concerned with.
+ */
+void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
+{
+	struct rds_connection *conn = context;
+	struct rds_iw_connection *ic = conn->c_transport_data;
+	struct ib_wc wc;
+	struct rds_iw_send_work *send;
+	u32 completed;
+	u32 oldest;
+	u32 i;
+	int ret;
+
+	rdsdebug("cq %p conn %p\n", cq, conn);
+	rds_iw_stats_inc(s_iw_tx_cq_call);
+	ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+	if (ret)
+		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
+
+	while (ib_poll_cq(cq, 1, &wc) > 0) {
+		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
+			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
+			 be32_to_cpu(wc.ex.imm_data));
+		rds_iw_stats_inc(s_iw_tx_cq_event);
+
+		if (wc.status != IB_WC_SUCCESS) {
+			printk(KERN_ERR "WC Error:  status = %d opcode = %d\n", wc.status, wc.opcode);
+			break;
+		}
+
+		if (wc.opcode == IB_WC_LOCAL_INV && wc.wr_id == RDS_IW_LOCAL_INV_WR_ID) {
+			ic->i_fastreg_posted = 0;
+			continue;
+		}
+
+		if (wc.opcode == IB_WC_FAST_REG_MR && wc.wr_id == RDS_IW_FAST_REG_WR_ID) {
+			ic->i_fastreg_posted = 1;
+			continue;
+		}
+
+		if (wc.wr_id == RDS_IW_ACK_WR_ID) {
+			if (ic->i_ack_queued + HZ/2 < jiffies)
+				rds_iw_stats_inc(s_iw_tx_stalled);
+			rds_iw_ack_send_complete(ic);
+			continue;
+		}
+
+		oldest = rds_iw_ring_oldest(&ic->i_send_ring);
+
+		completed = rds_iw_ring_completed(&ic->i_send_ring, wc.wr_id, oldest);
+
+		for (i = 0; i < completed; i++) {
+			send = &ic->i_sends[oldest];
+
+			/* In the error case, wc.opcode sometimes contains garbage */
+			switch (send->s_wr.opcode) {
+			case IB_WR_SEND:
+				if (send->s_rm)
+					rds_iw_send_unmap_rm(ic, send, wc.status);
+				break;
+			case IB_WR_FAST_REG_MR:
+			case IB_WR_RDMA_WRITE:
+			case IB_WR_RDMA_READ:
+			case IB_WR_RDMA_READ_WITH_INV:
+				/* Nothing to be done - the SG list will be unmapped
+				 * when the SEND completes. */
+				break;
+			default:
+				if (printk_ratelimit())
+					printk(KERN_NOTICE
+						"RDS/IW: %s: unexpected opcode 0x%x in WR!\n",
+						__func__, send->s_wr.opcode);
+				break;
+			}
+
+			send->s_wr.opcode = 0xdead;
+			send->s_wr.num_sge = 1;
+			if (send->s_queued + HZ/2 < jiffies)
+				rds_iw_stats_inc(s_iw_tx_stalled);
+
+			/* If an RDMA operation produced an error, signal this right
+			 * away. If we don't, the subsequent SEND that goes with this
+			 * RDMA will be canceled with ERR_WFLUSH, and the application
+			 * will never learn that the RDMA failed. */
+			if (unlikely(wc.status == IB_WC_REM_ACCESS_ERR && send->s_op)) {
+				struct rds_message *rm;
+
+				rm = rds_send_get_message(conn, send->s_op);
+				if (rm)
+					rds_iw_send_rdma_complete(rm, wc.status);
+			}
+
+			oldest = (oldest + 1) % ic->i_send_ring.w_nr;
+		}
+
+		rds_iw_ring_free(&ic->i_send_ring, completed);
+
+		if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags)
+		 || test_bit(0, &conn->c_map_queued))
+			queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+
+		/* We expect errors as the qp is drained during shutdown */
+		if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) {
+			rds_iw_conn_error(conn,
+				"send completion on %u.%u.%u.%u "
+				"had status %u, disconnecting and reconnecting\n",
+				NIPQUAD(conn->c_faddr), wc.status);
+		}
+	}
+}
+
+/*
+ * This is the main function for allocating credits when sending
+ * messages.
+ *
+ * Conceptually, we have two counters:
+ *  -	send credits: this tells us how many WRs we're allowed
+ *	to submit without overrunning the receiver's queue. For
+ *	each SEND WR we post, we decrement this by one.
+ *
+ *  -	posted credits: this tells us how many WRs we recently
+ *	posted to the receive queue. This value is transferred
+ *	to the peer as a "credit update" in a RDS header field.
+ *	Every time we transmit credits to the peer, we subtract
+ *	the amount of transferred credits from this counter.
+ *
+ * It is essential that we avoid situations where both sides have
+ * exhausted their send credits, and are unable to send new credits
+ * to the peer. We achieve this by requiring that we send at least
+ * one credit update to the peer before exhausting our credits.
+ * When new credits arrive, we subtract one credit that is withheld
+ * until we've posted new buffers and are ready to transmit these
+ * credits (see rds_iw_send_add_credits below).
+ *
+ * The RDS send code is essentially single-threaded; rds_send_xmit
+ * grabs c_send_lock to ensure exclusive access to the send ring.
+ * However, the ACK sending code is independent and can race with
+ * message SENDs.
+ *
+ * In the send path, we need to update the counters for send credits
+ * and the counter of posted buffers atomically - when we use the
+ * last available credit, we cannot allow another thread to race us
+ * and grab the posted credits counter.  Hence, we have to use a
+ * spinlock to protect the credit counter, or use atomics.
+ *
+ * Spinlocks shared between the send and the receive path are bad,
+ * because they create unnecessary delays. An early implementation
+ * using a spinlock showed a 5% degradation in throughput at some
+ * loads.
+ *
+ * This implementation avoids spinlocks completely, putting both
+ * counters into a single atomic, and updating that atomic using
+ * atomic_add (in the receive path, when receiving fresh credits),
+ * and using atomic_cmpxchg when updating the two counters.
+ */
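+/*
+ * For example, if four send credits are available but no posted credits
+ * have been advertised yet, the code below hands out at most three of
+ * them; the last one is withheld so that a credit update can still be
+ * sent to the peer.
+ */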
+int rds_iw_send_grab_credits(struct rds_iw_connection *ic,
+			     u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
+{
+	unsigned int avail, posted, got = 0, advertise;
+	long oldval, newval;
+
+	*adv_credits = 0;
+	if (!ic->i_flowctl)
+		return wanted;
+
+try_again:
+	advertise = 0;
+	oldval = newval = atomic_read(&ic->i_credits);
+	posted = IB_GET_POST_CREDITS(oldval);
+	avail = IB_GET_SEND_CREDITS(oldval);
+
+	rdsdebug("rds_iw_send_grab_credits(%u): credits=%u posted=%u\n",
+			wanted, avail, posted);
+
+	/* The last credit must be used to send a credit update. */
+	if (avail && !posted)
+		avail--;
+
+	if (avail < wanted) {
+		struct rds_connection *conn = ic->i_cm_id->context;
+
+		/* Oops, there aren't that many credits left! */
+		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
+		got = avail;
+	} else {
+		/* Sometimes you get what you want, lalala. */
+		got = wanted;
+	}
+	newval -= IB_SET_SEND_CREDITS(got);
+
+	/*
+	 * If need_posted is non-zero, then the caller wants the
+	 * posted credits advertised regardless of whether any send
+	 * credits are available.
+	 */
+	if (posted && (got || need_posted)) {
+		advertise = min_t(unsigned int, posted, max_posted);
+		newval -= IB_SET_POST_CREDITS(advertise);
+	}
+
+	/* Finally bill everything */
+	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
+		goto try_again;
+
+	*adv_credits = advertise;
+	return got;
+}
+
+void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits)
+{
+	struct rds_iw_connection *ic = conn->c_transport_data;
+
+	if (credits == 0)
+		return;
+
+	rdsdebug("rds_iw_send_add_credits(%u): current=%u%s\n",
+			credits,
+			IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
+			test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");
+
+	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
+	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
+		queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+
+	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);
+
+	rds_iw_stats_inc(s_iw_rx_credit_updates);
+}
+
+void rds_iw_advertise_credits(struct rds_connection *conn, unsigned int posted)
+{
+	struct rds_iw_connection *ic = conn->c_transport_data;
+
+	if (posted == 0)
+		return;
+
+	atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);
+
+	/* Decide whether to send an update to the peer now.
+	 * If we would send a credit update for every single buffer we
+	 * post, we would end up with an ACK storm (ACK arrives,
+	 * consumes buffer, we refill the ring, send ACK to remote
+	 * advertising the newly posted buffer... ad inf)
+	 *
+	 * Performance pretty much depends on how often we send
+	 * credit updates - too frequent updates mean lots of ACKs.
+	 * Too infrequent updates, and the peer will run out of
+	 * credits and have to throttle.
+	 * For the time being, 16 seems to be a good compromise.
+	 */
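+	/* For example, once 16 newly posted buffers have accumulated, the
+	 * check below requests an ACK so that the pending credit update is
+	 * carried by the next outgoing packet. */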
+	if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
+		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+}
+
+static inline void
+rds_iw_xmit_populate_wr(struct rds_iw_connection *ic,
+		struct rds_iw_send_work *send, unsigned int pos,
+		unsigned long buffer, unsigned int length,
+		int send_flags)
+{
+	struct ib_sge *sge;
+
+	WARN_ON(pos != send - ic->i_sends);
+
+	send->s_wr.send_flags = send_flags;
+	send->s_wr.opcode = IB_WR_SEND;
+	send->s_wr.num_sge = 2;
+	send->s_wr.next = NULL;
+	send->s_queued = jiffies;
+	send->s_op = NULL;
+
+	if (length != 0) {
+		sge = rds_iw_data_sge(ic, send->s_sge);
+		sge->addr = buffer;
+		sge->length = length;
+		sge->lkey = rds_iw_local_dma_lkey(ic);
+
+		sge = rds_iw_header_sge(ic, send->s_sge);
+	} else {
+		/* We're sending a packet with no payload. There is only
+		 * one SGE */
+		send->s_wr.num_sge = 1;
+		sge = &send->s_sge[0];
+	}
+
+	sge->addr = ic->i_send_hdrs_dma + (pos * sizeof(struct rds_header));
+	sge->length = sizeof(struct rds_header);
+	sge->lkey = rds_iw_local_dma_lkey(ic);
+}
+
+/*
+ * This can be called multiple times for a given message.  The first time
+ * we see a message we map its scatterlist into the IB device so that
+ * we can provide that mapped address to the IB scatter gather entries
+ * in the IB work requests.  We translate the scatterlist into a series
+ * of work requests that fragment the message.  These work requests complete
+ * in order so we pass ownership of the message to the completion handler
+ * once we send the final fragment.
+ *
+ * The RDS core uses the c_send_lock to only enter this function once
+ * per connection.  This makes sure that the tx ring alloc/unalloc pairs
+ * don't get out of sync and confuse the ring.
+ */
+int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
+		unsigned int hdr_off, unsigned int sg, unsigned int off)
+{
+	struct rds_iw_connection *ic = conn->c_transport_data;
+	struct ib_device *dev = ic->i_cm_id->device;
+	struct rds_iw_send_work *send = NULL;
+	struct rds_iw_send_work *first;
+	struct rds_iw_send_work *prev;
+	struct ib_send_wr *failed_wr;
+	struct scatterlist *scat;
+	u32 pos;
+	u32 i;
+	u32 work_alloc;
+	u32 credit_alloc;
+	u32 posted;
+	u32 adv_credits = 0;
+	int send_flags = 0;
+	int sent;
+	int ret;
+	int flow_controlled = 0;
+
+	BUG_ON(off % RDS_FRAG_SIZE);
+	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));
+
+	/* Fastreg support */
+	if (rds_rdma_cookie_key(rm->m_rdma_cookie)
+	 && !ic->i_fastreg_posted) {
+		ret = -EAGAIN;
+		goto out;
+	}
+
+	/* FIXME we may overallocate here */
+	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
+		i = 1;
+	else
+		i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);
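+	/* For example, assuming the usual 4 KB RDS_FRAG_SIZE, a 10000 byte
+	 * message gives i = ceil(10000, 4096) == 3 fragments, i.e. 3 send WRs. */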
+
+	work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, i, &pos);
+	if (work_alloc == 0) {
+		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
+		rds_iw_stats_inc(s_iw_tx_ring_full);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	credit_alloc = work_alloc;
+	if (ic->i_flowctl) {
+		credit_alloc = rds_iw_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
+		adv_credits += posted;
+		if (credit_alloc < work_alloc) {
+			rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
+			work_alloc = credit_alloc;
+			flow_controlled++;
+		}
+		if (work_alloc == 0) {
+			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
+			rds_iw_stats_inc(s_iw_tx_throttle);
+			ret = -ENOMEM;
+			goto out;
+		}
+	}
+
+	/* map the message the first time we see it */
+	if (ic->i_rm == NULL) {
+		/*
+		printk(KERN_NOTICE "rds_iw_xmit prep msg dport=%u flags=0x%x len=%d\n",
+				be16_to_cpu(rm->m_inc.i_hdr.h_dport),
+				rm->m_inc.i_hdr.h_flags,
+				be32_to_cpu(rm->m_inc.i_hdr.h_len));
+		   */
+		if (rm->m_nents) {
+			rm->m_count = ib_dma_map_sg(dev,
+					 rm->m_sg, rm->m_nents, DMA_TO_DEVICE);
+			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->m_count);
+			if (rm->m_count == 0) {
+				rds_iw_stats_inc(s_iw_tx_sg_mapping_failure);
+				rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
+				ret = -ENOMEM; /* XXX ? */
+				goto out;
+			}
+		} else {
+			rm->m_count = 0;
+		}
+
+		ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
+		ic->i_unsignaled_bytes = rds_iw_sysctl_max_unsig_bytes;
+		rds_message_addref(rm);
+		ic->i_rm = rm;
+
+		/* Finalize the header */
+		if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
+			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
+		if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
+			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;
+
+		/* If it has a RDMA op, tell the peer we did it. This is
+		 * used by the peer to release use-once RDMA MRs. */
+		if (rm->m_rdma_op) {
+			struct rds_ext_header_rdma ext_hdr;
+
+			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->m_rdma_op->r_key);
+			rds_message_add_extension(&rm->m_inc.i_hdr,
+					RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
+		}
+		if (rm->m_rdma_cookie) {
+			rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
+					rds_rdma_cookie_key(rm->m_rdma_cookie),
+					rds_rdma_cookie_offset(rm->m_rdma_cookie));
+		}
+
+		/* Note - rds_iw_piggyb_ack clears the ACK_REQUIRED bit, so
+		 * we should not do this unless we have a chance of at least
+		 * sticking the header into the send ring. Which is why we
+		 * should call rds_iw_ring_alloc first. */
+		rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_iw_piggyb_ack(ic));
+		rds_message_make_checksum(&rm->m_inc.i_hdr);
+
+		/*
+		 * Update adv_credits since we reset the ACK_REQUIRED bit.
+		 */
+		rds_iw_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
+		adv_credits += posted;
+		BUG_ON(adv_credits > 255);
+	} else if (ic->i_rm != rm)
+		BUG();
+
+	send = &ic->i_sends[pos];
+	first = send;
+	prev = NULL;
+	scat = &rm->m_sg[sg];
+	sent = 0;
+	i = 0;
+
+	/* Sometimes you want to put a fence between an RDMA
+	 * READ and the following SEND.
+	 * We could either do this all the time
+	 * or when requested by the user. Right now, we let
+	 * the application choose.
+	 */
+	if (rm->m_rdma_op && rm->m_rdma_op->r_fence)
+		send_flags = IB_SEND_FENCE;
+
+	/*
+	 * We could be copying the header into the unused tail of the page.
+	 * That would need to be changed in the future when those pages might
+	 * be mapped userspace pages or page cache pages.  So instead we always
+	 * use a second sge and our long-lived ring of mapped headers.  We send
+	 * the header after the data so that the data payload can be aligned on
+	 * the receiver.
+	 */
+
+	/* handle a 0-len message */
+	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) {
+		rds_iw_xmit_populate_wr(ic, send, pos, 0, 0, send_flags);
+		goto add_header;
+	}
+
+	/* if there's data reference it with a chain of work reqs */
+	for (; i < work_alloc && scat != &rm->m_sg[rm->m_count]; i++) {
+		unsigned int len;
+
+		send = &ic->i_sends[pos];
+
+		len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
+		rds_iw_xmit_populate_wr(ic, send, pos,
+				ib_sg_dma_address(dev, scat) + off, len,
+				send_flags);
+
+		/*
+		 * We want to delay signaling completions just enough to get
+		 * the batching benefits but not so much that we create dead time
+		 * on the wire.
+		 */
+		if (ic->i_unsignaled_wrs-- == 0) {
+			ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
+			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
+		}
+
+		ic->i_unsignaled_bytes -= len;
+		if (ic->i_unsignaled_bytes <= 0) {
+			ic->i_unsignaled_bytes = rds_iw_sysctl_max_unsig_bytes;
+			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
+		}
+
+		/*
+		 * Always signal the last one if we're stopping due to flow control.
+		 */
+		if (flow_controlled && i == (work_alloc-1)) {
+			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
+		}
+
+		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
+			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);
+
+		sent += len;
+		off += len;
+		if (off == ib_sg_dma_len(dev, scat)) {
+			scat++;
+			off = 0;
+		}
+
+add_header:
+		/* Tack on the header after the data. The header SGE should already
+		 * have been set up to point to the right header buffer. */
+		memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));
+
+		if (0) {
+			struct rds_header *hdr = &ic->i_send_hdrs[pos];
+
+			printk(KERN_NOTICE "send WR dport=%u flags=0x%x len=%d\n",
+				be16_to_cpu(hdr->h_dport),
+				hdr->h_flags,
+				be32_to_cpu(hdr->h_len));
+		}
+		if (adv_credits) {
+			struct rds_header *hdr = &ic->i_send_hdrs[pos];
+
+			/* add credit and redo the header checksum */
+			hdr->h_credit = adv_credits;
+			rds_message_make_checksum(hdr);
+			adv_credits = 0;
+			rds_iw_stats_inc(s_iw_tx_credit_updates);
+		}
+
+		if (prev)
+			prev->s_wr.next = &send->s_wr;
+		prev = send;
+
+		pos = (pos + 1) % ic->i_send_ring.w_nr;
+	}
+
+	/* Account the RDS header in the number of bytes we sent, but just once.
+	 * The caller has no concept of fragmentation. */
+	if (hdr_off == 0)
+		sent += sizeof(struct rds_header);
+
+	/* if we finished the message then send completion owns it */
+	if (scat == &rm->m_sg[rm->m_count]) {
+		prev->s_rm = ic->i_rm;
+		prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
+		ic->i_rm = NULL;
+	}
+
+	if (i < work_alloc) {
+		rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - i);
+		work_alloc = i;
+	}
+	if (ic->i_flowctl && i < credit_alloc)
+		rds_iw_send_add_credits(conn, credit_alloc - i);
+
+	/* XXX need to worry about failed_wr and partial sends. */
+	failed_wr = &first->s_wr;
+	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
+	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
+		 first, &first->s_wr, ret, failed_wr);
+	BUG_ON(failed_wr != &first->s_wr);
+	if (ret) {
+		printk(KERN_WARNING "RDS/IW: ib_post_send to %u.%u.%u.%u "
+		       "returned %d\n", NIPQUAD(conn->c_faddr), ret);
+		rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
+		if (prev->s_rm) {
+			ic->i_rm = prev->s_rm;
+			prev->s_rm = NULL;
+		}
+		goto out;
+	}
+
+	ret = sent;
+out:
+	BUG_ON(adv_credits);
+	return ret;
+}
+
+static void rds_iw_build_send_fastreg(struct rds_iw_device *rds_iwdev, struct rds_iw_connection *ic, struct rds_iw_send_work *send, int nent, int len, u64 sg_addr)
+{
+	BUG_ON(nent > send->s_page_list->max_page_list_len);
+	/*
+	 * Perform a WR for the fast_reg_mr. Each individual page
+	 * in the sg list is added to the fast reg page list and placed
+	 * inside the fast_reg_mr WR.
+	 */
+	send->s_wr.opcode = IB_WR_FAST_REG_MR;
+	send->s_wr.wr.fast_reg.length = len;
+	send->s_wr.wr.fast_reg.rkey = send->s_mr->rkey;
+	send->s_wr.wr.fast_reg.page_list = send->s_page_list;
+	send->s_wr.wr.fast_reg.page_list_len = nent;
+	send->s_wr.wr.fast_reg.page_shift = rds_iwdev->page_shift;
+	send->s_wr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE;
+	send->s_wr.wr.fast_reg.iova_start = sg_addr;
+
+	ib_update_fast_reg_key(send->s_mr, send->s_remap_count++);
+}
+
+int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
+{
+	struct rds_iw_connection *ic = conn->c_transport_data;
+	struct rds_iw_send_work *send = NULL;
+	struct rds_iw_send_work *first;
+	struct rds_iw_send_work *prev;
+	struct ib_send_wr *failed_wr;
+	struct rds_iw_device *rds_iwdev;
+	struct scatterlist *scat;
+	unsigned long len;
+	u64 remote_addr = op->r_remote_addr;
+	u32 pos, fr_pos;
+	u32 work_alloc;
+	u32 i;
+	u32 j;
+	int sent;
+	int ret;
+	int num_sge;
+
+	rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
+
+	/* map the message the first time we see it */
+	if (!op->r_mapped) {
+		op->r_count = ib_dma_map_sg(ic->i_cm_id->device,
+					op->r_sg, op->r_nents, (op->r_write) ?
+					DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->r_count);
+		if (op->r_count == 0) {
+			rds_iw_stats_inc(s_iw_tx_sg_mapping_failure);
+			ret = -ENOMEM; /* XXX ? */
+			goto out;
+		}
+
+		op->r_mapped = 1;
+	}
+
+	if (!op->r_write) {
+		/* Alloc space on the send queue for the fastreg */
+		work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, 1, &fr_pos);
+		if (work_alloc != 1) {
+			rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
+			rds_iw_stats_inc(s_iw_tx_ring_full);
+			ret = -ENOMEM;
+			goto out;
+		}
+	}
+
+	/*
+	 * Instead of knowing how to return a partial rdma read/write we insist that there
+	 * be enough work requests to send the entire message.
+	 */
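+	/* For example, r_count == 8 mapped entries on a device with
+	 * max_sge == 3 gives ceil(8, 3) == 3 work requests. */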
+	i = ceil(op->r_count, rds_iwdev->max_sge);
+
+	work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, i, &pos);
+	if (work_alloc != i) {
+		rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
+		rds_iw_stats_inc(s_iw_tx_ring_full);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	send = &ic->i_sends[pos];
+	if (!op->r_write) {
+		first = prev = &ic->i_sends[fr_pos];
+	} else {
+		first = send;
+		prev = NULL;
+	}
+	scat = &op->r_sg[0];
+	sent = 0;
+	num_sge = op->r_count;
+
+	for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) {
+		send->s_wr.send_flags = 0;
+		send->s_queued = jiffies;
+
+		/*
+		 * We want to delay signaling completions just enough to get
+		 * the batching benefits but not so much that we create dead time on the wire.
+		 */
+		if (ic->i_unsignaled_wrs-- == 0) {
+			ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
+			send->s_wr.send_flags = IB_SEND_SIGNALED;
+		}
+
+		/* To avoid needing the plumbing to invalidate the fastreg_mr used
+		 * for local access once RDS is finished with it, we use
+		 * IB_WR_RDMA_READ_WITH_INV, which invalidates it after the read completes.
+		 */
+		if (op->r_write)
+			send->s_wr.opcode = IB_WR_RDMA_WRITE;
+		else
+			send->s_wr.opcode = IB_WR_RDMA_READ_WITH_INV;
+
+		send->s_wr.wr.rdma.remote_addr = remote_addr;
+		send->s_wr.wr.rdma.rkey = op->r_key;
+		send->s_op = op;
+
+		if (num_sge > rds_iwdev->max_sge) {
+			send->s_wr.num_sge = rds_iwdev->max_sge;
+			num_sge -= rds_iwdev->max_sge;
+		} else
+			send->s_wr.num_sge = num_sge;
+
+		send->s_wr.next = NULL;
+
+		if (prev)
+			prev->s_wr.next = &send->s_wr;
+
+		for (j = 0; j < send->s_wr.num_sge && scat != &op->r_sg[op->r_count]; j++) {
+			len = ib_sg_dma_len(ic->i_cm_id->device, scat);
+
+			if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV)
+				send->s_page_list->page_list[j] = ib_sg_dma_address(ic->i_cm_id->device, scat);
+			else {
+				send->s_sge[j].addr = ib_sg_dma_address(ic->i_cm_id->device, scat);
+				send->s_sge[j].length = len;
+				send->s_sge[j].lkey = rds_iw_local_dma_lkey(ic);
+			}
+
+			sent += len;
+			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);
+			remote_addr += len;
+
+			scat++;
+		}
+
+		if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV) {
+			send->s_wr.num_sge = 1;
+			send->s_sge[0].addr = conn->c_xmit_rm->m_rs->rs_user_addr;
+			send->s_sge[0].length = conn->c_xmit_rm->m_rs->rs_user_bytes;
+			send->s_sge[0].lkey = ic->i_sends[fr_pos].s_mr->lkey;
+		}
+
+		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
+			&send->s_wr, send->s_wr.num_sge, send->s_wr.next);
+
+		prev = send;
+		if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
+			send = ic->i_sends;
+	}
+
+	/* if we finished the message then send completion owns it */
+	if (scat == &op->r_sg[op->r_count])
+		first->s_wr.send_flags = IB_SEND_SIGNALED;
+
+	if (i < work_alloc) {
+		rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - i);
+		work_alloc = i;
+	}
+
+	/* On iWARP, local memory access by a remote system (i.e. RDMA Read) is not
+	 * recommended.  Putting the lkey on the wire is a security hole, as it can
+	 * allow access to all of the memory on the remote system.  Some
+	 * adapters do not allow using the lkey for this at all.  To bypass this,
+	 * use a fastreg_mr (or possibly a dma_mr).
+	 */
+	if (!op->r_write) {
+		rds_iw_build_send_fastreg(rds_iwdev, ic, &ic->i_sends[fr_pos],
+			op->r_count, sent, conn->c_xmit_rm->m_rs->rs_user_addr);
+		work_alloc++;
+	}
+
+	failed_wr = &first->s_wr;
+	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
+	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
+		 first, &first->s_wr, ret, failed_wr);
+	BUG_ON(failed_wr != &first->s_wr);
+	if (ret) {
+		printk(KERN_WARNING "RDS/IW: rdma ib_post_send to %u.%u.%u.%u "
+		       "returned %d\n", NIPQUAD(conn->c_faddr), ret);
+		rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
+		goto out;
+	}
+
+out:
+	return ret;
+}
+
+void rds_iw_xmit_complete(struct rds_connection *conn)
+{
+	struct rds_iw_connection *ic = conn->c_transport_data;
+
+	/* We may have a pending ACK or window update we were unable
+	 * to send previously (due to flow control). Try again. */
+	rds_iw_attempt_ack(ic);
+}

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_stats.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_stats.c	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_stats.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2006 Oracle.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/percpu.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+
+#include "rds.h"
+#include "iw.h"
+
+RDS_DEFINE_PER_CPU(struct rds_iw_statistics, rds_iw_stats)
+	____cacheline_aligned;
+
+static char *rds_iw_stat_names[] = {
+	"iw_connect_raced",
+	"iw_listen_closed_stale",
+	"iw_tx_cq_call",
+	"iw_tx_cq_event",
+	"iw_tx_ring_full",
+	"iw_tx_throttle",
+	"iw_tx_sg_mapping_failure",
+	"iw_tx_stalled",
+	"iw_tx_credit_updates",
+	"iw_rx_cq_call",
+	"iw_rx_cq_event",
+	"iw_rx_ring_empty",
+	"iw_rx_refill_from_cq",
+	"iw_rx_refill_from_thread",
+	"iw_rx_alloc_limit",
+	"iw_rx_credit_updates",
+	"iw_ack_sent",
+	"iw_ack_send_failure",
+	"iw_ack_send_delayed",
+	"iw_ack_send_piggybacked",
+	"iw_ack_received",
+	"iw_rdma_mr_alloc",
+	"iw_rdma_mr_free",
+	"iw_rdma_mr_used",
+	"iw_rdma_mr_pool_flush",
+	"iw_rdma_mr_pool_wait",
+	"iw_rdma_mr_pool_depleted",
+};
+
+unsigned int rds_iw_stats_info_copy(struct rds_info_iterator *iter,
+				    unsigned int avail)
+{
+	struct rds_iw_statistics stats = {0, };
+	uint64_t *src;
+	uint64_t *sum;
+	size_t i;
+	int cpu;
+
+	if (avail < ARRAY_SIZE(rds_iw_stat_names))
+		goto out;
+
+	for_each_online_cpu(cpu) {
+		src = (uint64_t *)&(rds_per_cpu(rds_iw_stats, cpu));
+		sum = (uint64_t *)&stats;
+		for (i = 0; i < sizeof(stats) / sizeof(uint64_t); i++)
+			*(sum++) += *(src++);
+	}
+
+	rds_stats_info_copy(iter, (uint64_t *)&stats, rds_iw_stat_names,
+			    ARRAY_SIZE(rds_iw_stat_names));
+out:
+	return ARRAY_SIZE(rds_iw_stat_names);
+}

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_sysctl.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_sysctl.c	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/iw_sysctl.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2006 Oracle.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/sysctl.h>
+#include <linux/proc_fs.h>
+
+#include "iw.h"
+
+static struct ctl_table_header *rds_iw_sysctl_hdr;
+
+unsigned long rds_iw_sysctl_max_send_wr = RDS_IW_DEFAULT_SEND_WR;
+unsigned long rds_iw_sysctl_max_recv_wr = RDS_IW_DEFAULT_RECV_WR;
+unsigned long rds_iw_sysctl_max_recv_allocation = (128 * 1024 * 1024) / RDS_FRAG_SIZE;
+static unsigned long rds_iw_sysctl_max_wr_min = 1;
+/* hardware will fail CQ creation long before this */
+static unsigned long rds_iw_sysctl_max_wr_max = (u32)~0;
+
+unsigned long rds_iw_sysctl_max_unsig_wrs = 16;
+static unsigned long rds_iw_sysctl_max_unsig_wr_min = 1;
+static unsigned long rds_iw_sysctl_max_unsig_wr_max = 64;
+
+unsigned long rds_iw_sysctl_max_unsig_bytes = (16 << 20);
+static unsigned long rds_iw_sysctl_max_unsig_bytes_min = 1;
+static unsigned long rds_iw_sysctl_max_unsig_bytes_max = ~0UL;
+
+unsigned int rds_iw_sysctl_flow_control = 1;
+
+ctl_table rds_iw_sysctl_table[] = {
+	{
+		.ctl_name       = CTL_UNNUMBERED,
+		.procname       = "max_send_wr",
+		.data		= &rds_iw_sysctl_max_send_wr,
+		.maxlen         = sizeof(unsigned long),
+		.mode           = 0644,
+		.proc_handler   = &proc_doulongvec_minmax,
+		.extra1		= &rds_iw_sysctl_max_wr_min,
+		.extra2		= &rds_iw_sysctl_max_wr_max,
+	},
+	{
+		.ctl_name       = CTL_UNNUMBERED,
+		.procname       = "max_recv_wr",
+		.data		= &rds_iw_sysctl_max_recv_wr,
+		.maxlen         = sizeof(unsigned long),
+		.mode           = 0644,
+		.proc_handler   = &proc_doulongvec_minmax,
+		.extra1		= &rds_iw_sysctl_max_wr_min,
+		.extra2		= &rds_iw_sysctl_max_wr_max,
+	},
+	{
+		.ctl_name       = CTL_UNNUMBERED,
+		.procname       = "max_unsignaled_wr",
+		.data		= &rds_iw_sysctl_max_unsig_wrs,
+		.maxlen         = sizeof(unsigned long),
+		.mode           = 0644,
+		.proc_handler   = &proc_doulongvec_minmax,
+		.extra1		= &rds_iw_sysctl_max_unsig_wr_min,
+		.extra2		= &rds_iw_sysctl_max_unsig_wr_max,
+	},
+	{
+		.ctl_name       = CTL_UNNUMBERED,
+		.procname       = "max_unsignaled_bytes",
+		.data		= &rds_iw_sysctl_max_unsig_bytes,
+		.maxlen         = sizeof(unsigned long),
+		.mode           = 0644,
+		.proc_handler   = &proc_doulongvec_minmax,
+		.extra1		= &rds_iw_sysctl_max_unsig_bytes_min,
+		.extra2		= &rds_iw_sysctl_max_unsig_bytes_max,
+	},
+	{
+		.ctl_name       = CTL_UNNUMBERED,
+		.procname       = "max_recv_allocation",
+		.data		= &rds_iw_sysctl_max_recv_allocation,
+		.maxlen         = sizeof(unsigned long),
+		.mode           = 0644,
+		.proc_handler   = &proc_doulongvec_minmax,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "flow_control",
+		.data		= &rds_iw_sysctl_flow_control,
+		.maxlen		= sizeof(rds_iw_sysctl_flow_control),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+	{ .ctl_name = 0}
+};
+
+static struct ctl_path rds_iw_sysctl_path[] = {
+	{ .procname = "net", .ctl_name = CTL_NET, },
+	{ .procname = "rds", .ctl_name = CTL_UNNUMBERED, },
+	{ .procname = "iw", .ctl_name = CTL_UNNUMBERED, },
+	{ }
+};
+
+void rds_iw_sysctl_exit(void)
+{
+	if (rds_iw_sysctl_hdr)
+		unregister_sysctl_table(rds_iw_sysctl_hdr);
+}
+
+int __init rds_iw_sysctl_init(void)
+{
+	rds_iw_sysctl_hdr = register_sysctl_paths(rds_iw_sysctl_path, rds_iw_sysctl_table);
+	if (rds_iw_sysctl_hdr == NULL)
+		return -ENOMEM;
+	return 0;
+}

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/loop.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/loop.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/loop.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -34,7 +34,11 @@
 #include <linux/in.h>
 
 #include "rds.h"
+#include "loop.h"
 
+static DEFINE_SPINLOCK(loop_conns_lock);
+static LIST_HEAD(loop_conns);
+
 /*
  * This 'loopback' transport is a special case for flows that originate
  * and terminate on the same machine.
@@ -97,26 +101,73 @@
 	return 0;
 }
 
+struct rds_loop_connection
+{
+	struct list_head loop_node;
+	struct rds_connection *conn;
+};
+
 /*
- * We don't actually do anything to maintain connections so these are all
- * nops.
+ * Even the loopback transport needs to keep track of its connections,
+ * so it can call rds_conn_destroy() on them on exit. N.B. there is
+ * more than one loopback address (127.*.*.*), so it's not a bug to
+ * have multiple loopback conns allocated, although it is rather useless.
  */
 static int rds_loop_conn_alloc(struct rds_connection *conn, gfp_t gfp)
 {
+	struct rds_loop_connection *lc;
+	unsigned long flags;
+
+	lc = kzalloc(sizeof(struct rds_loop_connection), GFP_KERNEL);
+	if (lc == NULL)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&lc->loop_node);
+	lc->conn = conn;
+	conn->c_transport_data = lc;
+
+	spin_lock_irqsave(&loop_conns_lock, flags);
+	list_add_tail(&lc->loop_node, &loop_conns);
+	spin_unlock_irqrestore(&loop_conns_lock, flags);
+
 	return 0;
 }
-static void rds_loop_conn_free(void *data)
+
+static void rds_loop_conn_free(void *arg)
 {
+	struct rds_loop_connection *lc = arg;
+	rdsdebug("lc %p\n", lc);
+	list_del(&lc->loop_node);
+	kfree(lc);
 }
+
 static int rds_loop_conn_connect(struct rds_connection *conn)
 {
 	rds_connect_complete(conn);
 	return 0;
 }
+
 static void rds_loop_conn_shutdown(struct rds_connection *conn)
 {
 }
 
+void rds_loop_exit(void)
+{
+	struct rds_loop_connection *lc, *_lc;
+	LIST_HEAD(tmp_list);
+
+	/* avoid calling conn_destroy with irqs off */
+	spin_lock_irq(&loop_conns_lock);
+	list_splice(&loop_conns, &tmp_list);
+	INIT_LIST_HEAD(&loop_conns);
+	spin_unlock_irq(&loop_conns_lock);
+
+	list_for_each_entry_safe(lc, _lc, &tmp_list, loop_node) {
+		WARN_ON(lc->conn->c_passive);
+		rds_conn_destroy(lc->conn);
+	}
+}
+
 /*
  * This is missing .xmit_* because loop doesn't go through generic
  * rds_send_xmit() and doesn't call rds_recv_incoming().  .listen_stop and

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/loop.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/loop.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/loop.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,7 +1,9 @@
 #ifndef _RDS_LOOP_H
-#define _RDS_LOOP_H 
+#define _RDS_LOOP_H
 
 /* loop.c */
 extern struct rds_transport rds_loop_transport;
 
+void rds_loop_exit(void);
+
 #endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/message.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/message.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/message.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -71,6 +71,8 @@
 
 	if (rm->m_rdma_op)
 		rds_rdma_free_op(rm->m_rdma_op);
+	if (rm->m_rdma_mr)
+		rds_mr_put(rm->m_rdma_mr);
 }
 
 void rds_message_inc_purge(struct rds_incoming *inc)
@@ -100,7 +102,7 @@
 }
 
 void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
-			         __be16 dport, u64 seq)
+				 __be16 dport, u64 seq)
 {
 	hdr->h_flags = 0;
 	hdr->h_sport = sport;
@@ -108,6 +110,7 @@
 	hdr->h_sequence = cpu_to_be64(seq);
 	hdr->h_exthdr[0] = RDS_EXTHDR_NONE;
 }
+EXPORT_SYMBOL_GPL(rds_message_populate_header);
 
 int rds_message_add_extension(struct rds_header *hdr,
 		unsigned int type, const void *data, unsigned int len)
@@ -133,6 +136,7 @@
 	dst[len] = RDS_EXTHDR_NONE;
 	return 1;
 }
+EXPORT_SYMBOL_GPL(rds_message_add_extension);
 
 /*
  * If a message has extension headers, retrieve them here.
@@ -208,16 +212,25 @@
 	ext_hdr.h_rdma_offset = cpu_to_be32(offset);
 	return rds_message_add_extension(hdr, RDS_EXTHDR_RDMA_DEST, &ext_hdr, sizeof(ext_hdr));
 }
+EXPORT_SYMBOL_GPL(rds_message_add_rdma_dest_extension);
 
 struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp)
 {
 	struct rds_message *rm;
 
-	rm = kzalloc(sizeof(struct rds_message) + 
-		     (nents * sizeof (struct scatterlist)), gfp);
-	if (rm == NULL)
+	rm = kzalloc(sizeof(struct rds_message) +
+		     (nents * sizeof(struct scatterlist)), gfp);
+	if (!rm)
 		goto out;
 
+#ifdef CONFIG_DEBUG_SG
+{
+	unsigned int i;
+
+	for (i=0; i < nents; i++)
+		rm->m_sg[i].sg_magic = SG_MAGIC;
+}
+#endif
 	atomic_set(&rm->m_refcount, 1);
 	INIT_LIST_HEAD(&rm->m_sock_item);
 	INIT_LIST_HEAD(&rm->m_conn_item);
@@ -296,7 +309,7 @@
 
 		rdsdebug("copying %lu bytes from user iov [%p, %zu] + %lu to "
 			 "sg [%p, %u, %u] + %lu\n",
-			 to_copy, iov->iov_base, iov->iov_len, iov_off, 
+			 to_copy, iov->iov_base, iov->iov_len, iov_off,
 			 (void *)sg_page(sg), sg->offset, sg->length, sg_off);
 
 		ret = rds_page_copy_from_user(sg_page(sg), sg->offset + sg_off,
@@ -357,7 +370,7 @@
 
 		rdsdebug("copying %lu bytes to user iov [%p, %zu] + %lu to "
 			 "sg [%p, %u, %u] + %lu\n",
-			 to_copy, iov->iov_base, iov->iov_len, iov_off, 
+			 to_copy, iov->iov_base, iov->iov_len, iov_off,
 			 sg_page(sg), sg->offset, sg->length, vec_off);
 
 		ret = rds_page_copy_to_user(sg_page(sg), sg->offset + vec_off,
@@ -397,3 +410,5 @@
 	if (waitqueue_active(&rds_message_flush_waitq))
 		wake_up(&rds_message_flush_waitq);
 }
+EXPORT_SYMBOL_GPL(rds_message_unmapped);
+

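rds_message_alloc() sizes a single kzalloc() to cover the message header plus the trailing m_sg[0] array, and the hunk above additionally stamps SG_MAGIC on CONFIG_DEBUG_SG kernels. A small illustrative sketch of that single-allocation idiom with hypothetical names (msg_like, msg_like_alloc); where sg_init_table() is available it performs the SG_MAGIC initialisation shown open-coded above:

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <asm/atomic.h>

struct msg_like {
	atomic_t		refcount;
	unsigned int		nents;
	struct scatterlist	sg[0];	/* flexible tail, must be last */
};

static struct msg_like *msg_like_alloc(unsigned int nents, gfp_t gfp)
{
	struct msg_like *m;

	/* one allocation covers the header and all nents entries */
	m = kzalloc(sizeof(*m) + nents * sizeof(struct scatterlist), gfp);
	if (!m)
		return NULL;

	sg_init_table(m->sg, nents);	/* SG_MAGIC on debug kernels */
	atomic_set(&m->refcount, 1);
	m->nents = nents;
	return m;
}
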
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/page.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/page.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/page.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -68,7 +68,7 @@
 	else
 		ret = __copy_from_user_inatomic(addr + offset, ptr, bytes);
 	kunmap_atomic(addr, KM_USER0);
-	
+
 	if (ret) {
 		addr = kmap(page);
 		if (to_user)
@@ -82,9 +82,10 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(rds_page_copy_user);
 
 /*
- * Message allocation uses this to build up regions of a message.  
+ * Message allocation uses this to build up regions of a message.
  *
  * @bytes - the number of bytes needed.
  * @gfp - the waiting behaviour of the allocation
@@ -127,7 +128,7 @@
 	rem = &rds_per_cpu(rds_page_remainders, get_cpu());
 	local_irq_save(flags);
 
-	for(;;) {
+	while (1) {
 		/* avoid a tiny region getting stuck by tossing it */
 		if (rem->r_page && bytes > (PAGE_SIZE - rem->r_offset)) {
 			rds_stats_inc(s_page_remainder_miss);
@@ -188,7 +189,7 @@
 
 static int rds_page_remainder_cpu_notify(struct notifier_block *self,
 					 unsigned long action, void *hcpu)
-{         
+{
 	struct rds_page_remainder *rem;
 	long cpu = (long)hcpu;
 

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/rdma.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/rdma.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/rdma.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -33,7 +33,6 @@
 #include <linux/pagemap.h>
 #include <linux/rbtree.h>
 #include <linux/dma-mapping.h> /* for DMA_*_DEVICE */
-#include <asm/byteorder.h>
 
 #include "rdma.h"
 
@@ -53,7 +52,7 @@
  * causes the address to wrap or overflows an unsigned int.  This comes
  * from being stored in the 'length' member of 'struct scatterlist'.
  */
-unsigned int rds_pages_in_vec(struct rds_iovec *vec)
+static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
 {
 	if ((vec->addr + vec->bytes <= vec->addr) ||
 	    (vec->bytes > (u64)UINT_MAX))
@@ -116,11 +115,8 @@
 		mr->r_trans->free_mr(trans_private, mr->r_invalidate);
 }
 
-static void rds_mr_put(struct rds_mr *mr)
+void __rds_put_mr_final(struct rds_mr *mr)
 {
-	if (!atomic_dec_and_test(&mr->r_refcount))
-		return;
-
 	rds_destroy_mr(mr);
 	kfree(mr);
 }
@@ -159,7 +155,7 @@
 			     nr_pages, write, 0, pages, NULL);
 	up_read(&current->mm->mmap_sem);
 
-	if (0 <= ret && (unsigned) ret < nr_pages) {
+	if (ret > 0 && (unsigned) ret < nr_pages) {
 		while (ret--)
 			put_page(pages[ret]);
 		ret = -EFAULT;
@@ -169,7 +165,7 @@
 }
 
 static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
-				u64 *cookie_ret)
+				u64 *cookie_ret, struct rds_mr **mr_ret)
 {
 	struct rds_mr *mr = NULL, *found;
 	unsigned int nr_pages;
@@ -198,7 +194,7 @@
 		goto out;
 	}
 
-	rdsdebug("RDS: get_mr addr %Lx len %Lu nr_pages %u\n",
+	rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
 		args->vec.addr, args->vec.bytes, nr_pages);
 
 	/* XXX clamp nr_pages to limit the size of this alloc? */
@@ -248,8 +244,12 @@
 	}
 
 	/* Stick all pages into the scatterlist */
-	for (i = 0 ; i < nents; i++)
+	for (i = 0 ; i < nents; i++) {
+#ifdef CONFIG_DEBUG_SG
+		sg[i].sg_magic = SG_MAGIC;
+#endif
 		sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);
+	}
 
 	rdsdebug("RDS: trans_private nents is %u\n", nents);
 
@@ -257,8 +257,7 @@
 	 * s/g list is now owned by the MR.
 	 * Note that dma_map() implies that pending writes are
 	 * flushed to RAM, so no dma_sync is needed here. */
-	trans_private = rs->rs_transport->get_mr(sg, nents,
-						 rs->rs_bound_addr, 
+	trans_private = rs->rs_transport->get_mr(sg, nents, rs,
 						 &mr->r_key);
 
 	if (IS_ERR(trans_private)) {
@@ -296,11 +295,14 @@
 	BUG_ON(found && found != mr);
 
 	rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
+	if (mr_ret) {
+		atomic_inc(&mr->r_refcount);
+		*mr_ret = mr;
+	}
 
 	ret = 0;
 out:
-	if (pages)
-		kfree(pages);
+	kfree(pages);
 	if (mr)
 		rds_mr_put(mr);
 	return ret;
@@ -310,14 +312,14 @@
 {
 	struct rds_get_mr_args args;
 
-	if (optlen != sizeof (struct rds_get_mr_args))
+	if (optlen != sizeof(struct rds_get_mr_args))
 		return -EINVAL;
 
 	if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval,
 			   sizeof(struct rds_get_mr_args)))
 		return -EFAULT;
 
-	return __rds_rdma_map(rs, &args, NULL);
+	return __rds_rdma_map(rs, &args, NULL, NULL);
 }
 
 /*
@@ -422,9 +424,7 @@
 		put_page(page);
 	}
 
-	if (ro->r_notifier)
-		kfree(ro->r_notifier);
-
+	kfree(ro->r_notifier);
 	kfree(ro);
 }
 
@@ -524,7 +524,7 @@
 
 	nr_bytes = 0;
 
-	rdsdebug("RDS: rdma prepare nr_local %Lu rva %Lx rkey %x\n",
+	rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
 	       (unsigned long long)args->nr_local,
 	       (unsigned long long)args->remote_vec.addr,
 	       op->r_key);
@@ -542,6 +542,9 @@
 			goto out;
 		}
 
+		rs->rs_user_addr = vec.addr;
+		rs->rs_user_bytes = vec.bytes;
+
 		/* did the user change the vec under us? */
 		if (nr > max_pages || op->r_nents + nr > nr_pages) {
 			ret = -EINVAL;
@@ -554,7 +557,7 @@
 		if (ret < 0)
 			goto out;
 
-		rdsdebug("RDS: nr_bytes %u nr %u vec.bytes %Lu vec.addr %Lx\n",
+		rdsdebug("RDS: nr_bytes %u nr %u vec.bytes %llu vec.addr %llx\n",
 		       nr_bytes, nr, vec.bytes, vec.addr);
 
 		nr_bytes += vec.bytes;
@@ -563,11 +566,14 @@
 			unsigned int offset = vec.addr & ~PAGE_MASK;
 
 			sg = &op->r_sg[op->r_nents + j];
+#ifdef CONFIG_DEBUG_SG
+			sg->sg_magic = SG_MAGIC;
+#endif
 			sg_set_page(sg, pages[j],
 					min_t(unsigned int, vec.bytes, PAGE_SIZE - offset),
 					offset);
 
-			rdsdebug("RDS: sg->offset %x sg->len %x vec.addr %Lx vec.bytes %Lu\n",
+			rdsdebug("RDS: sg->offset %x sg->len %x vec.addr %llx vec.bytes %llu\n",
 			       sg->offset, sg->length, vec.addr, vec.bytes);
 
 			vec.addr += sg->length;
@@ -589,8 +595,7 @@
 
 	ret = 0;
 out:
-	if (pages)
-		kfree(pages);
+	kfree(pages);
 	if (ret) {
 		if (op)
 			rds_rdma_free_op(op);
@@ -655,7 +660,7 @@
 
 	if (mr) {
 		mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
-		rds_mr_put(mr);
+		rm->m_rdma_mr = mr;
 	}
 	return err;
 }
@@ -673,5 +678,5 @@
 	 || rm->m_rdma_cookie != 0)
 		return -EINVAL;
 
-	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie);
+	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->m_rdma_mr);
 }

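rds_pin_pages() (partially visible above) wraps the 2.6-era get_user_pages(); the changed check means a partial pin now only unwinds pages that were actually grabbed. A hedged, generic sketch of that pin-then-unwind pattern, not the committed function:

static int pin_user_pages_sketch(unsigned long user_addr,
				 unsigned int nr_pages,
				 struct page **pages, int write)
{
	int ret;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, user_addr & PAGE_MASK,
			     nr_pages, write, 0, pages, NULL);
	up_read(&current->mm->mmap_sem);

	/* partial pin: release what we got and report a fault */
	if (ret >= 0 && (unsigned int)ret < nr_pages) {
		while (ret > 0)
			put_page(pages[--ret]);
		return -EFAULT;
	}
	return ret;
}
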
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/rdma.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/rdma.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/rdma.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,9 +1,9 @@
 #ifndef _RDS_RDMA_H
-#define _RDS_RDMA_H 
+#define _RDS_RDMA_H
 
 #include <linux/rbtree.h>
 #include <linux/spinlock.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
 
 #include "rds.h"
 
@@ -13,16 +13,16 @@
 	u32			r_key;
 
 	/* A copy of the creation flags */
-	unsigned int		r_use_once : 1,
-				r_invalidate : 1,
-				r_write : 1;
+	unsigned int		r_use_once:1;
+	unsigned int		r_invalidate:1;
+	unsigned int		r_write:1;
 
 	/* This is for RDS_MR_DEAD.
 	 * It would be nice & consistent to make this part of the above
 	 * bit field here, but we need to use test_and_set_bit.
 	 */
 	unsigned long		r_state;
-	struct rds_sock *	r_sock;		/* back pointer to the socket that owns us */
+	struct rds_sock		*r_sock; /* back pointer to the socket that owns us */
 	struct rds_transport	*r_trans;
 	void			*r_trans_private;
 };
@@ -33,12 +33,12 @@
 struct rds_rdma_op {
 	u32			r_key;
 	u64			r_remote_addr;
-	unsigned		r_write   : 1,
-				r_fence   : 1,
-				r_notify  : 1,
-				r_recverr : 1,
-				r_mapped  : 1;
-	struct rds_notifier *	r_notifier;
+	unsigned int		r_write:1;
+	unsigned int		r_fence:1;
+	unsigned int		r_notify:1;
+	unsigned int		r_recverr:1;
+	unsigned int		r_mapped:1;
+	struct rds_notifier	*r_notifier;
 	unsigned int		r_bytes;
 	unsigned int		r_nents;
 	unsigned int		r_count;
@@ -74,4 +74,11 @@
 void rds_rdma_free_op(struct rds_rdma_op *ro);
 void rds_rdma_send_complete(struct rds_message *rm, int);
 
+extern void __rds_put_mr_final(struct rds_mr *mr);
+static inline void rds_mr_put(struct rds_mr *mr)
+{
+	if (atomic_dec_and_test(&mr->r_refcount))
+		__rds_put_mr_final(mr);
+}
+
 #endif

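Moving rds_mr_put() into the header follows the usual refcounting split: the atomic_dec_and_test() fast path stays inline while the rare destruction path (__rds_put_mr_final(), now in rdma.c) stays out of line. The same shape with hypothetical names:

struct obj {
	atomic_t	r_refcount;
	/* ... payload ... */
};

/* out of line in a .c file: destroys and frees the object */
extern void __obj_put_final(struct obj *o);

static inline void obj_hold(struct obj *o)
{
	atomic_inc(&o->r_refcount);
}

static inline void obj_put(struct obj *o)
{
	/* only the final put pays for the out-of-line call */
	if (atomic_dec_and_test(&o->r_refcount))
		__obj_put_final(o);
}
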
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/rdma_transport.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/rdma_transport.c	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/rdma_transport.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2009 Oracle.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <rdma/rdma_cm.h>
+
+#include "rdma_transport.h"
+
+static struct rdma_cm_id *rds_rdma_listen_id;
+
+int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
+			      struct rdma_cm_event *event)
+{
+	/* this can be null in the listening path */
+	struct rds_connection *conn = cm_id->context;
+	struct rds_transport *trans;
+	int ret = 0;
+
+	rdsdebug("conn %p id %p handling event %u\n", conn, cm_id,
+		 event->event);
+
+	if (cm_id->device->node_type == RDMA_NODE_RNIC)
+		trans = &rds_iw_transport;
+	else
+		trans = &rds_ib_transport;
+
+	/* Prevent shutdown from tearing down the connection
+	 * while we're executing. */
+	if (conn) {
+		mutex_lock(&conn->c_cm_lock);
+
+		/* If the connection is being shut down, bail out
+		 * right away. We return 0 so cm_id doesn't get
+		 * destroyed prematurely */
+		if (rds_conn_state(conn) == RDS_CONN_DISCONNECTING) {
+			/* Reject incoming connections while we're tearing
+			 * down an existing one. */
+			if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST)
+				ret = 1;
+			goto out;
+		}
+	}
+
+	switch (event->event) {
+	case RDMA_CM_EVENT_CONNECT_REQUEST:
+		ret = trans->cm_handle_connect(cm_id, event);
+		break;
+
+	case RDMA_CM_EVENT_ADDR_RESOLVED:
+		/* XXX do we need to clean up if this fails? */
+		ret = rdma_resolve_route(cm_id,
+					 RDS_RDMA_RESOLVE_TIMEOUT_MS);
+		break;
+
+	case RDMA_CM_EVENT_ROUTE_RESOLVED:
+		/* XXX worry about racing with listen acceptance */
+		ret = trans->cm_initiate_connect(cm_id);
+		break;
+
+	case RDMA_CM_EVENT_ESTABLISHED:
+		trans->cm_connect_complete(conn, event);
+		break;
+
+	case RDMA_CM_EVENT_ADDR_ERROR:
+	case RDMA_CM_EVENT_ROUTE_ERROR:
+	case RDMA_CM_EVENT_CONNECT_ERROR:
+	case RDMA_CM_EVENT_UNREACHABLE:
+	case RDMA_CM_EVENT_REJECTED:
+	case RDMA_CM_EVENT_DEVICE_REMOVAL:
+	case RDMA_CM_EVENT_ADDR_CHANGE:
+		if (conn)
+			rds_conn_drop(conn);
+		break;
+
+	case RDMA_CM_EVENT_DISCONNECTED:
+		printk(KERN_WARNING "RDS/RDMA: DISCONNECT event - dropping connection "
+			"%u.%u.%u.%u->%u.%u.%u.%u\n", NIPQUAD(conn->c_laddr),
+			 NIPQUAD(conn->c_faddr));
+		rds_conn_drop(conn);
+		break;
+
+	default:
+		/* things like device disconnect? */
+		printk(KERN_ERR "unknown event %u\n", event->event);
+		BUG();
+		break;
+	}
+
+out:
+	if (conn) {
+		//struct rds_iw_connection *ic = conn->c_transport_data;
+
+		/* If we return non-zero, we must to hang on to the cm_id */
+		/* If we return non-zero, we must hang on to the cm_id */
+		//BUG_ON(ic->i_cm_id == cm_id && ret);
+
+		mutex_unlock(&conn->c_cm_lock);
+	}
+
+	rdsdebug("id %p event %u handling ret %d\n", cm_id, event->event, ret);
+
+	return ret;
+}
+
+static int __init rds_rdma_listen_init(void)
+{
+	struct sockaddr_in sin;
+	struct rdma_cm_id *cm_id;
+	int ret;
+
+	cm_id = rdma_create_id(rds_rdma_cm_event_handler, NULL, RDMA_PS_TCP);
+	if (IS_ERR(cm_id)) {
+		ret = PTR_ERR(cm_id);
+		printk(KERN_ERR "RDS/RDMA: failed to setup listener, "
+		       "rdma_create_id() returned %d\n", ret);
+		goto out;
+	}
+
+	sin.sin_family = PF_INET,
+	sin.sin_addr.s_addr = (__force u32)htonl(INADDR_ANY);
+	sin.sin_port = (__force u16)htons(RDS_PORT);
+
+	/*
+	 * XXX I bet this binds the cm_id to a device.  If we want to support
+	 * fail-over we'll have to take this into consideration.
+	 */
+	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
+	if (ret) {
+		printk(KERN_ERR "RDS/RDMA: failed to setup listener, "
+		       "rdma_bind_addr() returned %d\n", ret);
+		goto out;
+	}
+
+	ret = rdma_listen(cm_id, 128);
+	if (ret) {
+		printk(KERN_ERR "RDS/RDMA: failed to setup listener, "
+		       "rdma_listen() returned %d\n", ret);
+		goto out;
+	}
+
+	rdsdebug("cm %p listening on port %u\n", cm_id, RDS_PORT);
+
+	rds_rdma_listen_id = cm_id;
+	cm_id = NULL;
+out:
+	if (cm_id)
+		rdma_destroy_id(cm_id);
+	return ret;
+}
+
+static void rds_rdma_listen_stop(void)
+{
+	if (rds_rdma_listen_id) {
+		rdsdebug("cm %p\n", rds_rdma_listen_id);
+		rdma_destroy_id(rds_rdma_listen_id);
+		rds_rdma_listen_id = NULL;
+	}
+}
+
+int __init rds_rdma_init(void)
+{
+	int ret;
+
+	ret = rds_rdma_listen_init();
+	if (ret)
+		goto out;
+
+	ret = rds_iw_init();
+	if (ret)
+		goto err_iw_init;
+
+	ret = rds_ib_init();
+	if (ret)
+		goto err_ib_init;
+
+	goto out;
+
+err_ib_init:
+	rds_iw_exit();
+err_iw_init:
+	rds_rdma_listen_stop();
+out:
+	return ret;
+}
+
+void rds_rdma_exit(void)
+{
+	/* stop listening first to ensure no new connections are attempted */
+	rds_rdma_listen_stop();
+	rds_ib_exit();
+	rds_iw_exit();
+}
+

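rds_rdma_conn_connect() is only declared in rdma_transport.h below, so as a hedged sketch this is what the active side feeding the handler above typically looks like. Only rds_rdma_cm_event_handler(), RDS_PORT, RDS_RDMA_RESOLVE_TIMEOUT_MS and the c_laddr/c_faddr fields come from this commit; the function body is an assumption, not the committed implementation:

static int rds_rdma_conn_connect_sketch(struct rds_connection *conn)
{
	struct sockaddr_in src, dest;
	struct rdma_cm_id *cm_id;
	int ret;

	/* the handler gets the conn back through cm_id->context */
	cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn, RDMA_PS_TCP);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	memset(&src, 0, sizeof(src));
	src.sin_family = AF_INET;
	src.sin_addr.s_addr = conn->c_laddr;

	memset(&dest, 0, sizeof(dest));
	dest.sin_family = AF_INET;
	dest.sin_addr.s_addr = conn->c_faddr;
	dest.sin_port = htons(RDS_PORT);

	/* kicks off ADDR_RESOLVED -> ROUTE_RESOLVED -> ESTABLISHED
	 * in rds_rdma_cm_event_handler() above */
	ret = rdma_resolve_addr(cm_id, (struct sockaddr *)&src,
				(struct sockaddr *)&dest,
				RDS_RDMA_RESOLVE_TIMEOUT_MS);
	if (ret)
		rdma_destroy_id(cm_id);
	return ret;
}
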
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/rdma_transport.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/rdma_transport.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/rdma_transport.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,28 @@
+#ifndef _RDMA_TRANSPORT_H
+#define _RDMA_TRANSPORT_H
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_cm.h>
+#include "rds.h"
+
+#define RDS_RDMA_RESOLVE_TIMEOUT_MS     5000
+
+int rds_rdma_conn_connect(struct rds_connection *conn);
+int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
+			      struct rdma_cm_event *event);
+
+/* from rdma_transport.c */
+int rds_rdma_init(void);
+void rds_rdma_exit(void);
+
+/* from ib.c */
+extern struct rds_transport rds_ib_transport;
+int rds_ib_init(void);
+void rds_ib_exit(void);
+
+/* from iw.c */
+extern struct rds_transport rds_iw_transport;
+int rds_iw_init(void);
+void rds_iw_exit(void);
+
+#endif

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/rds.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/rds.h	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/rds.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -1,12 +1,13 @@
 #ifndef _RDS_H
-#define _RDS_H 
+#define _RDS_H
 
 #include <net/sock.h>
 #include <linux/scatterlist.h>
+#include <rdma/rdma_cm.h>
 #include <asm/atomic.h>
 
 #include <linux/mutex.h>
-#include "ib_rds.h"
+#include "rds_rdma.h"
 
 /*
  * RDS Network protocol version
@@ -63,15 +64,59 @@
 #endif
 
 /*
+ * RDS trace facilities
+ */
+enum {
+  RDS_BIND = 0,
+  RDS_CONG,
+  RDS_CONNECTION,
+  RDS_RDMA,
+  RDS_PAGE,
+  RDS_SEND,
+  RDS_RECV,
+  RDS_THREADS,
+  RDS_INFO,
+  RDS_MESSAGE,
+  RDS_IB,
+  RDS_IB_CM,
+  RDS_IB_RDMA,
+  RDS_IB_RING,
+  RDS_IB_RECV,
+  RDS_IB_SEND,
+  RDS_TCP,
+  RDS_TCP_CONNECT,
+  RDS_TCP_LISTEN,
+  RDS_TCP_RECV,
+  RDS_TCP_SEND
+};
+
+enum {
+  RDS_ALWAYS = 0,
+  RDS_MINIMAL,
+  RDS_LOW,
+  RDS_MEDIUM,
+  RDS_HIGH,
+  RDS_VERBOSE
+};
+
+
+#define rdstrace(fac, lvl, fmt, args...) do { 			\
+	if (test_bit(fac, &rds_sysctl_trace_flags) &&		\
+	  lvl <= rds_sysctl_trace_level)			\
+		printk("%s(): " fmt, __func__, ##args); 	\
+} while(0);
+
+
+/*
  * This is the sad making.  Some kernels have a bug in the per_cpu() api which
  * makes DEFINE_PER_CPU trigger an oops on insmod because the per-cpu section
  * in the module is not cacheline-aligned.  As much as we'd like to tell users
  * with older kernels to stuff it, that's not reasonable.  We'll roll our own
  * until this doesn't have to build against older kernels.
  */
-#define RDS_DEFINE_PER_CPU(type, var)  type var[NR_CPUS] 
-#define RDS_DECLARE_PER_CPU(type, var)  extern type var[NR_CPUS] 
-#define rds_per_cpu(var, cpu)  var[cpu] 
+#define RDS_DEFINE_PER_CPU(type, var)  type var[NR_CPUS]
+#define RDS_DECLARE_PER_CPU(type, var)  extern type var[NR_CPUS]
+#define rds_per_cpu(var, cpu)  var[cpu]
 
 /* XXX is there one of these somewhere? */
 #define ceil(x, y) \
@@ -116,13 +161,13 @@
 	struct hlist_node	c_hash_node;
 	__be32			c_laddr;
 	__be32			c_faddr;
-	unsigned int		c_loopback : 1;
-	struct rds_connection *	c_passive;
+	unsigned int		c_loopback:1;
+	struct rds_connection	*c_passive;
 
 	struct rds_cong_map	*c_lcong;
 	struct rds_cong_map	*c_fcong;
 
-	struct semaphore	c_send_sem;
+	struct mutex		c_send_lock;	/* protect send ring */
 	struct rds_message	*c_xmit_rm;
 	unsigned long		c_xmit_sg;
 	unsigned int		c_xmit_hdr_off;
@@ -146,7 +191,7 @@
 	struct delayed_work	c_recv_w;
 	struct delayed_work	c_conn_w;
 	struct work_struct	c_down_w;
-	struct mutex		c_cm_lock;
+	struct mutex		c_cm_lock;	/* protect conn state & cm */
 
 	struct list_head	c_map_item;
 	unsigned long		c_map_queued;
@@ -292,6 +337,7 @@
 	struct rds_sock		*m_rs;
 	struct rds_rdma_op	*m_rdma_op;
 	rds_rdma_cookie_t	m_rdma_cookie;
+	struct rds_mr		*m_rdma_mr;
 	unsigned int		m_nents;
 	unsigned int		m_count;
 	struct scatterlist	m_sg[0];
@@ -345,7 +391,8 @@
 	struct list_head	t_item;
 	struct module		*t_owner;
 	char			*t_name;
-	unsigned int		t_prefer_loopback : 1;
+	unsigned int		t_prefer_loopback:1;
+
 	int (*laddr_check)(__be32 addr);
 	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
 	void (*conn_free)(void *data);
@@ -363,12 +410,18 @@
 				size_t size);
 	void (*inc_purge)(struct rds_incoming *inc);
 	void (*inc_free)(struct rds_incoming *inc);
-	void (*listen_stop)(void);
+
+	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
+				 struct rdma_cm_event *event);
+	int (*cm_initiate_connect)(struct rdma_cm_id *cm_id);
+	void (*cm_connect_complete)(struct rds_connection *conn,
+				    struct rdma_cm_event *event);
+
 	unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
 					unsigned int avail);
 	void (*exit)(void);
 	void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
-			__be32 ip_addr, u32 *key_ret);
+			struct rds_sock *rs, u32 *key_ret);
 	void (*sync_mr)(void *trans_private, int direction);
 	void (*free_mr)(void *trans_private, int invalidate);
 	void (*flush_mrs)(void);
@@ -382,6 +435,9 @@
 	struct sock		*rs_sk;
 #endif
 
+	u64			rs_user_addr;
+	u64			rs_user_bytes;
+
 	/*
 	 * bound_addr used for both incoming and outgoing, no INADDR_ANY
 	 * support.
@@ -392,7 +448,7 @@
 	__be16			rs_bound_port;
 	__be16			rs_conn_port;
 
-	/* 
+	/*
 	 * This is only used to communicate the transport between bind and
 	 * initiating connections.  All other trans use is referenced through
 	 * the connection.
@@ -405,6 +461,9 @@
 	 */
 	struct rds_connection	*rs_conn;
 
+	/* flag indicating we were congested or not */
+	int			rs_congested;
+
 	/* rs_lock protects all these adjacent members before the newline */
 	spinlock_t		rs_lock;
 	struct list_head	rs_send_queue;
@@ -523,6 +582,7 @@
 	if (!sock_flag(sk, SOCK_DEAD) && waitq)
 		wake_up(waitq);
 }
+extern wait_queue_head_t rds_poll_waitq;
 
 
 /* bind.c */
@@ -552,6 +612,7 @@
 				       struct rds_transport *trans, gfp_t gfp);
 struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
 			       struct rds_transport *trans, gfp_t gfp);
+void rds_conn_destroy(struct rds_connection *conn);
 void rds_conn_reset(struct rds_connection *conn);
 void rds_conn_drop(struct rds_connection *conn);
 void rds_for_each_conn_info(struct socket *sock, unsigned int len,
@@ -562,12 +623,12 @@
 void __rds_conn_error(struct rds_connection *conn, const char *, ...)
 				__attribute__ ((format (printf, 2, 3)));
 #define rds_conn_error(conn, fmt...) \
-	__rds_conn_error(conn, KERN_WARNING "RDS: " fmt )
+	__rds_conn_error(conn, KERN_WARNING "RDS: " fmt)
 
 static inline int
 rds_conn_transition(struct rds_connection *conn, int old, int new)
 {
-	return (atomic_cmpxchg(&conn->c_state, old, new) == old);
+	return atomic_cmpxchg(&conn->c_state, old, new) == old;
 }
 
 static inline int
@@ -582,17 +643,23 @@
 	return atomic_read(&conn->c_state) == RDS_CONN_UP;
 }
 
+static inline int
+rds_conn_connecting(struct rds_connection *conn)
+{
+	return atomic_read(&conn->c_state) == RDS_CONN_CONNECTING;
+}
+
 /* message.c */
 struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
 struct rds_message *rds_message_copy_from_user(struct iovec *first_iov,
 					       size_t total_len);
 struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
 void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
-			         __be16 dport, u64 seq);
+				 __be16 dport, u64 seq);
 int rds_message_add_extension(struct rds_header *hdr,
-		                unsigned int type, const void *data, unsigned int len);
+			      unsigned int type, const void *data, unsigned int len);
 int rds_message_next_extension(struct rds_header *hdr,
-		                unsigned int *pos, void *buf, unsigned int *buflen);
+			       unsigned int *pos, void *buf, unsigned int *buflen);
 int rds_message_add_version_extension(struct rds_header *hdr, unsigned int version);
 int rds_message_get_version_extension(struct rds_header *hdr, unsigned int *version);
 int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
@@ -691,6 +758,8 @@
 extern unsigned int  rds_sysctl_max_unacked_packets;
 extern unsigned int  rds_sysctl_max_unacked_bytes;
 extern unsigned int  rds_sysctl_ping_enable;
+extern unsigned long rds_sysctl_trace_flags;
+extern unsigned int  rds_sysctl_trace_level;
 
 /* threads.c */
 int __init rds_threads_init(void);
@@ -706,7 +775,6 @@
 int rds_trans_register(struct rds_transport *trans);
 void rds_trans_unregister(struct rds_transport *trans);
 struct rds_transport *rds_trans_get_preferred(__be32 addr);
-void rds_trans_stop_listening(void);
 unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
 				       unsigned int avail);
 int __init rds_trans_init(void);

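The new rdstrace() macro gates each message on two sysctls added to sysctl.c later in this commit: bit n of rds_sysctl_trace_flags selects facility n from the enum above, and rds_sysctl_trace_level sets the maximum verbosity emitted. A hypothetical call site (rm, conn and len are stand-in variables, not from this patch):

/* enable from the kernel side, or equivalently through the new
 * trace_flags/trace_level sysctls: RDS_SEND is bit 5, so mask = 32 */
rds_sysctl_trace_flags = 1UL << RDS_SEND;
rds_sysctl_trace_level = RDS_MEDIUM;	/* 3: print ALWAYS..MEDIUM */

/* emitted only while RDS_SEND tracing is enabled at level >= RDS_MEDIUM */
rdstrace(RDS_SEND, RDS_MEDIUM,
	 "queued rm %p on conn %p, %u bytes\n", rm, conn, len);
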
Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/rds_rdma.h
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/rds_rdma.h	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/rds_rdma.h	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,248 @@
+/*
+ * Copyright (c) 2008 Oracle.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef IB_RDS_H
+#define IB_RDS_H
+
+#include <linux/types.h>
+
+/* These sparse annotated types shouldn't be in any user
+ * visible header file. We should clean this up rather
+ * than kludging around them. */
+#ifndef __KERNEL__
+#define __be16	u_int16_t
+#define __be32	u_int32_t
+#define __be64	u_int64_t
+#endif
+
+#define RDS_IB_ABI_VERSION		0x301
+
+/*
+ * setsockopt/getsockopt for SOL_RDS
+ */
+#define RDS_CANCEL_SENT_TO      	1
+#define RDS_GET_MR			2
+#define RDS_FREE_MR			3
+/* deprecated: RDS_BARRIER 4 */
+#define RDS_RECVERR			5
+#define RDS_CONG_MONITOR		6
+
+/*
+ * Control message types for SOL_RDS.
+ *
+ * CMSG_RDMA_ARGS (sendmsg)
+ *	Request a RDMA transfer to/from the specified
+ *	memory ranges.
+ *	The cmsg_data is a struct rds_rdma_args.
+ * RDS_CMSG_RDMA_DEST (recvmsg, sendmsg)
+ *	Kernel informs application about intended
+ *	source/destination of a RDMA transfer
+ * RDS_CMSG_RDMA_MAP (sendmsg)
+ *	Application asks kernel to map the given
+ *	memory range into a IB MR, and send the
+ *	R_Key along in an RDS extension header.
+ *	The cmsg_data is a struct rds_get_mr_args,
+ *	the same as for the GET_MR setsockopt.
+ * RDS_CMSG_RDMA_STATUS (recvmsg)
+ *	Returns the status of a completed RDMA operation.
+ */
+#define RDS_CMSG_RDMA_ARGS		1
+#define RDS_CMSG_RDMA_DEST		2
+#define RDS_CMSG_RDMA_MAP		3
+#define RDS_CMSG_RDMA_STATUS		4
+#define RDS_CMSG_CONG_UPDATE		5
+
+#define RDS_INFO_FIRST			10000
+#define RDS_INFO_COUNTERS		10000
+#define RDS_INFO_CONNECTIONS		10001
+/* 10002 aka RDS_INFO_FLOWS is deprecated */
+#define RDS_INFO_SEND_MESSAGES		10003
+#define RDS_INFO_RETRANS_MESSAGES       10004
+#define RDS_INFO_RECV_MESSAGES          10005
+#define RDS_INFO_SOCKETS                10006
+#define RDS_INFO_TCP_SOCKETS            10007
+#define RDS_INFO_IB_CONNECTIONS		10008
+#define RDS_INFO_CONNECTION_STATS	10009
+#define RDS_INFO_IWARP_CONNECTIONS	10010
+#define RDS_INFO_LAST			10010
+
+struct rds_info_counter {
+	u_int8_t	name[32];
+	u_int64_t	value;
+} __attribute__((packed));
+
+#define RDS_INFO_CONNECTION_FLAG_SENDING	0x01
+#define RDS_INFO_CONNECTION_FLAG_CONNECTING	0x02
+#define RDS_INFO_CONNECTION_FLAG_CONNECTED	0x04
+
+struct rds_info_connection {
+	u_int64_t	next_tx_seq;
+	u_int64_t	next_rx_seq;
+	__be32		laddr;
+	__be32		faddr;
+	u_int8_t	transport[15];		/* null term ascii */
+	u_int8_t	flags;
+} __attribute__((packed));
+
+struct rds_info_flow {
+	__be32		laddr;
+	__be32		faddr;
+	u_int32_t	bytes;
+	__be16		lport;
+	__be16		fport;
+} __attribute__((packed));
+
+#define RDS_INFO_MESSAGE_FLAG_ACK               0x01
+#define RDS_INFO_MESSAGE_FLAG_FAST_ACK          0x02
+
+struct rds_info_message {
+	u_int64_t	seq;
+	u_int32_t	len;
+	__be32		laddr;
+	__be32		faddr;
+	__be16		lport;
+	__be16		fport;
+	u_int8_t	flags;
+} __attribute__((packed));
+
+struct rds_info_socket {
+	u_int32_t	sndbuf;
+	__be32		bound_addr;
+	__be32		connected_addr;
+	__be16		bound_port;
+	__be16		connected_port;
+	u_int32_t	rcvbuf;
+	u_int64_t	inum;
+} __attribute__((packed));
+
+#define RDS_IB_GID_LEN	16
+struct rds_info_rdma_connection {
+	__be32		src_addr;
+	__be32		dst_addr;
+	uint8_t		src_gid[RDS_IB_GID_LEN];
+	uint8_t		dst_gid[RDS_IB_GID_LEN];
+
+	uint32_t	max_send_wr;
+	uint32_t	max_recv_wr;
+	uint32_t	max_send_sge;
+	uint32_t	rdma_mr_max;
+	uint32_t	rdma_mr_size;
+};
+
+/*
+ * Congestion monitoring.
+ * Congestion control in RDS happens at the host connection
+ * level by exchanging a bitmap marking congested ports.
+ * By default, a process sleeping in poll() is always woken
+ * up when the congestion map is updated.
+ * With explicit monitoring, an application can have more
+ * fine-grained control.
+ * The application installs a 64bit mask value in the socket,
+ * where each bit corresponds to a group of ports.
+ * When a congestion update arrives, RDS checks the set of
+ * ports that are now uncongested against the list bit mask
+ * installed in the socket, and if they overlap, we queue a
+ * cong_notification on the socket.
+ *
+ * To install the congestion monitor bitmask, use RDS_CONG_MONITOR
+ * with the 64bit mask.
+ * Congestion updates are received via RDS_CMSG_CONG_UPDATE
+ * control messages.
+ *
+ * The correspondence between bits and ports is
+ *	1 << (portnum % 64)
+ */
+#define RDS_CONG_MONITOR_SIZE	64
+#define RDS_CONG_MONITOR_BIT(port)  (((unsigned int) port) % RDS_CONG_MONITOR_SIZE)
+#define RDS_CONG_MONITOR_MASK(port) (1ULL << RDS_CONG_MONITOR_BIT(port))
+
+/*
+ * RDMA related types
+ */
+
+/*
+ * This encapsulates a remote memory location.
+ * In the current implementation, it contains the R_Key
+ * of the remote memory region, and the offset into it
+ * (so that the application does not have to worry about
+ * alignment).
+ */
+typedef u_int64_t	rds_rdma_cookie_t;
+
+struct rds_iovec {
+	u_int64_t	addr;
+	u_int64_t	bytes;
+};
+
+struct rds_get_mr_args {
+	struct rds_iovec vec;
+	u_int64_t	cookie_addr;
+	uint64_t	flags;
+};
+
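The remainder allocator touched above carves small regions out of a partially used per-CPU page and only allocates a fresh page when the request no longer fits. A simplified, single-context sketch of the idea with hypothetical names; the real code additionally disables interrupts, keeps per-CPU state and bumps the statistics counters:

struct page_remainder_sketch {
	struct page	*r_page;
	unsigned long	r_offset;
};

/* Hand back a (page, offset) region of 'bytes' bytes (bytes is assumed
 * to be at most PAGE_SIZE), reusing the tail of the cached page when it
 * still fits, otherwise tossing the tiny leftover. */
static int carve_region(struct page_remainder_sketch *rem, unsigned int bytes,
			struct scatterlist *sg, gfp_t gfp)
{
	if (!rem->r_page || bytes > PAGE_SIZE - rem->r_offset) {
		struct page *page = alloc_page(gfp);

		if (!page)
			return -ENOMEM;
		if (rem->r_page)
			__free_page(rem->r_page);
		rem->r_page = page;
		rem->r_offset = 0;
	}

	sg_set_page(sg, rem->r_page, bytes, rem->r_offset);
	get_page(rem->r_page);		/* the region holds its own ref */
	rem->r_offset += bytes;
	return 0;
}
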
+struct rds_free_mr_args {
+	rds_rdma_cookie_t cookie;
+	u_int64_t	flags;
+};
+
+struct rds_rdma_args {
+	rds_rdma_cookie_t cookie;
+	struct rds_iovec remote_vec;
+	u_int64_t	local_vec_addr;
+	u_int64_t	nr_local;
+	u_int64_t	flags;
+	u_int64_t	user_token;
+};
+
+struct rds_rdma_notify {
+	u_int64_t	user_token;
+	int32_t		status;
+};
+
+#define RDS_RDMA_SUCCESS	0
+#define RDS_RDMA_REMOTE_ERROR	1
+#define RDS_RDMA_CANCELED	2
+#define RDS_RDMA_DROPPED	3
+#define RDS_RDMA_OTHER_ERROR	4
+
+/*
+ * Common set of flags for all RDMA related structs
+ */
+#define RDS_RDMA_READWRITE	0x0001
+#define RDS_RDMA_FENCE		0x0002	/* use FENCE for immediate send */
+#define RDS_RDMA_INVALIDATE	0x0004	/* invalidate R_Key after freeing MR */
+#define RDS_RDMA_USE_ONCE	0x0008	/* free MR after use */
+#define RDS_RDMA_DONTWAIT	0x0010	/* Don't wait in SET_BARRIER */
+#define RDS_RDMA_NOTIFY_ME	0x0020	/* Notify when operation completes */
+
+#endif /* IB_RDS_H */

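The congestion-monitor comment above maps ports onto bits of a 64-bit mask. A hedged userspace sketch of installing that mask follows; RDS_CONG_MONITOR, RDS_CONG_MONITOR_MASK and RDS_CMSG_CONG_UPDATE come from this header, while SOL_RDS and the already-created RDS socket fd are assumed to be provided elsewhere:

#include <stdint.h>
#include <sys/socket.h>

/* Ask the kernel to queue RDS_CMSG_CONG_UPDATE control messages when a
 * port in this port's 64-way group transitions to uncongested. */
static int monitor_port(int rds_fd, uint16_t port)
{
	uint64_t mask = RDS_CONG_MONITOR_MASK(port);	/* 1ULL << (port % 64) */

	return setsockopt(rds_fd, SOL_RDS, RDS_CONG_MONITOR,
			  &mask, sizeof(mask));
}
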
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/recv.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/recv.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/recv.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -70,30 +70,40 @@
 				  struct rds_cong_map *map,
 				  int delta, __be16 port)
 {
-	int was_congested;
 	int now_congested;
 
 	if (delta == 0)
 		return;
 
-	was_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);
 	rs->rs_rcv_bytes += delta;
 	now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);
 
-	rdsdebug("rs %p recv bytes %d buf %d was %d now %d\n",
-		 rs, rs->rs_rcv_bytes, rds_sk_rcvbuf(rs), was_congested,
-		 now_congested);
+	rdsdebug("rs %p recv bytes %d buf %d now_cong %d\n",
+		 rs, rs->rs_rcv_bytes, rds_sk_rcvbuf(rs), now_congested);
 
-	if (was_congested == now_congested)
-		return;
+	rdstrace(RDS_CONG, RDS_VERBOSE,
+	  "rs %p (%u.%u.%u.%u:%u) recv bytes %d buf %d "
+	  "now_cong %d delta %d\n",
+	  rs, NIPQUAD(rs->rs_bound_addr),
+	  (int)ntohs(rs->rs_bound_port), rs->rs_rcv_bytes,
+	  rds_sk_rcvbuf(rs), now_congested, delta);
 
-	if (now_congested) {
+	/* wasn't -> am congested */
+	if (!rs->rs_congested && now_congested) {
+		rs->rs_congested = 1;
 		rds_cong_set_bit(map, port);
-	} else {
+		rds_cong_queue_updates(map);
+	}
+	/* was -> aren't congested */
+	/* Require more free space before reporting uncongested to prevent
+           bouncing cong/uncong state too often */
+	else if (rs->rs_congested && (rs->rs_rcv_bytes < (rds_sk_rcvbuf(rs)/2))) {
+		rs->rs_congested = 0;
 		rds_cong_clear_bit(map, port);
+		rds_cong_queue_updates(map);
 	}
 
-	rds_cong_queue_updates(map);
+	/* do nothing if no change in cong state */
 }
 
 /*
@@ -161,15 +171,15 @@
 	rdsdebug("conn %p next %llu inc %p seq %llu len %u sport %u dport %u "
 		 "flags 0x%x rx_jiffies %lu\n", conn,
 		 (unsigned long long)conn->c_next_rx_seq,
-		 inc, 
-		 (unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence), 
-		 be32_to_cpu(inc->i_hdr.h_len), 
-		 be16_to_cpu(inc->i_hdr.h_sport), 
-		 be16_to_cpu(inc->i_hdr.h_dport), 
+		 inc,
+		 (unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence),
+		 be32_to_cpu(inc->i_hdr.h_len),
+		 be16_to_cpu(inc->i_hdr.h_sport),
+		 be16_to_cpu(inc->i_hdr.h_dport),
 		 inc->i_hdr.h_flags,
 		 inc->i_rx_jiffies);
 
-	/* 
+	/*
 	 * Sequence numbers should only increase.  Messages get their
 	 * sequence number as they're queued in a sending conn.  They
 	 * can be dropped, though, if the sending socket is closed before
@@ -418,7 +428,7 @@
 		goto out;
 	}
 
-	for(;;) {
+	while (1) {
 		if (!rds_next_incoming(rs, &inc)) {
 			if (nonblock) {
 				ret = -EAGAIN;

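The rework above replaces the old was/now comparison with hysteresis: a socket becomes congested once its receive backlog exceeds rcvbuf, and the bit is only cleared again once the backlog drains below half of rcvbuf, so a reader hovering near the limit no longer flips the congestion map on every message. A tiny stand-alone restatement of that state function (illustrative only):

/* e.g. with rcvbuf = 262144: congest above 262144 bytes queued, only
 * uncongest again below 131072 */
static int next_congested(int congested, int rcv_bytes, int rcvbuf)
{
	if (!congested && rcv_bytes > rcvbuf)
		return 1;		/* wasn't -> am congested */
	if (congested && rcv_bytes < rcvbuf / 2)
		return 0;		/* was -> no longer congested */
	return congested;		/* no change, no map update queued */
}
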
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/send.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/send.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/send.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -53,7 +53,7 @@
 MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
 
 /*
- * Reset the send state. Caller must hold c_send_sem when calling here.
+ * Reset the send state. Caller must hold c_send_lock when calling here.
  */
 void rds_send_reset(struct rds_connection *conn)
 {
@@ -91,7 +91,7 @@
 
 /*
  * We're making the conscious trade-off here to only send one message
- * down the connection at a time.  
+ * down the connection at a time.
  *   Pro:
  *      - tx queueing is a simple fifo list
  *   	- reassembly is optional and easily done by transports per conn
@@ -114,31 +114,32 @@
 	int was_empty = 0;
 	LIST_HEAD(to_be_dropped);
 
-	/* 
+	/*
 	 * sendmsg calls here after having queued its message on the send
 	 * queue.  We only have one task feeding the connection at a time.  If
 	 * another thread is already feeding the queue then we back off.  This
 	 * avoids blocking the caller and trading per-connection data between
 	 * caches per message.
-	 * 
+	 *
 	 * The sem holder will issue a retry if they notice that someone queued
 	 * a message after they stopped walking the send queue but before they
 	 * dropped the sem.
 	 */
-	if (down_trylock(&conn->c_send_sem)) {
+	if (!mutex_trylock(&conn->c_send_lock)) {
 		rds_stats_inc(s_send_sem_contention);
+		ret = -ENOMEM;
 		goto out;
 	}
 
 	if (conn->c_trans->xmit_prepare)
 		conn->c_trans->xmit_prepare(conn);
 
-	/* 
+	/*
 	 * spin trying to push headers and data down the connection until
 	 * the connection doesn't make forward progress.
 	 */
 	while (--send_quota) {
-		/* 
+		/*
 		 * See if need to send a congestion map update if we're
 		 * between sending messages.  The send_sem protects our sole
 		 * use of c_map_offset and _bytes.
@@ -319,17 +320,17 @@
 
 	/*
 	 * We might be racing with another sender who queued a message but
-	 * backed off on noticing that we held the c_send_sem.  If we check
+	 * backed off on noticing that we held the c_send_lock.  If we check
 	 * for queued messages after dropping the sem then either we'll
 	 * see the queued message or the queuer will get the sem.  If we
 	 * notice the queued message then we trigger an immediate retry.
 	 *
 	 * We need to be careful only to do this when we stopped processing
 	 * the send queue because it was empty.  It's the only way we
-	 * stop processing the loop when the transport hasn't taken 
+	 * stop processing the loop when the transport hasn't taken
 	 * responsibility for forward progress.
 	 */
-	up(&conn->c_send_sem);
+	mutex_unlock(&conn->c_send_lock);
 
 	if (conn->c_map_bytes || (send_quota == 0 && !was_empty)) {
 		/* We exhausted the send quota, but there's work left to
@@ -438,6 +439,7 @@
 		sock_put(rds_rs_to_sk(rs));
 	}
 }
+EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
 
 /*
  * This is the same as rds_rdma_send_complete except we
@@ -448,12 +450,11 @@
 __rds_rdma_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
 {
 	struct rds_rdma_op *ro;
-	struct rds_notifier *notifier;
 
 	ro = rm->m_rdma_op;
-	if (ro && ro->r_notify && (notifier = ro->r_notifier) != NULL) {
-		notifier->n_status = status;
-		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
+	if (ro && ro->r_notify && ro->r_notifier) {
+		ro->r_notifier->n_status = status;
+		list_add_tail(&ro->r_notifier->n_list, &rs->rs_notify_queue);
 		ro->r_notifier = NULL;
 	}
 
@@ -494,15 +495,16 @@
 
 	return found;
 }
+EXPORT_SYMBOL_GPL(rds_send_get_message);
 
 /*
  * This removes messages from the socket's list if they're on it.  The list
  * argument must be private to the caller, we must be able to modify it
- * without locks.  The messages must have a reference held for their 
+ * without locks.  The messages must have a reference held for their
  * position on the list.  This function will drop that reference after
  * removing the messages from the 'messages' list regardless of if it found
  * the messages on the socket list or not.
- */ 
+ */
 void rds_send_remove_from_sock(struct list_head *messages, int status)
 {
 	unsigned long flags = 0; /* silence gcc :P */
@@ -610,12 +612,13 @@
 	/* now remove the messages from the sock list as needed */
 	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
 }
+EXPORT_SYMBOL_GPL(rds_send_drop_acked);
 
 void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
 {
 	struct rds_message *rm, *tmp;
 	struct rds_connection *conn;
-	unsigned long flags;
+	unsigned long flags, flags2;
 	LIST_HEAD(list);
 	int wake = 0;
 
@@ -651,9 +654,9 @@
 	list_for_each_entry(rm, &list, m_sock_item) {
 		/* We do this here rather than in the loop above, so that
 		 * we don't have to nest m_rs_lock under rs->rs_lock */
-		spin_lock(&rm->m_rs_lock);
+		spin_lock_irqsave(&rm->m_rs_lock, flags2);
 		rm->m_rs = NULL;
-		spin_unlock(&rm->m_rs_lock);
+		spin_unlock_irqrestore(&rm->m_rs_lock, flags2);
 
 		/*
 		 * If we see this flag cleared then we're *sure* that someone
@@ -759,8 +762,7 @@
 }
 
 static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
-		                          struct msghdr *msg,
-					  int *allocated_mr)
+			 struct msghdr *msg, int *allocated_mr)
 {
 	struct cmsghdr *cmsg;
 	int ret = 0;
@@ -772,6 +774,9 @@
 		if (cmsg->cmsg_level != SOL_RDS)
 			continue;
 
+		/* As a side effect, RDMA_DEST and RDMA_MAP will set
+		 * rm->m_rdma_cookie and rm->m_rdma_mr.
+		 */
 		switch (cmsg->cmsg_type) {
 		case RDS_CMSG_RDMA_ARGS:
 			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
@@ -816,7 +821,7 @@
 	/* Mirror Linux UDP mirror of BSD error message compatibility */
 	/* XXX: Perhaps MSG_MORE someday */
 	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
-		printk("msg_flags 0x%08X\n", msg->msg_flags);
+		printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
 		ret = -EOPNOTSUPP;
 		goto out;
 	}
@@ -852,14 +857,11 @@
 
 	rm->m_daddr = daddr;
 
-	/* Parse any control messages the user may have included. */
-	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
-	if (ret)
-		goto out;
-
 	/* rds_conn_create has a spinlock that runs with IRQ off.
 	 * Caching the conn in the socket helps a lot. */
-	if ((conn = rs->rs_conn) == NULL || conn->c_faddr != daddr) {
+	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
+		conn = rs->rs_conn;
+	else {
 		conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
 					rs->rs_transport,
 					sock->sk->sk_allocation);
@@ -870,6 +872,11 @@
 		rs->rs_conn = conn;
 	}
 
+	/* Parse any control messages the user may have included. */
+	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
+	if (ret)
+		goto out;
+
 	if ((rm->m_rdma_cookie || rm->m_rdma_op)
 	 && conn->c_trans->xmit_rdma == NULL) {
 		if (printk_ratelimit())
@@ -891,7 +898,7 @@
 	if (ret)
 		goto out;
 
-	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port, 
+	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
 				  dport, &queued)) {
 		rds_stats_inc(s_send_queue_full);
 		/* XXX make sure this is reasonable */

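The c_send_sem to c_send_lock conversion keeps the single-feeder scheme the comments describe: one task drains the send queue, racing senders back off on mutex_trylock(), and the holder rechecks for newly queued work after unlocking. The same shape with purely hypothetical names (feed_connection and its helpers are not part of this patch):

static void feed_connection(struct my_conn *c)
{
	if (!mutex_trylock(&c->send_lock)) {
		/* someone else is already feeding this connection */
		requeue_send_work(c);
		return;
	}

	drain_send_queue(c);		/* push queued messages down */

	mutex_unlock(&c->send_lock);

	/* A sender may have queued between the drain and the unlock and
	 * then lost its own trylock; recheck so that message isn't stranded. */
	if (!send_queue_empty(c))
		requeue_send_work(c);
}
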
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/stats.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/stats.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/stats.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -37,6 +37,7 @@
 #include "rds.h"
 
 RDS_DEFINE_PER_CPU(struct rds_statistics, rds_stats) ____cacheline_aligned;
+EXPORT_SYMBOL_GPL(rds_stats);
 
 /* :.,$s/unsigned long\>.*\<s_\(.*\);/"\1",/g */
 
@@ -77,7 +78,7 @@
 };
 
 void rds_stats_info_copy(struct rds_info_iterator *iter,
-                         uint64_t *values, char **names, size_t nr)
+			 uint64_t *values, char **names, size_t nr)
 {
 	struct rds_info_counter ctr;
 	size_t i;
@@ -90,6 +91,7 @@
 		rds_info_copy(iter, &ctr, sizeof(ctr));
 	}
 }
+EXPORT_SYMBOL_GPL(rds_stats_info_copy);
 
 /*
  * This gives global counters across all the transports.  The strings

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/sysctl.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/sysctl.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/sysctl.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -49,7 +49,10 @@
 
 unsigned int rds_sysctl_ping_enable = 1;
 
-/* 
+unsigned long rds_sysctl_trace_flags = 0;
+unsigned int  rds_sysctl_trace_level = 0;
+
+/*
  * These can change over time until they're official.  Until that time we'll
  * give apps a way to figure out what the values are in a given machine.
  */
@@ -117,6 +120,22 @@
 		.mode           = 0644,
 		.proc_handler   = &proc_dointvec,
 	},
+        {
+                .ctl_name       = CTL_UNNUMBERED,
+                .procname       = "trace_flags",
+                .data           = &rds_sysctl_trace_flags,
+                .maxlen         = sizeof(unsigned long),
+                .mode           = 0644,
+                .proc_handler   = &proc_dointvec,
+        },
+        {
+                .ctl_name       = CTL_UNNUMBERED,
+                .procname       = "trace_level",
+                .data           = &rds_sysctl_trace_level,
+                .maxlen         = sizeof(unsigned int),
+                .mode           = 0644,
+                .proc_handler   = &proc_dointvec,
+        },
 	{ .ctl_name = 0}
 };
 

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/threads.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/threads.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/threads.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -61,7 +61,7 @@
  *
  * Transition to state DISCONNECTING/DOWN:
  *  -	Inside the shutdown worker; synchronizes with xmit path
- *	through c_send_sem, and with connection management callbacks
+ *	through c_send_lock, and with connection management callbacks
  *	via c_cm_lock.
  *
  *	For receive callbacks, we rely on the underlying transport
@@ -75,13 +75,17 @@
 	if (!rds_conn_transition(conn, RDS_CONN_CONNECTING, RDS_CONN_UP)) {
 		printk(KERN_WARNING "%s: Cannot transition to state UP, "
 				"current state is %d\n",
-				__FUNCTION__,
+				__func__,
 				atomic_read(&conn->c_state));
 		atomic_set(&conn->c_state, RDS_CONN_ERROR);
 		queue_work(rds_wq, &conn->c_down_w);
 		return;
 	}
 
+	rdstrace(RDS_CONNECTION, RDS_MINIMAL,
+	  "conn %p for %u.%u.%u.%u to %u.%u.%u.%u complete\n",
+	  conn, NIPQUAD(conn->c_laddr),NIPQUAD(conn->c_faddr));
+
 	conn->c_reconnect_jiffies = 0;
 	set_bit(0, &conn->c_map_queued);
 	queue_delayed_work(rds_wq, &conn->c_send_w, 0);
@@ -91,7 +95,7 @@
 
 /*
  * This random exponential backoff is relied on to eventually resolve racing
- * connects. 
+ * connects.
  *
  * If connect attempts race then both parties drop both connections and come
  * here to wait for a random amount of time before trying again.  Eventually
@@ -107,10 +111,15 @@
  * We should *always* start with a random backoff; otherwise a broken connection
  * will always take several iterations to be re-established.
  */
-void rds_queue_reconnect(struct rds_connection *conn)
+static void rds_queue_reconnect(struct rds_connection *conn)
 {
 	unsigned long rand;
 
+	rdstrace(RDS_CONNECTION, RDS_LOW,
+	  "conn %p for %u.%u.%u.%u to %u.%u.%u.%u reconnect jiffies %lu\n",
+	  conn, NIPQUAD(conn->c_laddr), NIPQUAD(conn->c_faddr),
+	  conn->c_reconnect_jiffies);
+
 	set_bit(RDS_RECONNECT_PENDING, &conn->c_flags);
 	if (conn->c_reconnect_jiffies == 0) {
 		conn->c_reconnect_jiffies = rds_sysctl_reconnect_min_jiffies;
@@ -126,7 +135,7 @@
 			   rand % conn->c_reconnect_jiffies);
 
 	conn->c_reconnect_jiffies = min(conn->c_reconnect_jiffies * 2,
-				        rds_sysctl_reconnect_max_jiffies);
+					rds_sysctl_reconnect_max_jiffies);
 }
 
 void rds_connect_worker(struct work_struct *work)
@@ -140,6 +149,10 @@
 		rdsdebug("connect conn %p for %u.%u.%u.%u -> %u.%u.%u.%u "
 			 "ret %d\n", conn, NIPQUAD(conn->c_laddr),
 			 NIPQUAD(conn->c_faddr), ret);
+		rdstrace(RDS_CONNECTION, RDS_MINIMAL,
+		  "conn %p for %u.%u.%u.%u to %u.%u.%u.%u dispatched, ret %d\n",
+		  conn, NIPQUAD(conn->c_laddr), NIPQUAD(conn->c_faddr), ret);
+
 		if (ret) {
 			if (rds_conn_transition(conn, RDS_CONN_CONNECTING, RDS_CONN_DOWN))
 				rds_queue_reconnect(conn);
@@ -172,10 +185,10 @@
 		}
 		mutex_unlock(&conn->c_cm_lock);
 
-		down(&conn->c_send_sem);
+		mutex_lock(&conn->c_send_lock);
 		conn->c_trans->conn_shutdown(conn);
 		rds_conn_reset(conn);
-		up(&conn->c_send_sem);
+		mutex_unlock(&conn->c_send_lock);
 
 		if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) {
 			/* This can happen - eg when we're in the middle of tearing
@@ -186,7 +199,7 @@
 			rds_conn_error(conn,
 				"%s: failed to transition to state DOWN, "
 				"current state is %d\n",
-				__FUNCTION__,
+				__func__,
 				atomic_read(&conn->c_state));
 			return;
 		}
@@ -197,9 +210,8 @@
 	 * to the conn hash, so we never trigger a reconnect on this
 	 * conn - the reconnect is always triggered by the active peer. */
 	cancel_delayed_work(&conn->c_conn_w);
-	if (!hlist_unhashed(&conn->c_hash_node)) {
+	if (!hlist_unhashed(&conn->c_hash_node))
 		rds_queue_reconnect(conn);
-	}
 }
 
 void rds_send_worker(struct work_struct *work)
@@ -211,15 +223,15 @@
 		ret = rds_send_xmit(conn);
 		rdsdebug("conn %p ret %d\n", conn, ret);
 		switch (ret) {
-			case -EAGAIN:
-				rds_stats_inc(s_send_immediate_retry);
-				queue_delayed_work(rds_wq, &conn->c_send_w, 0);
-				break;
-			case -ENOMEM:
-				rds_stats_inc(s_send_delayed_retry);
-				queue_delayed_work(rds_wq, &conn->c_send_w, 2);
-			default:
-				break;
+		case -EAGAIN:
+			rds_stats_inc(s_send_immediate_retry);
+			queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+			break;
+		case -ENOMEM:
+			rds_stats_inc(s_send_delayed_retry);
+			queue_delayed_work(rds_wq, &conn->c_send_w, 2);
+		default:
+			break;
 		}
 	}
 }
@@ -233,16 +245,15 @@
 		ret = conn->c_trans->recv(conn);
 		rdsdebug("conn %p ret %d\n", conn, ret);
 		switch (ret) {
-			case -EAGAIN:
-				rds_stats_inc(s_recv_immediate_retry);
-				queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
-				break;
-			case -ENOMEM:
-				rds_stats_inc(s_recv_delayed_retry);
-				queue_delayed_work(rds_wq, &conn->c_recv_w,
-						   2);
-			default:
-				break;
+		case -EAGAIN:
+			rds_stats_inc(s_recv_immediate_retry);
+			queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
+			break;
+		case -ENOMEM:
+			rds_stats_inc(s_recv_delayed_retry);
+			queue_delayed_work(rds_wq, &conn->c_recv_w, 2);
+		default:
+			break;
 		}
 	}
 }
@@ -255,7 +266,7 @@
 int __init rds_threads_init(void)
 {
 	rds_wq = create_singlethread_workqueue("krdsd");
-	if (rds_wq == NULL) 
+	if (rds_wq == NULL)
 		return -ENOMEM;
 
 	return 0;

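rds_queue_reconnect() (now static) implements the randomized exponential backoff described in the comment: the first retry is queued immediately, later ones wait a uniform random delay inside a window that doubles up to the configured maximum. A self-contained userspace illustration of that schedule; the min/max of 8 and 1024 "jiffies" are made-up stand-ins for the reconnect sysctls:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long min_j = 8, max_j = 1024, window = 0;
	int attempt;

	for (attempt = 1; attempt <= 8; attempt++) {
		unsigned long delay;

		if (window == 0) {
			window = min_j;
			delay = 0;	/* first reconnect is immediate */
		} else {
			delay = (unsigned long)rand() % window;
		}
		printf("attempt %d: delay %lu within window %lu\n",
		       attempt, delay, window);

		/* double the window, capped at the configured maximum */
		window = (window * 2 > max_j) ? max_j : window * 2;
	}
	return 0;
}
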
Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/transport.c
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/transport.c	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/net/rds/transport.c	2009-05-30 10:58:06 UTC (rev 299)
@@ -35,6 +35,7 @@
 #include <linux/in.h>
 
 #include "rds.h"
+#include "loop.h"
 
 static LIST_HEAD(transports);
 static DECLARE_RWSEM(trans_sem);
@@ -71,6 +72,9 @@
 	struct rds_transport *trans;
 	struct rds_transport *ret = NULL;
 
+        if (IN_LOOPBACK(ntohl(addr)))
+                return &rds_loop_transport;
+
 	down_read(&trans_sem);
 	list_for_each_entry(trans, &transports, t_item) {
 		if (trans->laddr_check(addr) == 0) {
@@ -83,18 +87,6 @@
 	return ret;
 }
 
-void rds_trans_stop_listening(void)
-{
-	struct rds_transport *trans;
-
-	down_read(&trans_sem);
-
-	list_for_each_entry(trans, &transports, t_item)
-		trans->listen_stop();
-
-	up_read(&trans_sem);
-}
-
 /*
  * This returns the number of stats entries in the snapshot and only
  * copies them using the iter if there is enough space for them.  The
@@ -126,45 +118,3 @@
 	return total;
 }
 
-/*
- * We don't quite have modular transports yet.. we're in RDS module
- * teardown and have all sockets and conns torn down.  This trusts that
- * the transports will unregister themselves as they tear down their
- * resources.
- */
-void rds_trans_exit(void)
-{
-	struct rds_transport *trans;
-
-	do {
-		down_read(&trans_sem);
-		if (!list_empty(&transports))
-			trans = list_entry(transports.next,
-					   struct rds_transport, t_item);
-		else
-			trans = NULL;
-		up_read(&trans_sem);
-
-		/* trans->exit() will remove the trans from the list */
-		if (trans)
-			trans->exit();
-	} while (trans);
-}
-
-int __init rds_trans_init(void)
-{
-	int ret = 0;
-	/* a hack until we have proper modular transports */
-#ifdef CONFIG_RDS_IB
-	{
-		extern int __init rds_ib_init(void);
-		ret = rds_ib_init();
-		if (ret)
-			goto out;
-	}
-#endif
-out:
-	if (ret)
-		rds_trans_exit();
-	return ret;
-}

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/Makefile
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/Makefile	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/Makefile	2009-05-30 10:58:06 UTC (rev 299)
@@ -15,7 +15,7 @@
 obj-$(CONFIG_RDS)               += net/rds/
 obj-$(CONFIG_MEMTRACK)          += drivers/infiniband/debug/
 obj-$(CONFIG_SUNRPC_XPRT_RDMA)  += net/sunrpc/
-obj-$(CONFIG_SUNRPC_XPRT_RDMA)  += net/sunrpc/auth_gss
+obj-$(CONFIG_SUNRPC_XPRT_RDMA)  += net/sunrpc/auth_gss/
 obj-$(CONFIG_SUNRPC_XPRT_RDMA)  += net/sunrpc/xprtrdma/
 obj-$(CONFIG_SUNRPC_XPRT_RDMA)  += fs/nfs/
 obj-$(CONFIG_SUNRPC_XPRT_RDMA)  += fs/lockd/

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/checkout_files
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/checkout_files	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/checkout_files	2009-05-30 10:58:06 UTC (rev 299)
@@ -12,7 +12,6 @@
 fs/lockd/
 fs/nfs/
 fs/nfs_common/
-fs/nfsctl.c
 fs/nfsd/
 include/linux/exportfs.h
 include/linux/lockd/

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/configure
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/configure	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/configure	2009-05-30 10:58:06 UTC (rev 299)
@@ -453,10 +453,12 @@
                         --with-rds-mod)
                             CONFIG_RDS="m"
                             CONFIG_RDS_IB="m"
+                            CONFIG_RDS_IWARP="m"
                         ;;
                         --without-rds-mod)
                             CONFIG_RDS=
                             CONFIG_RDS_IB=
+                            CONFIG_RDS_IWARP=
                         ;;
                         --with-rds_debug-mod)
                             CONFIG_RDS_DEBUG="y"
@@ -620,6 +622,7 @@
 CONFIG_INFINIBAND_EHCA_SCALING=${CONFIG_INFINIBAND_EHCA_SCALING:-''}
 CONFIG_RDS=${CONFIG_RDS:-''}
 CONFIG_RDS_IB=${CONFIG_RDS_IB:-''}
+CONFIG_RDS_IWARP=${CONFIG_RDS_IWARP:-''}
 CONFIG_RDS_DEBUG=${CONFIG_RDS_DEBUG:-''}
 CONFIG_INFINIBAND_MADEYE=${CONFIG_INFINIBAND_MADEYE:-''}
 CONFIG_INFINIBAND_QLGC_VNIC=${CONFIG_INFINIBAND_QLGC_VNIC:-''}
@@ -671,8 +674,8 @@
                 exit 1
         fi
 
-	if [ ! -e ofed_patch.mk ]; then
-		echo "ofed_patch.mk does not exist. running ofed_patch.sh"
+	if [ ! -e config.mk ]; then
+		echo "config.mk does not exist. running ofed_patch.sh"
 		ex ${CWD}/ofed_scripts/ofed_patch.sh ${ofed_patch_params}
 	fi
 
@@ -720,6 +723,7 @@
 CONFIG_INFINIBAND_EHCA_SCALING=${CONFIG_INFINIBAND_EHCA_SCALING}
 CONFIG_RDS=${CONFIG_RDS}
 CONFIG_RDS_IB=${CONFIG_RDS_IB}
+CONFIG_RDS_IWARP=${CONFIG_RDS_IWARP}
 CONFIG_RDS_DEBUG=${CONFIG_RDS_DEBUG}
 CONFIG_INFINIBAND_MADEYE=${CONFIG_INFINIBAND_MADEYE}
 CONFIG_INFINIBAND_QLGC_VNIC=${CONFIG_INFINIBAND_QLGC_VNIC}
@@ -768,252 +772,257 @@
 
 # Create autoconf.h
 if [ "X${CONFIG_MEMTRACK}" == "Xm" ]; then
-        DEFINE_MEMTRACK="#define CONFIG_MEMTRACK 1"
+        DEFINE_MEMTRACK="#undef CONFIG_MEMTRACK\n#define CONFIG_MEMTRACK 1"
 else
-        DEFINE_MEMTRACK="#undef CONFIG_MEMTRACK"
+        DEFINE_MEMTRACK="/* CONFIG_MEMTRACK is not set */"
 fi
 if [ "X${CONFIG_DEBUG_INFO}" == "Xy" ]; then
-        DEFINE_DEBUG_INFO="#define CONFIG_DEBUG_INFO 1"
+        DEFINE_DEBUG_INFO="#undef CONFIG_DEBUG_INFO\n#define CONFIG_DEBUG_INFO 1"
 else
-        DEFINE_DEBUG_INFO="#undef CONFIG_DEBUG_INFO"
+        DEFINE_DEBUG_INFO="/* CONFIG_DEBUG_INFO is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND}" == "Xm" ]; then
-        DEFINE_INFINIBAND="#define CONFIG_INFINIBAND 1"
+        DEFINE_INFINIBAND="#undef CONFIG_INFINIBAND\n#define CONFIG_INFINIBAND 1"
 else
-        DEFINE_INFINIBAND="#undef CONFIG_INFINIBAND"
+        DEFINE_INFINIBAND="/* CONFIG_INFINIBAND is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_IPOIB}" == "Xm" ]; then
-        DEFINE_INFINIBAND_IPOIB="#define CONFIG_INFINIBAND_IPOIB 1"
+        DEFINE_INFINIBAND_IPOIB="#undef CONFIG_INFINIBAND_IPOIB\n#define CONFIG_INFINIBAND_IPOIB 1"
 else
-        DEFINE_INFINIBAND_IPOIB="#undef CONFIG_INFINIBAND_IPOIB"
+        DEFINE_INFINIBAND_IPOIB="/* CONFIG_INFINIBAND_IPOIB is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_IPOIB_CM}" == "Xy" ]; then
-        DEFINE_INFINIBAND_IPOIB_CM="#define CONFIG_INFINIBAND_IPOIB_CM 1"
+        DEFINE_INFINIBAND_IPOIB_CM="#undef CONFIG_INFINIBAND_IPOIB_CM\n#define CONFIG_INFINIBAND_IPOIB_CM 1"
 else
-        DEFINE_INFINIBAND_IPOIB_CM="#undef CONFIG_INFINIBAND_IPOIB_CM"
+        DEFINE_INFINIBAND_IPOIB_CM="/* CONFIG_INFINIBAND_IPOIB_CM is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_SDP}" == "Xm" ]; then
-        DEFINE_INFINIBAND_SDP="#define CONFIG_INFINIBAND_SDP 1"
+        DEFINE_INFINIBAND_SDP="#undef CONFIG_INFINIBAND_SDP\n#define CONFIG_INFINIBAND_SDP 1"
 else
-        DEFINE_INFINIBAND_SDP="#undef CONFIG_INFINIBAND_SDP"
+        DEFINE_INFINIBAND_SDP="/* CONFIG_INFINIBAND_SDP is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_SRP}" == "Xm" ]; then
-        DEFINE_INFINIBAND_SRP="#define CONFIG_INFINIBAND_SRP 1"
+        DEFINE_INFINIBAND_SRP="#undef CONFIG_INFINIBAND_SRP\n#define CONFIG_INFINIBAND_SRP 1"
 else
-        DEFINE_INFINIBAND_SRP="#undef CONFIG_INFINIBAND_SRP"
+        DEFINE_INFINIBAND_SRP="/* CONFIG_INFINIBAND_SRP is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_SRPT}" == "Xm" ]; then
-        DEFINE_INFINIBAND_SRPT="#define CONFIG_INFINIBAND_SRPT 1"
+        DEFINE_INFINIBAND_SRPT="#undef CONFIG_INFINIBAND_SRPT\n#define CONFIG_INFINIBAND_SRPT 1"
 else
-        DEFINE_INFINIBAND_SRPT="#undef CONFIG_INFINIBAND_SRPT"
+        DEFINE_INFINIBAND_SRPT="/* CONFIG_INFINIBAND_SRPT is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_USER_MAD}" == "Xm" ]; then
-        DEFINE_INFINIBAND_USER_MAD="#define CONFIG_INFINIBAND_USER_MAD 1"
+        DEFINE_INFINIBAND_USER_MAD="#undef CONFIG_INFINIBAND_USER_MAD\n#define CONFIG_INFINIBAND_USER_MAD 1"
 else
-        DEFINE_INFINIBAND_USER_MAD="#undef CONFIG_INFINIBAND_USER_MAD"
+        DEFINE_INFINIBAND_USER_MAD="/* CONFIG_INFINIBAND_USER_MAD is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_USER_ACCESS}" == "Xm" ]; then
-        DEFINE_INFINIBAND_USER_ACCESS="#define CONFIG_INFINIBAND_USER_ACCESS 1"
+        DEFINE_INFINIBAND_USER_ACCESS="#undef CONFIG_INFINIBAND_USER_ACCESS\n#define CONFIG_INFINIBAND_USER_ACCESS 1"
 else
-        DEFINE_INFINIBAND_USER_ACCESS="#undef CONFIG_INFINIBAND_USER_ACCESS"
+        DEFINE_INFINIBAND_USER_ACCESS="/* CONFIG_INFINIBAND_USER_ACCESS is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_ADDR_TRANS}" == "Xy" ]; then
-        DEFINE_INFINIBAND_ADDR_TRANS="#define CONFIG_INFINIBAND_ADDR_TRANS 1"
+        DEFINE_INFINIBAND_ADDR_TRANS="#undef CONFIG_INFINIBAND_ADDR_TRANS\n#define CONFIG_INFINIBAND_ADDR_TRANS 1"
 else
-        DEFINE_INFINIBAND_ADDR_TRANS="#undef CONFIG_INFINIBAND_ADDR_TRANS"
+        DEFINE_INFINIBAND_ADDR_TRANS="/* CONFIG_INFINIBAND_ADDR_TRANS is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_USER_MEM}" == "Xy" ]; then
-        DEFINE_INFINIBAND_USER_MEM="#define CONFIG_INFINIBAND_USER_MEM 1"
+        DEFINE_INFINIBAND_USER_MEM="#undef CONFIG_INFINIBAND_USER_MEM\n#define CONFIG_INFINIBAND_USER_MEM 1"
 else
-        DEFINE_INFINIBAND_USER_MEM="#undef CONFIG_INFINIBAND_USER_MEM"
+        DEFINE_INFINIBAND_USER_MEM="/* CONFIG_INFINIBAND_USER_MEM is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_MTHCA}" == "Xm" ]; then
-        DEFINE_INFINIBAND_MTHCA="#define CONFIG_INFINIBAND_MTHCA 1"
+        DEFINE_INFINIBAND_MTHCA="#undef CONFIG_INFINIBAND_MTHCA\n#define CONFIG_INFINIBAND_MTHCA 1"
 else
-        DEFINE_INFINIBAND_MTHCA="#undef CONFIG_INFINIBAND_MTHCA"
+        DEFINE_INFINIBAND_MTHCA="/* CONFIG_INFINIBAND_MTHCA is not set */"
 fi
 if [ "X${CONFIG_MLX4_CORE}" == "Xm" ]; then
-        DEFINE_MLX4_CORE="#define CONFIG_MLX4_CORE 1"
+        DEFINE_MLX4_CORE="#undef CONFIG_MLX4_CORE\n#define CONFIG_MLX4_CORE 1"
 else
-        DEFINE_MLX4_CORE="#undef CONFIG_MLX4_CORE"
+        DEFINE_MLX4_CORE="/* CONFIG_MLX4_CORE is not set */"
 fi
 if [ "X${CONFIG_MLX4_EN}" == "Xm" ]; then
-        DEFINE_MLX4_EN="#define CONFIG_MLX4_EN 1"
+        DEFINE_MLX4_EN="#undef CONFIG_MLX4_EN\n#define CONFIG_MLX4_EN 1"
 else
-        DEFINE_MLX4_EN="#undef CONFIG_MLX4_EN"
+        DEFINE_MLX4_EN="/* CONFIG_MLX4_EN is not set */"
 fi
 if [ "X${CONFIG_MLX4_INFINIBAND}" == "Xm" ]; then
-        DEFINE_MLX4_INFINIBAND="#define CONFIG_MLX4_INFINIBAND 1"
+        DEFINE_MLX4_INFINIBAND="#undef CONFIG_MLX4_INFINIBAND\n#define CONFIG_MLX4_INFINIBAND 1"
 else
-        DEFINE_MLX4_INFINIBAND="#undef CONFIG_MLX4_INFINIBAND"
+        DEFINE_MLX4_INFINIBAND="/* CONFIG_MLX4_INFINIBAND is not set */"
 fi
 if [ "X${CONFIG_MLX4_DEBUG}" == "Xy" ]; then
-        DEFINE_MLX4_DEBUG="#define CONFIG_MLX4_DEBUG 1"
+        DEFINE_MLX4_DEBUG="#undef CONFIG_MLX4_DEBUG\n#define CONFIG_MLX4_DEBUG 1"
 else
-        DEFINE_MLX4_DEBUG="#undef CONFIG_MLX4_DEBUG"
+        DEFINE_MLX4_DEBUG="/* CONFIG_MLX4_DEBUG is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_IPOIB_DEBUG}" == "Xy" ]; then
-        DEFINE_INFINIBAND_IPOIB_DEBUG="#define CONFIG_INFINIBAND_IPOIB_DEBUG 1"
+        DEFINE_INFINIBAND_IPOIB_DEBUG="#undef CONFIG_INFINIBAND_IPOIB_DEBUG\n#define CONFIG_INFINIBAND_IPOIB_DEBUG 1"
 else
-        DEFINE_INFINIBAND_IPOIB_DEBUG="#undef CONFIG_INFINIBAND_IPOIB_DEBUG"
+        DEFINE_INFINIBAND_IPOIB_DEBUG="/* CONFIG_INFINIBAND_IPOIB_DEBUG is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_ISER}" == "Xm" ]; then
-        DEFINE_INFINIBAND_ISER="#define CONFIG_INFINIBAND_ISER 1"
-        DEFINE_SCSI_ISCSI_ATTRS="#define CONFIG_SCSI_ISCSI_ATTRS 1"
-        DEFINE_ISCSI_TCP="#define CONFIG_ISCSI_TCP 1"
+        DEFINE_INFINIBAND_ISER="#undef CONFIG_INFINIBAND_ISER\n#define CONFIG_INFINIBAND_ISER 1"
+        DEFINE_SCSI_ISCSI_ATTRS="#undef CONFIG_SCSI_ISCSI_ATTRS\n#define CONFIG_SCSI_ISCSI_ATTRS 1"
+        DEFINE_ISCSI_TCP="#undef CONFIG_ISCSI_TCP\n#define CONFIG_ISCSI_TCP 1"
 else
-        DEFINE_INFINIBAND_ISER="#undef CONFIG_INFINIBAND_ISER"
-        DEFINE_SCSI_ISCSI_ATTRS="#undef CONFIG_SCSI_ISCSI_ATTRS"
-        DEFINE_ISCSI_TCP="#undef CONFIG_ISCSI_TCP"
+        DEFINE_INFINIBAND_ISER="/* CONFIG_INFINIBAND_ISER is not set */"
+        DEFINE_SCSI_ISCSI_ATTRS="/* CONFIG_SCSI_ISCSI_ATTRS is not set */"
+        DEFINE_ISCSI_TCP="/* CONFIG_ISCSI_TCP is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_EHCA}" == "Xm" ]; then
-        DEFINE_INFINIBAND_EHCA="#define CONFIG_INFINIBAND_EHCA 1"
+        DEFINE_INFINIBAND_EHCA="#undef CONFIG_INFINIBAND_EHCA\n#define CONFIG_INFINIBAND_EHCA 1"
 else
-        DEFINE_INFINIBAND_EHCA="#undef CONFIG_INFINIBAND_EHCA"
+        DEFINE_INFINIBAND_EHCA="/* CONFIG_INFINIBAND_EHCA is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_EHCA_SCALING}" == "Xy" ]; then
-        DEFINE_INFINIBAND_EHCA_SCALING="#define CONFIG_INFINIBAND_EHCA_SCALING 1"
+        DEFINE_INFINIBAND_EHCA_SCALING="#undef CONFIG_INFINIBAND_EHCA_SCALING\n#define CONFIG_INFINIBAND_EHCA_SCALING 1"
 else
-        DEFINE_INFINIBAND_EHCA_SCALING="#undef CONFIG_INFINIBAND_EHCA_SCALING"
+        DEFINE_INFINIBAND_EHCA_SCALING="/* CONFIG_INFINIBAND_EHCA_SCALING is not set */"
 fi
 if [ "X${CONFIG_RDS}" == "Xm" ]; then
-        DEFINE_RDS="#define CONFIG_RDS 1"
-        DEFINE_RDS_IB="#define CONFIG_RDS_IB 1"
+        DEFINE_RDS="#undef CONFIG_RDS\n#define CONFIG_RDS 1"
+        DEFINE_RDS_IB="#undef CONFIG_RDS_IB\n#define CONFIG_RDS_IB 1"
+        DEFINE_RDS_IWARP="#undef CONFIG_RDS_IWARP\n#define CONFIG_RDS_IWARP 1"
 else
-        DEFINE_RDS="#undef CONFIG_RDS"
-        DEFINE_RDS_IB="#undef CONFIG_RDS_IB"
+        DEFINE_RDS="/* CONFIG_RDS is not set */"
+        DEFINE_RDS_IB="/* CONFIG_RDS_IB is not set */"
+        DEFINE_RDS_IWARP="/* CONFIG_RDS_IWARP is not set */"
 fi
 if [ "X${CONFIG_RDS_DEBUG}" == "Xy" ]; then
-        DEFINE_RDS_DEBUG="#define CONFIG_RDS_DEBUG 1"
+        DEFINE_RDS_DEBUG="#undef CONFIG_RDS_DEBUG\n#define CONFIG_RDS_DEBUG 1"
 else
-        DEFINE_RDS_DEBUG="#undef CONFIG_RDS_DEBUG"
+        DEFINE_RDS_DEBUG="/* CONFIG_RDS_DEBUG is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_MADEYE}" == "Xm" ]; then
-        DEFINE_INFINIBAND_MADEYE="#define CONFIG_INFINIBAND_MADEYE 1"
+        DEFINE_INFINIBAND_MADEYE="#undef CONFIG_INFINIBAND_MADEYE\n#define CONFIG_INFINIBAND_MADEYE 1"
 else
-        DEFINE_INFINIBAND_MADEYE="#undef CONFIG_INFINIBAND_MADEYE"
+        DEFINE_INFINIBAND_MADEYE="/* CONFIG_INFINIBAND_MADEYE is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_IPOIB_DEBUG_DATA}" == "Xy" ]; then
-        DEFINE_INFINIBAND_IPOIB_DEBUG_DATA="#define CONFIG_INFINIBAND_IPOIB_DEBUG_DATA 1"
+        DEFINE_INFINIBAND_IPOIB_DEBUG_DATA="#undef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA\n#define CONFIG_INFINIBAND_IPOIB_DEBUG_DATA 1"
 else
-        DEFINE_INFINIBAND_IPOIB_DEBUG_DATA="#undef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA"
+        DEFINE_INFINIBAND_IPOIB_DEBUG_DATA="/* CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_SDP_SEND_ZCOPY}" == "Xy" ]; then
-        DEFINE_INFINIBAND_SDP_SEND_ZCOPY="#define CONFIG_INFINIBAND_SDP_SEND_ZCOPY 1"
+        DEFINE_INFINIBAND_SDP_SEND_ZCOPY="#undef CONFIG_INFINIBAND_SDP_SEND_ZCOPY\n#define CONFIG_INFINIBAND_SDP_SEND_ZCOPY 1"
 else
-        DEFINE_INFINIBAND_SDP_SEND_ZCOPY="#undef CONFIG_INFINIBAND_SDP_SEND_ZCOPY"
+        DEFINE_INFINIBAND_SDP_SEND_ZCOPY="/* CONFIG_INFINIBAND_SDP_SEND_ZCOPY is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_SDP_RECV_ZCOPY}" == "Xy" ]; then
-        DEFINE_INFINIBAND_SDP_RECV_ZCOPY="#define CONFIG_INFINIBAND_SDP_RECV_ZCOPY 1"
+        DEFINE_INFINIBAND_SDP_RECV_ZCOPY="#undef CONFIG_INFINIBAND_SDP_RECV_ZCOPY\n#define CONFIG_INFINIBAND_SDP_RECV_ZCOPY 1"
 else
-        DEFINE_INFINIBAND_SDP_RECV_ZCOPY="#undef CONFIG_INFINIBAND_SDP_RECV_ZCOPY"
+        DEFINE_INFINIBAND_SDP_RECV_ZCOPY="/* CONFIG_INFINIBAND_SDP_RECV_ZCOPY is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_SDP_DEBUG}" == "Xy" ]; then
-        DEFINE_INFINIBAND_SDP_DEBUG="#define CONFIG_INFINIBAND_SDP_DEBUG 1"
+        DEFINE_INFINIBAND_SDP_DEBUG="#undef CONFIG_INFINIBAND_SDP_DEBUG\n#define CONFIG_INFINIBAND_SDP_DEBUG 1"
 else
-        DEFINE_INFINIBAND_SDP_DEBUG="#undef CONFIG_INFINIBAND_SDP_DEBUG"
+        DEFINE_INFINIBAND_SDP_DEBUG="/* CONFIG_INFINIBAND_SDP_DEBUG is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_SDP_DEBUG_DATA}" == "Xy" ]; then
-        DEFINE_INFINIBAND_SDP_DEBUG_DATA="#define CONFIG_INFINIBAND_SDP_DEBUG_DATA 1"
+        DEFINE_INFINIBAND_SDP_DEBUG_DATA="#undef CONFIG_INFINIBAND_SDP_DEBUG_DATA\n#define CONFIG_INFINIBAND_SDP_DEBUG_DATA 1"
 else
-        DEFINE_INFINIBAND_SDP_DEBUG_DATA="#undef CONFIG_INFINIBAND_SDP_DEBUG_DATA"
+        DEFINE_INFINIBAND_SDP_DEBUG_DATA="/* CONFIG_INFINIBAND_SDP_DEBUG_DATA is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_IPATH}" == "Xm" ]; then
-        DEFINE_INFINIBAND_IPATH="#define CONFIG_INFINIBAND_IPATH 1"
+        DEFINE_INFINIBAND_IPATH="#undef CONFIG_INFINIBAND_IPATH\n#define CONFIG_INFINIBAND_IPATH 1"
 else
-        DEFINE_INFINIBAND_IPATH="#undef CONFIG_INFINIBAND_IPATH"
+        DEFINE_INFINIBAND_IPATH="/* CONFIG_INFINIBAND_IPATH is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_MTHCA_DEBUG}" == "Xy" ]; then
-        DEFINE_INFINIBAND_MTHCA_DEBUG="#define CONFIG_INFINIBAND_MTHCA_DEBUG 1"
+        DEFINE_INFINIBAND_MTHCA_DEBUG="#undef CONFIG_INFINIBAND_MTHCA_DEBUG\n#define CONFIG_INFINIBAND_MTHCA_DEBUG 1"
 else
-        DEFINE_INFINIBAND_MTHCA_DEBUG="#undef CONFIG_INFINIBAND_MTHCA_DEBUG"
+        DEFINE_INFINIBAND_MTHCA_DEBUG="/* CONFIG_INFINIBAND_MTHCA_DEBUG is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_QLGC_VNIC}" == "Xm" ]; then
-        DEFINE_INFINIBAND_QLGC_VNIC="#define CONFIG_INFINIBAND_QLGC_VNIC 1"
+        DEFINE_INFINIBAND_QLGC_VNIC="#undef CONFIG_INFINIBAND_QLGC_VNIC\n#define CONFIG_INFINIBAND_QLGC_VNIC 1"
 else
-        DEFINE_INFINIBAND_QLGC_VNIC="#undef CONFIG_INFINIBAND_QLGC_VNIC"
+        DEFINE_INFINIBAND_QLGC_VNIC="/* CONFIG_INFINIBAND_QLGC_VNIC is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_QLGC_VNIC_STATS}" == "Xy" ]; then
-        DEFINE_INFINIBAND_QLGC_VNIC_STATS="#define CONFIG_INFINIBAND_QLGC_VNIC_STATS 1"
+        DEFINE_INFINIBAND_QLGC_VNIC_STATS="#undef CONFIG_INFINIBAND_QLGC_VNIC_STATS\n#define CONFIG_INFINIBAND_QLGC_VNIC_STATS 1"
 else
-        DEFINE_INFINIBAND_QLGC_VNIC_STATS="#undef CONFIG_INFINIBAND_QLGC_VNIC_STATS"
+        DEFINE_INFINIBAND_QLGC_VNIC_STATS="/* CONFIG_INFINIBAND_QLGC_VNIC_STATS is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_CXGB3}" == "Xm" ]; then
-        DEFINE_INFINIBAND_CXGB3="#define CONFIG_INFINIBAND_CXGB3 1"
+        DEFINE_INFINIBAND_CXGB3="#undef CONFIG_INFINIBAND_CXGB3\n#define CONFIG_INFINIBAND_CXGB3 1"
 else
-        DEFINE_INFINIBAND_CXGB3="#undef CONFIG_INFINIBAND_CXGB3"
+        DEFINE_INFINIBAND_CXGB3="/* CONFIG_INFINIBAND_CXGB3 is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_CXGB3_DEBUG}" == "Xy" ]; then
-        DEFINE_INFINIBAND_CXGB3_DEBUG="#define CONFIG_INFINIBAND_CXGB3_DEBUG 1"
+        DEFINE_INFINIBAND_CXGB3_DEBUG="#undef CONFIG_INFINIBAND_CXGB3_DEBUG\n#define CONFIG_INFINIBAND_CXGB3_DEBUG 1"
 else
-        DEFINE_INFINIBAND_CXGB3_DEBUG="#undef CONFIG_INFINIBAND_CXGB3_DEBUG"
+        DEFINE_INFINIBAND_CXGB3_DEBUG="/* CONFIG_INFINIBAND_CXGB3_DEBUG is not set */"
 fi
 if [ "X${CONFIG_CHELSIO_T3}" == "Xm" ]; then
-        DEFINE_CHELSIO_T3="#define CONFIG_CHELSIO_T3 1"
+        DEFINE_CHELSIO_T3="#undef CONFIG_CHELSIO_T3\n#define CONFIG_CHELSIO_T3 1"
 else
-        DEFINE_CHELSIO_T3="#undef CONFIG_CHELSIO_T3"
+        DEFINE_CHELSIO_T3="/* CONFIG_CHELSIO_T3 is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_NES}" == "Xm" ]; then
-        DEFINE_INFINIBAND_NES="#define CONFIG_INFINIBAND_NES 1"
+        DEFINE_INFINIBAND_NES="#undef CONFIG_INFINIBAND_NES\n#define CONFIG_INFINIBAND_NES 1"
 else
-        DEFINE_INFINIBAND_NES="#undef CONFIG_INFINIBAND_NES"
+        DEFINE_INFINIBAND_NES="/* CONFIG_INFINIBAND_NES is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_NES_DEBUG}" == "Xy" ]; then
-        DEFINE_INFINIBAND_NES_DEBUG="#define CONFIG_INFINIBAND_NES_DEBUG 1"
+        DEFINE_INFINIBAND_NES_DEBUG="#undef CONFIG_INFINIBAND_NES_DEBUG\n#define CONFIG_INFINIBAND_NES_DEBUG 1"
 else
-        DEFINE_INFINIBAND_NES_DEBUG="#undef CONFIG_INFINIBAND_NES_DEBUG"
+        DEFINE_INFINIBAND_NES_DEBUG="/* CONFIG_INFINIBAND_NES_DEBUG is not set */"
 fi
 if [ "X${CONFIG_INFINIBAND_AMSO1100}" == "Xm" ]; then
-        DEFINE_INFINIBAND_AMSO1100="#define CONFIG_INFINIBAND_AMSO1100 1"
+        DEFINE_INFINIBAND_AMSO1100="#undef CONFIG_INFINIBAND_AMSO1100\n#define CONFIG_INFINIBAND_AMSO1100 1"
 else
-        DEFINE_INFINIBAND_AMSO1100="#undef CONFIG_INFINIBAND_AMSO1100"
+        DEFINE_INFINIBAND_AMSO1100="/* CONFIG_INFINIBAND_AMSO1100 is not set */"
 fi
 if [ "X${CONFIG_SUNRPC_XPRT_RDMA}" == "Xm" ]; then
-        DEFINE_CONFIG_SUNRPC_XPRT_RDMA="#define CONFIG_SUNRPC_XPRT_RDMA 1"
-        DEFINE_CONFIG_SUNRPC="#define CONFIG_SUNRPC 1"
-        DEFINE_CONFIG_SUNRPC_GSS="#define CONFIG_SUNRPC_GSS 1"
-        DEFINE_CONFIG_RPCSEC_GSS_KRB5="#define CONFIG_RPCSEC_GSS_KRB5 1"
-        DEFINE_CONFIG_RPCSEC_GSS_SPKM3="#define CONFIG_RPCSEC_GSS_SPKM3 1"
-        DEFINE_CONFIG_NFS_FS="#define CONFIG_NFS_FS 1"
-        DEFINE_CONFIG_NFS_V3="#define CONFIG_NFS_V3 1"
-        DEFINE_CONFIG_NFS_V3_ACL="#define CONFIG_NFS_V3_ACL 1"
-	DEFINE_CONFIG_NFS_V4="#define CONFIG_NFS_V4 1"
-        DEFINE_CONFIG_NFS_ACL_SUPPORT="#define CONFIG_NFS_ACL_SUPPORT 1"
-        DEFINE_CONFIG_NFS_DIRECTIO="#define CONFIG_NFS_DIRECTIO 1"
-        DEFINE_CONFIG_SYSCTL="#define CONFIG_SYSCTL 1"
-	DEFINE_CONFIG_EXPORTFS="#define CONFIG_EXPORTFS 1"
-        DEFINE_CONFIG_LOCKD="#define CONFIG_LOCKD 1"
-        DEFINE_CONFIG_LOCKD_V4="#define CONFIG_LOCKD_V4 1"
-        DEFINE_CONFIG_NFSD="#define CONFIG_NFSD 1"
-        DEFINE_CONFIG_NFSD_V2_ACL="#define CONFIG_NFSD_V2_ACL 1"
-        DEFINE_CONFIG_NFSD_V3="#define CONFIG_NFSD_V3 1"
-        DEFINE_CONFIG_NFSD_V3_ACL="#define CONFIG_NFSD_V3_ACL 1"
-        DEFINE_CONFIG_NFSD_V4="#define CONFIG_NFSD_V4 1"
-        DEFINE_CONFIG_NFSD_RDMA="#define CONFIG_NFSD_RDMA 1"
+        DEFINE_SUNRPC_XPRT_RDMA="#undef CONFIG_SUNRPC_XPRT_RDMA\n#define CONFIG_SUNRPC_XPRT_RDMA 1"
+        DEFINE_SUNRPC="#undef CONFIG_SUNRPC\n#define CONFIG_SUNRPC 1"
+        DEFINE_SUNRPC_GSS="#undef CONFIG_SUNRPC_GSS\n#define CONFIG_SUNRPC_GSS 1"
+        DEFINE_RPCSEC_GSS_KRB5="#undef CONFIG_RPCSEC_GSS_KRB5\n#define CONFIG_RPCSEC_GSS_KRB5 1"
+        DEFINE_RPCSEC_GSS_SPKM3="#undef CONFIG_RPCSEC_GSS_SPKM3\n#define CONFIG_RPCSEC_GSS_SPKM3 1"
+        DEFINE_NFS_FS="#undef CONFIG_NFS_FS\n#define CONFIG_NFS_FS 1"
+        DEFINE_NFS_V3="#undef CONFIG_NFS_V3\n#define CONFIG_NFS_V3 1"
+        DEFINE_NFS_V3_ACL="#undef CONFIG_NFS_V3_ACL\n#define CONFIG_NFS_V3_ACL 1"
+        DEFINE_NFS_V4="#undef CONFIG_NFS_V4\n#define CONFIG_NFS_V4 1"
+        DEFINE_NFS_ACL_SUPPORT="#undef CONFIG_NFS_ACL_SUPPORT\n#define CONFIG_NFS_ACL_SUPPORT 1"
+        DEFINE_NFS_DIRECTIO="#undef CONFIG_NFS_DIRECTIO\n#define CONFIG_NFS_DIRECTIO 1"
+        DEFINE_SYSCTL="#undef CONFIG_SYSCTL\n#define CONFIG_SYSCTL 1"
+        DEFINE_EXPORTFS="#undef CONFIG_EXPORTFS\n#define CONFIG_EXPORTFS 1"
+        DEFINE_LOCKD="#undef CONFIG_LOCKD\n#define CONFIG_LOCKD 1"
+        DEFINE_LOCKD_V4="#undef CONFIG_LOCKD_V4\n#define CONFIG_LOCKD_V4 1"
+        DEFINE_NFSD="#undef CONFIG_NFSD\n#define CONFIG_NFSD 1"
+        DEFINE_NFSD_V2_ACL="#undef CONFIG_NFSD_V2_ACL\n#define CONFIG_NFSD_V2_ACL 1"
+        DEFINE_NFSD_V3="#undef CONFIG_NFSD_V3\n#define CONFIG_NFSD_V3 1"
+        DEFINE_NFSD_V3_ACL="#undef CONFIG_NFSD_V3_ACL\n#define CONFIG_NFSD_V3_ACL 1"
+        DEFINE_NFSD_V4="#undef CONFIG_NFSD_V4\n#define CONFIG_NFSD_V4 1"
+        DEFINE_NFSD_RDMA="#undef CONFIG_NFSD_RDMA\n#define CONFIG_NFSD_RDMA 1"
 else
-        DEFINE_CONFIG_SUNRPC_XPRT_RDMA="#undef CONFIG_SUNRPC_XPRT_RDMA"
-        DEFINE_CONFIG_SUNRPC="#undef CONFIG_SUNRPC"
-        DEFINE_CONFIG_SUNRPC_GSS="#undef CONFIG_SUNRPC_GSS"
-        DEFINE_CONFIG_RPCSEC_GSS_KRB5="#undef CONFIG_RPCSEC_GSS_KRB5"
-        DEFINE_CONFIG_RPCSEC_GSS_SPKM3="#undef CONFIG_RPCSEC_GSS_SPKM3"
-        DEFINE_CONFIG_NFS_FS="#undef CONFIG_NFS_FS"
-        DEFINE_CONFIG_NFS_V3="#undef CONFIG_NFS_V3"
-        DEFINE_CONFIG_NFS_V3_ACL="#undef CONFIG_NFS_V3_ACL"
-	DEFINE_CONFIG_NFS_V4="#undef CONFIG_NFS_V4"
-        DEFINE_CONFIG_NFS_ACL_SUPPORT="#undef CONFIG_NFS_ACL_SUPPORT"
-        DEFINE_CONFIG_NFS_DIRECTIO="#undef CONFIG_NFS_DIRECTIO"
-        DEFINE_CONFIG_SYSCTL="#undef CONFIG_SYSCTL"
-	DEFINE_CONFIG_EXPORTFS="#undef CONFIG_EXPORTFS"
-        DEFINE_CONFIG_LOCKD="#undef CONFIG_LOCKD"
-        DEFINE_CONFIG_LOCKD_V4="#undef CONFIG_LOCKD_V4"
-        DEFINE_CONFIG_NFSD="#undef CONFIG_NFSD"
-        DEFINE_CONFIG_NFSD_V2_ACL="#undef CONFIG_NFSD_V2_ACL"
-        DEFINE_CONFIG_NFSD_V3="#undef CONFIG_NFSD_V3"
-        DEFINE_CONFIG_NFSD_V3_ACL="#undef CONFIG_NFSD_V3_ACL"
-        DEFINE_CONFIG_NFSD_V4="#undef CONFIG_NFSD_V4"
-        DEFINE_CONFIG_NFSD_RDMA="#undef CONFIG_NFSD_RDMA"
+        DEFINE_SUNRPC_XPRT_RDMA="/* CONFIG_SUNRPC_XPRT_RDMA is not set */"
+        DEFINE_SUNRPC="/* CONFIG_SUNRPC is not set */"
+        DEFINE_SUNRPC_GSS="/* CONFIG_SUNRPC_GSS is not set */"
+        DEFINE_RPCSEC_GSS_KRB5="/* CONFIG_RPCSEC_GSS_KRB5 is not set */"
+        DEFINE_RPCSEC_GSS_SPKM3="/* CONFIG_RPCSEC_GSS_SPKM3 is not set */"
+        DEFINE_NFS_FS="/* CONFIG_NFS_FS is not set */"
+        DEFINE_NFS_V3="/* CONFIG_NFS_V3 is not set */"
+        DEFINE_NFS_V3_ACL="/* CONFIG_NFS_V3_ACL is not set */"
+        DEFINE_NFS_V4="/* CONFIG_NFS_V4 is not set */"
+        DEFINE_NFS_ACL_SUPPORT="/* CONFIG_NFS_ACL_SUPPORT is not set */"
+        DEFINE_NFS_DIRECTIO="/* CONFIG_NFS_DIRECTIO is not set */"
+        DEFINE_SYSCTL="/* CONFIG_SYSCTL is not set */"
+        DEFINE_EXPORTFS="/* CONFIG_EXPORTFS is not set */"
+        DEFINE_LOCKD="/* CONFIG_LOCKD is not set */"
+        DEFINE_LOCKD_V4="/* CONFIG_LOCKD_V4 is not set */"
+        DEFINE_NFSD="/* CONFIG_NFSD is not set */"
+        DEFINE_NFSD_V2_ACL="/* CONFIG_NFSD_V2_ACL is not set */"
+        DEFINE_NFSD_V3="/* CONFIG_NFSD_V3 is not set */"
+        DEFINE_NFSD_V3_ACL="/* CONFIG_NFSD_V3_ACL is not set */"
+        DEFINE_NFSD_V4="/* CONFIG_NFSD_V4 is not set */"
+        DEFINE_NFSD_RDMA="/* CONFIG_NFSD_RDMA is not set */"
 fi
 cat >> ${AUTOCONF_H} << EOFAUTOCONF
+#ifndef __OFED_BUILD__
+#include_next <linux/autoconf.h>
+#else
 #undef CONFIG_MEMTRACK
 #undef CONFIG_DEBUG_INFO
 #undef CONFIG_INFINIBAND
@@ -1040,6 +1049,7 @@
 #undef CONFIG_INFINIBAND_EHCA_SCALING
 #undef CONFIG_RDS
 #undef CONFIG_RDS_IB
+#undef CONFIG_RDS_IWARP
 #undef CONFIG_RDS_DEBUG
 #undef CONFIG_INFINIBAND_MADEYE
 #undef CONFIG_INFINIBAND_QLGC_VNIC
@@ -1080,73 +1090,75 @@
 #undef CONFIG_INFINIBAND_IPATH
 #undef CONFIG_INFINIBAND_MTHCA_DEBUG
 #undef CONFIG_INFINIBAND_AMSO1100
+#endif
 
-${DEFINE_INFINIBAND}
-${DEFINE_INFINIBAND_IPOIB}
-${DEFINE_INFINIBAND_IPOIB_CM}
-${DEFINE_INFINIBAND_SDP}
-${DEFINE_INFINIBAND_SRP}
-${DEFINE_INFINIBAND_SRPT}
+$(echo -e "${DEFINE_INFINIBAND}")
+$(echo -e "${DEFINE_INFINIBAND_IPOIB}")
+$(echo -e "${DEFINE_INFINIBAND_IPOIB_CM}")
+$(echo -e "${DEFINE_INFINIBAND_SDP}")
+$(echo -e "${DEFINE_INFINIBAND_SRP}")
+$(echo -e "${DEFINE_INFINIBAND_SRPT}")
 
-${DEFINE_INFINIBAND_USER_MAD}
-${DEFINE_INFINIBAND_USER_ACCESS}
-${DEFINE_INFINIBAND_ADDR_TRANS}
-${DEFINE_INFINIBAND_USER_MEM}
-${DEFINE_INFINIBAND_MTHCA}
-${DEFINE_INFINIBAND_QLGC_VNIC}
-${DEFINE_INFINIBAND_CXGB3}
-${DEFINE_CHELSIO_T3}
-${DEFINE_INFINIBAND_NES}
+$(echo -e "${DEFINE_INFINIBAND_USER_MAD}")
+$(echo -e "${DEFINE_INFINIBAND_USER_ACCESS}")
+$(echo -e "${DEFINE_INFINIBAND_ADDR_TRANS}")
+$(echo -e "${DEFINE_INFINIBAND_USER_MEM}")
+$(echo -e "${DEFINE_INFINIBAND_MTHCA}")
+$(echo -e "${DEFINE_INFINIBAND_QLGC_VNIC}")
+$(echo -e "${DEFINE_INFINIBAND_CXGB3}")
+$(echo -e "${DEFINE_CHELSIO_T3}")
+$(echo -e "${DEFINE_INFINIBAND_NES}")
 
-${DEFINE_CONFIG_SUNRPC_XPRT_RDMA}
-${DEFINE_CONFIG_SUNRPC}
-${DEFINE_CONFIG_SUNRPC_GSS}
-${DEFINE_CONFIG_RPCSEC_GSS_KRB5}
-${DEFINE_CONFIG_RPCSEC_GSS_SPKM3}
-${DEFINE_CONFIG_NFS_FS}
-${DEFINE_CONFIG_NFS_V3}
-${DEFINE_CONFIG_NFS_V3_ACL}
-${DEFINE_CONFIG_NFS_V4}
-${DEFINE_CONFIG_NFS_ACL_SUPPORT}
-${DEFINE_CONFIG_NFS_DIRECTIO}
-${DEFINE_CONFIG_SYSCTL}
-${DEFINE_CONFIG_EXPORTFS}
-${DEFINE_CONFIG_LOCKD}
-${DEFINE_CONFIG_LOCKD_V4}
-${DEFINE_CONFIG_NFSD}
-${DEFINE_CONFIG_NFSD_V2_ACL}
-${DEFINE_CONFIG_NFSD_V3}
-${DEFINE_CONFIG_NFSD_V3_ACL}
-${DEFINE_CONFIG_NFSD_V4}
-${DEFINE_CONFIG_NFSD_RDMA}
+$(echo -e "${DEFINE_SUNRPC_XPRT_RDMA}")
+$(echo -e "${DEFINE_SUNRPC}")
+$(echo -e "${DEFINE_SUNRPC_GSS}")
+$(echo -e "${DEFINE_RPCSEC_GSS_KRB5}")
+$(echo -e "${DEFINE_RPCSEC_GSS_SPKM3}")
+$(echo -e "${DEFINE_NFS_FS}")
+$(echo -e "${DEFINE_NFS_V3}")
+$(echo -e "${DEFINE_NFS_V3_ACL}")
+$(echo -e "${DEFINE_NFS_V4}")
+$(echo -e "${DEFINE_NFS_ACL_SUPPORT}")
+$(echo -e "${DEFINE_NFS_DIRECTIO}")
+$(echo -e "${DEFINE_SYSCTL}")
+$(echo -e "${DEFINE_EXPORTFS}")
+$(echo -e "${DEFINE_LOCKD}")
+$(echo -e "${DEFINE_LOCKD_V4}")
+$(echo -e "${DEFINE_NFSD}")
+$(echo -e "${DEFINE_NFSD_V2_ACL}")
+$(echo -e "${DEFINE_NFSD_V3}")
+$(echo -e "${DEFINE_NFSD_V3_ACL}")
+$(echo -e "${DEFINE_NFSD_V4}")
+$(echo -e "${DEFINE_NFSD_RDMA}")
 
-${DEFINE_INFINIBAND_IPOIB_DEBUG}
-${DEFINE_INFINIBAND_ISER}
-${DEFINE_SCSI_ISCSI_ATTRS}
-${DEFINE_ISCSI_TCP}
-${DEFINE_INFINIBAND_EHCA}
-${DEFINE_RDS}
-${DEFINE_RDS_IB}
-${DEFINE_RDS_DEBUG}
-${DEFINE_INFINIBAND_QLGC_VNIC_STATS}
-${DEFINE_INFINIBAND_CXGB3_DEBUG}
-${DEFINE_INFINIBAND_NES_DEBUG}
+$(echo -e "${DEFINE_INFINIBAND_IPOIB_DEBUG}")
+$(echo -e "${DEFINE_INFINIBAND_ISER}")
+$(echo -e "${DEFINE_SCSI_ISCSI_ATTRS}")
+$(echo -e "${DEFINE_ISCSI_TCP}")
+$(echo -e "${DEFINE_INFINIBAND_EHCA}")
+$(echo -e "${DEFINE_RDS}")
+$(echo -e "${DEFINE_RDS_IB}")
+$(echo -e "${DEFINE_RDS_IWARP}")
+$(echo -e "${DEFINE_RDS_DEBUG}")
+$(echo -e "${DEFINE_INFINIBAND_QLGC_VNIC_STATS}")
+$(echo -e "${DEFINE_INFINIBAND_CXGB3_DEBUG}")
+$(echo -e "${DEFINE_INFINIBAND_NES_DEBUG}")
 
-${DEFINE_MLX4_CORE}
-${DEFINE_MLX4_EN}
-${DEFINE_MLX4_INFINIBAND}
-${DEFINE_MLX4_ETHERNET}
-${DEFINE_MLX4_DEBUG}
+$(echo -e "${DEFINE_MLX4_CORE}")
+$(echo -e "${DEFINE_MLX4_EN}")
+$(echo -e "${DEFINE_MLX4_INFINIBAND}")
+$(echo -e "${DEFINE_MLX4_ETHERNET}")
+$(echo -e "${DEFINE_MLX4_DEBUG}")
 
-${DEFINE_INFINIBAND_IPOIB_DEBUG_DATA}
-${DEFINE_INFINIBAND_SDP_SEND_ZCOPY}
-${DEFINE_INFINIBAND_SDP_RECV_ZCOPY}
-${DEFINE_INFINIBAND_SDP_DEBUG}
-${DEFINE_INFINIBAND_SDP_DEBUG_DATA}
-${DEFINE_INFINIBAND_IPATH}
-${DEFINE_INFINIBAND_MTHCA_DEBUG}
-${DEFINE_INFINIBAND_MADEYE}
-${DEFINE_INFINIBAND_AMSO1100}
+$(echo -e "${DEFINE_INFINIBAND_IPOIB_DEBUG_DATA}")
+$(echo -e "${DEFINE_INFINIBAND_SDP_SEND_ZCOPY}")
+$(echo -e "${DEFINE_INFINIBAND_SDP_RECV_ZCOPY}")
+$(echo -e "${DEFINE_INFINIBAND_SDP_DEBUG}")
+$(echo -e "${DEFINE_INFINIBAND_SDP_DEBUG_DATA}")
+$(echo -e "${DEFINE_INFINIBAND_IPATH}")
+$(echo -e "${DEFINE_INFINIBAND_MTHCA_DEBUG}")
+$(echo -e "${DEFINE_INFINIBAND_MADEYE}")
+$(echo -e "${DEFINE_INFINIBAND_AMSO1100}")
 
 EOFAUTOCONF
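
[Editor's note: each DEFINE_* variable now carries an explicit "#undef ...\n#define ... 1" pair when a feature is enabled, or a "/* ... is not set */" comment when it is not, and is expanded through $(echo -e ...) inside the here-document so the embedded \n becomes a real newline. Together with the new __OFED_BUILD__ guard (the macro is added to LINUXINCLUDE in ofed_scripts/makefile below), the generated header defers to the kernel's own autoconf.h for non-OFED builds. A sketch of the resulting ${AUTOCONF_H}, assuming CONFIG_RDS=m and CONFIG_MEMTRACK unset:

    #ifndef __OFED_BUILD__
    #include_next <linux/autoconf.h>
    #else
    #undef CONFIG_MEMTRACK
    /* ... rest of the #undef block ... */
    #endif

    #undef CONFIG_RDS
    #define CONFIG_RDS 1
    #undef CONFIG_RDS_IB
    #define CONFIG_RDS_IB 1
    /* CONFIG_MEMTRACK is not set */
]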
 

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/connectx_port_config
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/connectx_port_config	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/connectx_port_config	2009-05-30 10:58:06 UTC (rev 299)
@@ -46,6 +46,15 @@
 echo "	auto,eth"
 }
 
+is_integer()
+
+{
+   printf "%s\n" $1 |grep -E "^[+-]?[0-9]+$" > /dev/null
+   return $?
+}
+
+
+
 # If module $1 is loaded return - 0 else - 1
 is_module()
 {
@@ -62,11 +71,22 @@
 {
 printf "Please select device to modify [$dev]: "
 read dev;
-if [ -e ${Devices[$dev]}/mlx4_port1 ]; then
-    echo "`echo ${Devices[$dev]} | cut -d '/' -f 6`"; echo
+
+if is_integer $dev; then
+    if [ $dev -lt 1 ]; then
+        echo "Illegal index selected, quitting..."; echo
+       exit 1;
+    fi
+
+    if [ -e ${Devices[$dev]}/mlx4_port1 ]; then
+        echo "`echo ${Devices[$dev]} | cut -d '/' -f 6`"; echo
+    else
+        echo "Illegal value selected, quitting..."; echo
+        exit 1			
+    fi
 else
-    echo "Illegal value selected, quitting..."; echo
-    exit 1			
+    echo "Numeric value expected, quitting..."; echo
+    exit 1
 fi
 }
 
@@ -83,6 +103,29 @@
 done
 }
 
+validate_device()
+{
+if [ "$udevice" = "0" ]; then
+    if [ "$comm" != "" ]; then
+            if [ ${#Devices[@]} -gt 1 ]; then
+                echo "More than one ConnectX device found, please specify the required device"; echo
+		usage
+		exit 1
+	    fi
+    fi
+    return
+fi
+for d in ${Devices[@]}
+do
+    ser=$(($ser+1))
+    if [ "/sys/bus/pci/devices/${udevice}/" == "$d" ] || [ "/sys/bus/pci/devices/0000:${udevice}/" == "${d}" ] ; then
+        return
+    fi
+done
+echo "Error: illegal device selected."
+exit 1;
+}
+
 validate_port_config()
 {
 for conf in "eth,eth" "auto,auto" "auto,eth" "ib,ib" "ib,auto" "ib,eth"; do
@@ -101,11 +144,20 @@
 local options=$2
 printf "Select mode for port $port (${options}): "
 read mode
-if [ `echo ${options} | grep ${mode} -wc` == 0 ]; then
+if is_integer $mode; then
+    if [ $mode -lt 0 ]; then
+        echo "Illegal value selected, quitting..."; echo
+        exit 1			
+    fi
+    if [ `echo ${options} | grep ${mode} -wc` == 0 ]; then
+        echo "Illegal value selected, quitting..."; echo
+        exit 1			
+    fi
+    return "${mode}"
+else
     echo "Illegal value selected, quitting..."; echo
-    exit 1			
+    exit 1
 fi
-return "${mode}"
 }     
 
 show_configuration()
@@ -177,13 +229,28 @@
     case "$1" in
     	-s|--show)
 	    show_configuration
+            if [ -n "$2" ]; then
+		echo "Too many parameters"
+		usage
+		exit 1
+            fi
 	    exit 0
 	    ;;
 	-d|--device)
+            if [ -z "$2" ]; then
+                echo "Error: Illegal input"
+                usage
+                exit 1
+            fi
 	    udevice=$2
 	    shift 2
 	    ;;
 	-c|--conf)
+            if [ -z "$2" ]; then
+                echo "Error: Illegal input"
+                usage
+                exit 1
+            fi
     	    comm=$2
 	    shift 2;
 	    validate_port_config
@@ -225,14 +292,15 @@
 	echo "| $i             $dev |"
     fi
 done
+echo "|----------------------------|"; echo
 
+validate_device
+
 if [ $i == 0 ]; then
     echo "No devices found, quiting..."
     exit 1;
 fi
 
-echo "|----------------------------|"; echo
-
 #############################################################
 # Select device to modify                                   #
 #############################################################
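
[Editor's note: connectx_port_config now validates its interactive input: the device index and the port mode must be integers (is_integer) and the index must be at least 1 before ${Devices[$dev]} is dereferenced, and the new validate_device() rejects a -d/--device argument that does not match one of the discovered ConnectX devices under /sys/bus/pci/devices. With more than one adapter present, -c/--conf without an explicit -d is refused. A hedged usage sketch (the PCI address is a made-up example):

    connectx_port_config -s                          # show current port configuration
    connectx_port_config -d 0000:0b:00.0 -c ib,eth   # request port1=ib, port2=eth on the named device
]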

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/get_backport_dir.sh
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/get_backport_dir.sh	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/get_backport_dir.sh	2009-05-30 10:58:06 UTC (rev 299)
@@ -64,15 +64,19 @@
         2.6.17*)
                 echo 2.6.17
         ;;
-        2.6.18-[5-9][0-9]*.el5*)
+        2.6.18-*.el5*)
                 minor=$(echo $KVERSION | cut -d"." -f3 | cut -d"-" -f2)
-                if [ $minor -lt 84 ]; then
+                if [ $minor -lt 50 ]; then
+                        echo 2.6.18_FC6
+                elif [ $minor -lt 84 ]; then
                         echo 2.6.18-EL5.1
-                else
+                elif [ $minor -lt 128 ]; then
                         echo 2.6.18-EL5.2
+		else
+                        echo 2.6.18-EL5.3
                 fi
         ;;
-        2.6.18-*fc[56]*|2.6.18-*el5*)
+        2.6.18-*fc[56]*)
                 echo 2.6.18_FC6
         ;;
         2.6.18.*-*-*)
@@ -113,6 +117,9 @@
         2.6.26*)
                 echo 2.6.26
         ;;
+        2.6.27.*-*)
+                echo 2.6.27_sles11
+        ;;
         *)
                 echo
         ;;
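
[Editor's note: RHEL5 kernels are now matched by a single 2.6.18-*.el5 pattern and dispatched on the package build number instead of falling back to the FC6 backport: below 50 keeps 2.6.18_FC6, below 84 gives 2.6.18-EL5.1, below 128 gives 2.6.18-EL5.2, and anything newer gets the new 2.6.18-EL5.3 directory; 2.6.27 SLES11 kernels are also recognized. The build number is extracted exactly as the script does it, e.g. for a RHEL5 U3 kernel (version string used only as an example):

    KVERSION=2.6.18-128.1.6.el5
    minor=$(echo $KVERSION | cut -d"." -f3 | cut -d"-" -f2)   # -> 128, so the 2.6.18-EL5.3 backport is chosen
]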

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/ib_ipoib.conf
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/ib_ipoib.conf	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/ib_ipoib.conf	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,2 @@
+# install ib_ipoib modprobe --ignore-install ib_ipoib && /sbin/ib_ipoib_sysctl load
+# remove ib_ipoib /sbin/ib_ipoib_sysctl unload ; modprobe -r --ignore-remove ib_ipoib
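
[Editor's note: both modprobe.d hooks in this new file are shipped commented out (note the leading '#'), so installing it changes nothing by itself; an administrator has to uncomment the lines before modprobe will wrap ib_ipoib load/unload with ib_ipoib_sysctl. Uncommented, the install rule would read (sketch):

    install ib_ipoib modprobe --ignore-install ib_ipoib && /sbin/ib_ipoib_sysctl load
]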

Added: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/ib_ipoib_sysctl
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/ib_ipoib_sysctl	                        (rev 0)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/ib_ipoib_sysctl	2009-05-30 10:58:06 UTC (rev 299)
@@ -0,0 +1,75 @@
+#!/bin/bash
+#
+# Copyright (c) 2006 Mellanox Technologies. All rights reserved.
+#
+# This Software is licensed under one of the following licenses:
+#
+# 1) under the terms of the "Common Public License 1.0" a copy of which is
+#    available from the Open Source Initiative, see
+#    http://www.opensource.org/licenses/cpl.php.
+#
+# 2) under the terms of the "The BSD License" a copy of which is
+#    available from the Open Source Initiative, see
+#    http://www.opensource.org/licenses/bsd-license.php.
+#
+# 3) under the terms of the "GNU General Public License (GPL) Version 2" a
+#    copy of which is available from the Open Source Initiative, see
+#    http://www.opensource.org/licenses/gpl-license.php.
+#
+# Licensee has the right to choose one of the above licenses.
+#
+# Redistributions of source code must retain the above copyright
+# notice and one of the license notices.
+#
+# Redistributions in binary form must reproduce both the above copyright
+# notice, one of the license notices in the documentation
+# and/or other materials provided with the distribution.
+#
+
+# Save original sysctl values
+sysctl_orig=/var/cache/ipoib_sysctl
+
+load()
+{
+	if [ -f $sysctl_orig ]; then
+		/bin/rm -f $sysctl_orig
+	fi
+	
+	umask 022
+	touch $sysctl_orig
+	/sbin/sysctl net.ipv4.tcp_timestamps		>> $sysctl_orig
+	/sbin/sysctl net.ipv4.tcp_sack			>> $sysctl_orig
+	/sbin/sysctl net.core.netdev_max_backlog	>> $sysctl_orig
+	/sbin/sysctl net.core.rmem_max			>> $sysctl_orig
+	/sbin/sysctl net.core.wmem_max			>> $sysctl_orig
+	/sbin/sysctl net.core.rmem_default		>> $sysctl_orig
+	/sbin/sysctl net.core.wmem_default		>> $sysctl_orig
+	/sbin/sysctl net.core.optmem_max		>> $sysctl_orig
+	/sbin/sysctl net.ipv4.tcp_mem			>> $sysctl_orig
+	/sbin/sysctl net.ipv4.tcp_rmem			>> $sysctl_orig
+	/sbin/sysctl net.ipv4.tcp_wmem			>> $sysctl_orig
+
+	/sbin/sysctl -q -w net.ipv4.tcp_timestamps=0
+	/sbin/sysctl -q -w net.ipv4.tcp_sack=0
+	/sbin/sysctl -q -w net.core.netdev_max_backlog=250000
+	/sbin/sysctl -q -w net.core.rmem_max=16777216
+	/sbin/sysctl -q -w net.core.wmem_max=16777216
+	/sbin/sysctl -q -w net.core.rmem_default=16777216
+	/sbin/sysctl -q -w net.core.wmem_default=16777216
+	/sbin/sysctl -q -w net.core.optmem_max=16777216
+	/sbin/sysctl -q -w net.ipv4.tcp_mem="16777216 16777216 16777216"
+	/sbin/sysctl -q -w net.ipv4.tcp_rmem="4096 87380 16777216"
+	/sbin/sysctl -q -w net.ipv4.tcp_wmem="4096 65536 16777216"
+}
+
+unload()
+{
+	if [ ! -f $sysctl_orig ]; then
+		return
+	fi
+
+	/sbin/sysctl -q -p $sysctl_orig
+	/bin/rm -f $sysctl_orig
+}
+
+$1
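
[Editor's note: the trailing "$1" dispatches to whichever function is named by the script's first argument, so this helper is driven as a load/unload pair: load() records the current TCP and core sysctl values in /var/cache/ipoib_sysctl and then applies the IPoIB tuning, unload() replays the saved file through sysctl -p and deletes it (and is a no-op if nothing was saved). Typical use, as wired up in openibd and ib_ipoib.conf:

    /sbin/ib_ipoib_sysctl load      # save current values, apply IPoIB tuning
    /sbin/ib_ipoib_sysctl unload    # restore the saved values
]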


Property changes on: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/ib_ipoib_sysctl
___________________________________________________________________
Added: svn:executable
   + *

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/makefile
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/makefile	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/makefile	2009-05-30 10:58:06 UTC (rev 299)
@@ -11,7 +11,7 @@
 .DELETE_ON_ERROR:
 
 include ./configure.mk.kernel
-include ./ofed_patch.mk
+include ./config.mk
 
 DEPMOD  = /sbin/depmod
 	
@@ -93,6 +93,7 @@
 		CONFIG_INFINIBAND_EHCA_SCALING=$(CONFIG_INFINIBAND_EHCA_SCALING) \
 		CONFIG_RDS=$(CONFIG_RDS) \
 		CONFIG_RDS_IB=$(CONFIG_RDS_IB) \
+		CONFIG_RDS_IWARP=$(CONFIG_RDS_IWARP) \
 		CONFIG_RDS_DEBUG=$(CONFIG_RDS_DEBUG) \
 		CONFIG_INFINIBAND_IPOIB_DEBUG_DATA=$(CONFIG_INFINIBAND_IPOIB_DEBUG_DATA) \
 		CONFIG_INFINIBAND_SDP_SEND_ZCOPY=$(CONFIG_INFINIBAND_SDP_SEND_ZCOPY) \
@@ -138,6 +139,7 @@
 		CONFIG_NFSD_V4=$(CONFIG_NFSD_V4) \
 		CONFIG_NFSD_RDMA=$(CONFIG_NFSD_RDMA) \
 		LINUXINCLUDE=' \
+		-D__OFED_BUILD__ \
 		-include include/linux/autoconf.h \
 		-include $(CWD)/include/linux/autoconf.h \
 		$(BACKPORT_INCLUDES) \
@@ -277,14 +279,14 @@
 	# Copy new rds kernel module to $(DESTDIR)/$(MODULES_DIR)/kernel/drivers/net/rds     
 	if [ -d $(CWD)/lib/modules/$(KVERSION)/kernel/net/rds ]; then \
 		mkdir -p $(DESTDIR)/$(MODULES_DIR)/kernel/net/rds; \
-		mv $(CWD)/lib/modules/$(KVERSION)/kernel/net/rds/rds.ko $(DESTDIR)/$(MODULES_DIR)/kernel/net/rds; \
+		mv $(CWD)/lib/modules/$(KVERSION)/kernel/net/rds/rds*.ko $(DESTDIR)/$(MODULES_DIR)/kernel/net/rds; \
 	fi; \
 	if [ -d $(CWD)/lib/modules/$(KVERSION)/extra/net/rds ]; then \
 		mkdir -p $(DESTDIR)/$(MODULES_DIR)/kernel/net/rds; \
-		mv $(CWD)/lib/modules/$(KVERSION)/extra/net/rds/rds.ko $(DESTDIR)/$(MODULES_DIR)/kernel/net/rds/ ; \
+		mv $(CWD)/lib/modules/$(KVERSION)/extra/net/rds/rds*.ko $(DESTDIR)/$(MODULES_DIR)/kernel/net/rds/ ; \
 	elif [ -d $(CWD)/lib/modules/$(KVERSION)/extra ]; then \
 		mkdir -p $(DESTDIR)/$(MODULES_DIR)/kernel/net/rds; \
-		mv $(CWD)/lib/modules/$(KVERSION)/extra/rds.ko $(DESTDIR)/$(MODULES_DIR)/kernel/net/rds/ ; \
+		mv $(CWD)/lib/modules/$(KVERSION)/extra/rds*.ko $(DESTDIR)/$(MODULES_DIR)/kernel/net/rds/ ; \
 	fi;
 	
 	if [ ! -n "$(DESTDIR)" ]; then $(DEPMOD) -r -ae $(KVERSION);fi;
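
[Editor's note: three independent changes in this makefile: the per-build settings file is renamed from ofed_patch.mk to config.mk (matching configure, ofed_patch.sh and the spec file), -D__OFED_BUILD__ is added to LINUXINCLUDE so the generated autoconf.h above takes its OFED branch, and the module install rules glob rds*.ko so that any additional RDS transport modules produced by the new CONFIG_RDS_IWARP build are copied alongside rds.ko (the exact extra module names depend on the RDS sources in this tree, so the glob is deliberately loose).]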

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/mlx4_en_sysctl
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/mlx4_en_sysctl	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/mlx4_en_sysctl	2009-05-30 10:58:06 UTC (rev 299)
@@ -49,17 +49,17 @@
 	/sbin/sysctl net.ipv4.tcp_rmem			>> $sysctl_orig
 	/sbin/sysctl net.ipv4.tcp_wmem			>> $sysctl_orig
 
-	/sbin/sysctl -w net.ipv4.tcp_timestamps=0
-	/sbin/sysctl -w net.ipv4.tcp_sack=0
-	/sbin/sysctl -w net.core.netdev_max_backlog=250000
-	/sbin/sysctl -w net.core.rmem_max=16777216
-	/sbin/sysctl -w net.core.wmem_max=16777216
-	/sbin/sysctl -w net.core.rmem_default=16777216
-	/sbin/sysctl -w net.core.wmem_default=16777216
-	/sbin/sysctl -w net.core.optmem_max=16777216
-	/sbin/sysctl -w net.ipv4.tcp_mem="16777216 16777216 16777216"
-	/sbin/sysctl -w net.ipv4.tcp_rmem="4096 87380 16777216"
-	/sbin/sysctl -w net.ipv4.tcp_wmem="4096 65536 16777216"
+	/sbin/sysctl -q -w net.ipv4.tcp_timestamps=0
+	/sbin/sysctl -q -w net.ipv4.tcp_sack=0
+	/sbin/sysctl -q -w net.core.netdev_max_backlog=250000
+	/sbin/sysctl -q -w net.core.rmem_max=16777216
+	/sbin/sysctl -q -w net.core.wmem_max=16777216
+	/sbin/sysctl -q -w net.core.rmem_default=16777216
+	/sbin/sysctl -q -w net.core.wmem_default=16777216
+	/sbin/sysctl -q -w net.core.optmem_max=16777216
+	/sbin/sysctl -q -w net.ipv4.tcp_mem="16777216 16777216 16777216"
+	/sbin/sysctl -q -w net.ipv4.tcp_rmem="4096 87380 16777216"
+	/sbin/sysctl -q -w net.ipv4.tcp_wmem="4096 65536 16777216"
 }
 
 unload()

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/ofa_kernel.spec
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/ofa_kernel.spec	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/ofa_kernel.spec	2009-05-30 10:58:06 UTC (rev 299)
@@ -113,6 +113,10 @@
 PreReq: coreutils
 PreReq: kernel
 Prereq: pciutils
+Prereq: grep
+Prereq: perl
+Prereq: procps
+Prereq: module-init-tools
 Version: %{_version}
 Release: %{krelver}
 Summary: Infiniband Driver and ULPs kernel modules
@@ -155,8 +159,8 @@
 cp -a $RPM_BUILD_DIR/%{_name}-%{_version}/include/ $RPM_BUILD_ROOT/%{_prefix}/src/%{_name}
 cp -a $RPM_BUILD_DIR/%{_name}-%{_version}/kernel_addons/ $RPM_BUILD_ROOT/%{_prefix}/src/%{_name}
 cp -a $RPM_BUILD_DIR/%{_name}-%{_version}/configure.mk.kernel $RPM_BUILD_ROOT/%{_prefix}/src/%{_name}
-cp -a $RPM_BUILD_DIR/%{_name}-%{_version}/ofed_patch.mk $RPM_BUILD_ROOT/%{_prefix}/src/%{_name}
-sed -i -e "s@\${CWD}@%{_prefix}/src/%{_name}@g" $RPM_BUILD_ROOT/%{_prefix}/src/%{_name}/ofed_patch.mk
+cp -a $RPM_BUILD_DIR/%{_name}-%{_version}/config.mk $RPM_BUILD_ROOT/%{_prefix}/src/%{_name}
+sed -i -e "s@\${CWD}@%{_prefix}/src/%{_name}@g" $RPM_BUILD_ROOT/%{_prefix}/src/%{_name}/config.mk
 
 # Support external modules include dir like in previous versions
 cd $RPM_BUILD_ROOT/%{_prefix}/src/
@@ -295,6 +299,13 @@
 touch $RPM_BUILD_ROOT/%{IB_CONF_DIR}/connectx.conf
 %endif
 
+%if %{build_ipoib}
+install -d $RPM_BUILD_ROOT/sbin
+install -m 0755 $RPM_BUILD_DIR/%{_name}-%{_version}/ofed_scripts/ib_ipoib_sysctl $RPM_BUILD_ROOT/sbin
+install -d $RPM_BUILD_ROOT/etc/modprobe.d
+install -m 0644 $RPM_BUILD_DIR/%{_name}-%{_version}/ofed_scripts/ib_ipoib.conf $RPM_BUILD_ROOT/etc/modprobe.d
+%endif
+
 %if %{include_udev_rules}
 install -d $RPM_BUILD_ROOT/etc/udev/rules.d
 install -m 0644 $RPM_BUILD_DIR/%{_name}-%{_version}/ofed_scripts/90-ib.rules $RPM_BUILD_ROOT/etc/udev/rules.d
@@ -562,6 +573,12 @@
        echo "CXGB3_LOAD=yes" >> %{IB_CONF_DIR}/openib.conf
 %endif
 
+%if %{build_nes}
+       echo >> %{IB_CONF_DIR}/openib.conf                                                
+       echo "# Load NES modules" >> %{IB_CONF_DIR}/openib.conf
+       echo "NES_LOAD=yes" >> %{IB_CONF_DIR}/openib.conf
+%endif
+
 %if %{build_ipoib}
        echo >> %{IB_CONF_DIR}/openib.conf                                                
        echo "# Load IPoIB" >> %{IB_CONF_DIR}/openib.conf
@@ -696,6 +713,8 @@
 %{LIB_MOD_DIR_INF}/util/ib_madeye.ko
 %endif
 %if %{build_ipoib}
+/sbin/ib_ipoib_sysctl
+/etc/modprobe.d/ib_ipoib.conf
 %{LIB_MOD_DIR_INF}/ulp/ipoib
 %endif
 %if %{build_sdp}
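
[Editor's note: the spec changes are packaging follow-ups to the rest of the commit: additional Prereq entries (grep, perl, procps, module-init-tools) for the shipped scripts, installation and packaging of ib_ipoib_sysctl and ib_ipoib.conf when %{build_ipoib} is set, a NES stanza appended to openib.conf when %{build_nes} is set, and the ofed_patch.mk to config.mk rename for the sources installed under %{_prefix}/src. With build_nes enabled, openib.conf gains:

    # Load NES modules
    NES_LOAD=yes
]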

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/ofed_patch.sh
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/ofed_patch.sh	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/ofed_patch.sh	2009-05-30 10:58:06 UTC (rev 299)
@@ -82,6 +82,7 @@
 	echo 2.6.18_FC6 "(good for RHEL5 as well)"
 	echo 2.6.18-EL5.1  "( RHEL5 U1)"
 	echo 2.6.18-EL5.2  "( RHEL5 U2)"
+	echo 2.6.18-EL5.3  "( RHEL5 U3)"
 	echo 2.6.18_suse10_2
 	echo 2.6.18
 	echo 2.6.19
@@ -93,6 +94,7 @@
 	echo 2.6.24
 	echo 2.6.25
 	echo 2.6.26
+	echo 2.6.27_sles11
 }
 
 # Apply patch
@@ -304,7 +306,7 @@
 
 QUILT=${QUILT:-$(/usr/bin/which quilt  2> /dev/null)}
 CWD=$(pwd)
-CONFIG="ofed_patch.mk"
+CONFIG="config.mk"
 PATCH_DIR=${PATCH_DIR:-""}
 
         # Check parameters
@@ -315,7 +317,7 @@
 
         patches_handle
 
-        # Create ofed_patch.mk
+        # Create config.mk
         /bin/rm -f ${CWD}/${CONFIG}
         cat >> ${CWD}/${CONFIG} << EOFCONFIG
 BACKPORT_INCLUDES=${BACKPORT_INCLUDES}
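
[Editor's note: ofed_patch.sh is the other side of the config.mk rename, so the "config.mk does not exist. running ofed_patch.sh" check added in configure above now matches the file this script actually writes; its list of supported backports also gains 2.6.18-EL5.3 (RHEL5 U3) and 2.6.27_sles11, consistent with get_backport_dir.sh. The generated config.mk begins with the BACKPORT_INCLUDES line shown here, presumably the -I paths for the selected backport directory.]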

Modified: branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/openibd
===================================================================
--- branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/openibd	2009-05-30 09:39:06 UTC (rev 298)
+++ branches/ofed-1.4.1upgrade/ofa-kernel/branches/upstream/current/ofed_scripts/openibd	2009-05-30 10:58:06 UTC (rev 299)
@@ -60,10 +60,15 @@
 else
     RUNMODE=auto    
 fi
-            
+
+# Allow unsupported modules, if disallowed by current configuration
+modprobe=/sbin/modprobe
+if ${modprobe} -c | grep -q '^allow_unsupported_modules  *0'; then
+    modprobe="${modprobe} --allow-unsupported-modules"
+fi
+
 ACTION=$1
 shift
-RESTART=0
 max_ports_num_in_hca=0
 
 # Check if OpenIB configured to start automatically
@@ -285,13 +290,13 @@
 
 GEN1_UNLOAD_MODULES="ib_srp_target scsi_target ib_srp kdapltest_module ib_kdapl ib_sdp ib_useraccess ib_useraccess_cm ib_cm ib_dapl_srv ib_ip2pr ib_ipoib ib_tavor mod_thh mod_rhh ib_dm_client ib_sa_client ib_client_query ib_poll ib_mad ib_core ib_services"
 
-UNLOAD_MODULES="ib_mthca mlx4_ib ib_ipath ipath_core ib_ehca iw_cxgb3 cxgb3"
+UNLOAD_MODULES="ib_mthca mlx4_ib ib_ipath ipath_core ib_ehca iw_nes iw_cxgb3 cxgb3"
 UNLOAD_MODULES="$UNLOAD_MODULES ib_ipoib ib_madeye ib_rds"
 UNLOAD_MODULES="$UNLOAD_MODULES rds ib_ucm kdapl ib_srp_target scsi_target ib_srpt ib_srp ib_iser ib_sdp"
 UNLOAD_MODULES="$UNLOAD_MODULES rdma_ucm rdma_cm ib_addr ib_cm ib_local_sa findex"
 UNLOAD_MODULES="$UNLOAD_MODULES ib_sa ib_uverbs ib_umad ib_mad ib_core"
 
-STATUS_MODULES="rdma_ucm ib_rds rds ib_srpt ib_srp qlgc_vnic ib_sdp rdma_cm ib_addr ib_local_sa findex ib_ipoib ib_ehca ib_ipath ipath_core mlx4_core mlx4_ib mlx4_en ib_mthca ib_uverbs ib_umad ib_ucm ib_sa ib_cm ib_mad ib_core iw_cxgb3"
+STATUS_MODULES="rdma_ucm ib_rds rds ib_srpt ib_srp qlgc_vnic ib_sdp rdma_cm ib_addr ib_local_sa findex ib_ipoib ib_ehca ib_ipath ipath_core mlx4_core mlx4_ib mlx4_en ib_mthca ib_uverbs ib_umad ib_ucm ib_sa ib_cm ib_mad ib_core iw_cxgb3 iw_nes"
 
 ipoib_ha_pidfile=/var/run/ipoib_ha.pid
 srp_daemon_pidfile=/var/run/srp_daemon.pid
@@ -428,9 +433,11 @@
     echo "############# DMESG ##############" >> $DEBUG_INFO                                                            
     /bin/dmesg >> $DEBUG_INFO
 
-    echo >> $DEBUG_INFO
-    echo "############# Messages ##############" >> $DEBUG_INFO
-    tail -50 /var/log/messages >> $DEBUG_INFO
+    if [ -r /var/log/messages ]; then
+        echo >> $DEBUG_INFO
+        echo "############# Messages ##############" >> $DEBUG_INFO
+        tail -50 /var/log/messages >> $DEBUG_INFO
+    fi
 
     echo >> $DEBUG_INFO
     echo "############# Running Processes ##############" >> $DEBUG_INFO
@@ -782,7 +789,7 @@
     
     # Load Mellanox HCA driver
     if [ "X${MTHCA_LOAD}" == "Xyes" ]; then
-        /sbin/modprobe ib_mthca > /dev/null 2>&1
+        ${modprobe} ib_mthca > /dev/null 2>&1
         my_rc=$?
         if [ $my_rc -ne 0 ]; then
                 echo_failure $"Loading Mellanox HCA driver: "
@@ -791,7 +798,7 @@
     fi
 
     if [ "X${MLX4_LOAD}" == "Xyes" ]; then
-        /sbin/modprobe mlx4_core > /dev/null 2>&1
+        ${modprobe} mlx4_core > /dev/null 2>&1
         my_rc=$?
         if [ $my_rc -ne 0 ]; then
                 echo_failure $"Loading Mellanox MLX4 HCA driver: "
@@ -801,7 +808,7 @@
                          . /etc/infiniband/connectx.conf > /dev/null 2>&1
                 fi
         fi
-        /sbin/modprobe mlx4_ib > /dev/null 2>&1
+        ${modprobe} mlx4_ib > /dev/null 2>&1
         my_rc=$?
         if [ $my_rc -ne 0 ]; then
                 echo_failure $"Loading Mellanox MLX4_IB HCA driver: "
@@ -811,10 +818,10 @@
 
     if [ "X${MLX4_EN_LOAD}" == "Xyes" ]; then
         if ! is_module mlx4_core; then
-                /sbin/modprobe mlx4_core > /dev/null 2>&1
+                ${modprobe} mlx4_core > /dev/null 2>&1
         fi
 
-        /sbin/modprobe mlx4_en > /dev/null 2>&1
+        ${modprobe} mlx4_en > /dev/null 2>&1
         my_rc=$?
         if [ $my_rc -ne 0 ]; then
                 echo_failure $"Loading Mellanox MLX4_EN HCA driver: "
@@ -824,7 +831,7 @@
 
     # Load QLogic InfiniPath driver
     if [ "X${IPATH_LOAD}" == "Xyes" ]; then
-        /sbin/modprobe ib_ipath > /dev/null 2>&1
+        ${modprobe} ib_ipath > /dev/null 2>&1
         my_rc=$?
         if [ $my_rc -ne 0 ]; then
                 echo_failure $"Loading QLogic InfiniPath driver: "
@@ -837,7 +844,7 @@
     # Load eHCA driver
     if [ "X${EHCA_LOAD}" == "Xyes" ]; then
         fix_location_codes
-        /sbin/modprobe ib_ehca > /dev/null 2>&1
+        ${modprobe} ib_ehca > /dev/null 2>&1
         my_rc=$?
         if [ $my_rc -ne 0 ]; then
                 echo_failure $"Loading eHCA driver: "
@@ -848,7 +855,7 @@
     # Load iw_cxgb3 driver
     if [ "X${CXGB3_LOAD}" == "Xyes" ]; then
         fix_location_codes
-        /sbin/modprobe iw_cxgb3 > /dev/null 2>&1
+        ${modprobe} iw_cxgb3 > /dev/null 2>&1
         my_rc=$?
         if [ $my_rc -ne 0 ]; then
                 echo_failure $"Loading cxgb3 driver: "
@@ -856,6 +863,17 @@
         RC=$[ $RC + $my_rc ]
     fi
 
+    # Load iw_nes driver
+    if [ "X${NES_LOAD}" == "Xyes" ]; then
+        fix_location_codes
+        ${modprobe} iw_nes > /dev/null 2>&1
+        my_rc=$?
+        if [ $my_rc -ne 0 ]; then
+                echo_failure $"Loading nes driver: "
+        fi
+        RC=$[ $RC + $my_rc ]
+    fi
+
 cat << EOF >> /tmp/ib_set_node_desc.sh
 #!/bin/bash
 
@@ -880,13 +898,13 @@
     chmod 755 /tmp/ib_set_node_desc.sh
     /tmp/ib_set_node_desc.sh > /dev/null 2>&1 &
 
-    /sbin/modprobe ib_umad > /dev/null 2>&1
+    ${modprobe} ib_umad > /dev/null 2>&1
     RC=$[ $RC + $? ]
-    /sbin/modprobe ib_uverbs > /dev/null 2>&1
+    ${modprobe} ib_uverbs > /dev/null 2>&1
     RC=$[ $RC + $? ]
     
     if [ $IPOIB -eq 1 ]; then
-        /sbin/modprobe ib_ipoib > /dev/null 2>&1
+        ${modprobe} ib_ipoib > /dev/null 2>&1
         RC=$[ $RC + $? ]
     fi
 
@@ -971,6 +989,11 @@
                         fi
                 done
         done    
+        if [ "X${SET_IPOIB_CM}" == "Xyes" ]; then
+                if  [ -x /sbin/ib_ipoib_sysctl ]; then
+                    /sbin/ib_ipoib_sysctl load
+                fi
+        fi
         echo_done "Setting up service network   .   .   ."
         
     fi    
@@ -982,10 +1005,10 @@
                 case $mod in
                         ib_iser)
                                 # Voltaire requirement
-                                /sbin/modprobe --force-modversion $mod > /dev/null 2>&1
+                                ${modprobe} --force-modversion $mod > /dev/null 2>&1
                         ;;
                         ib_srp)
-                                /sbin/modprobe $mod > /dev/null 2>&1
+                                ${modprobe} $mod > /dev/null 2>&1
                                 if [ "X${SRPHA_ENABLE}" == "Xyes" ]; then
                                     if [ ! -x /sbin/multipath ]; then
                                         echo "/sbin/multipath is required to enable SRP HA."
@@ -997,7 +1020,7 @@
 ACTION=="add", KERNEL=="sd*[!0-9]", RUN+="/sbin/multipath %M:%m"
 EOF
                                         fi
-                                        /sbin/modprobe dm_multipath > /dev/null 2>&1
+                                        ${modprobe} dm_multipath > /dev/null 2>&1
                                         srp_daemon.sh &
                                         srp_daemon_pid=$!
                                         echo ${srp_daemon_pid} > ${srp_daemon_pidfile}
@@ -1009,7 +1032,7 @@
                                 fi
                         ;;
                         *)
-                                /sbin/modprobe $mod > /dev/null 2>&1
+                                ${modprobe} $mod > /dev/null 2>&1
                         ;;
                 esac
                 RC=$?
@@ -1115,16 +1138,16 @@
 		case $mod in
 			ib_ipath)
 				# infinipath depends on modprobe.conf remove rule
-				/sbin/modprobe -v -r $mod > /dev/null 2>&1
+				${modprobe} -v -r $mod > /dev/null 2>&1
 				sleep 2
 			;;
-			ib_mthca | mlx4_ib | ib_ehca | iw_cxgb3)
+			ib_mthca | mlx4_ib | ib_ehca | iw_cxgb3 | iw_nes)
                                 rm_mod $mod
 				sleep 2
 			;;
 			*)
-			# is_module ib_ipoib && /sbin/modprobe -r ib_ipoib > /dev/null 2>&1
-                	/sbin/modprobe -r $mod > /dev/null 2>&1
+			# is_module ib_ipoib && ${modprobe} -r ib_ipoib > /dev/null 2>&1
+                	${modprobe} -r $mod > /dev/null 2>&1
                 	if [ $? -ne 0 ] || is_module $mod; then
                 	        # Try rmmod if modprobe failed: case that previous installation included more IB modules.
                 	        rm_mod $mod
@@ -1211,25 +1234,9 @@
 
         # Stop Gen1 modules if they are UP after uninstall
         if is_module ib_tavor; then
-                    stop_gen1
-                if [ $RESTART -eq 0 ]; then
-                        exit 0
-                else
-                        return 0
-                fi
-       fi
+                stop_gen1
+        fi
     
-        if ! is_module ib_core && ! is_module mlx4_en; then
-            if [ $RESTART -eq 0 ]; then
-                echo
-                echo_failure $"HCA driver is not loaded or loaded with errors"
-                echo
-                exit 1
-            else
-                    return 0
-            fi                        
-        fi
-
         # Stop IPoIB HA daemon if running
         if [ -f $ipoib_ha_pidfile ]; then
                 local line p
@@ -1304,13 +1311,11 @@
                 done
         fi
 
-        local done=0
-	# Unload mlx4_core
-	if is_module mlx4_core; then
-	    is_ref mlx4_core
-	    if [ $? -eq 0 ]; then
-		unload mlx4_core
-                done=1
+        # Unload mlx4_core
+        if is_module mlx4_core; then
+            is_ref mlx4_core
+            if [ $? -eq 0 ]; then
+                unload mlx4_core
             elif is_module mlx4_en; then
                 # Unload mlx4_en if one or more of the following cases takes place:
                 # - No MLX4 eth devices present
@@ -1319,17 +1324,18 @@
                     echo "MLX4_EN module is loaded and in use."
                     echo "To unload MLX4_EN run: 'modprobe -r mlx4_en mlx4_core'"
                 else
-		    unload mlx4_en
-		    unload mlx4_core
-                    done=1
+                    unload mlx4_en
+                    unload mlx4_core
                 fi
-	    fi
-	fi
+            fi
+        fi
 
-        if [ $done -eq 1 ]; then
-            /bin/rm -rf /dev/infiniband
-            echo_success $"Unloading HCA driver: "
+        if  [ -x /sbin/ib_ipoib_sysctl ]; then
+            /sbin/ib_ipoib_sysctl unload
         fi
+
+        /bin/rm -rf /dev/infiniband
+        echo_success $"Unloading HCA driver: "
         sleep 1
 }
 
@@ -1337,7 +1343,7 @@
 {
     local RC=0
  
-       if is_module ib_mthca || is_module mlx4_core || is_module ib_ipath || is_module ib_ehca || is_module iw_cxgb3; then
+       if is_module ib_mthca || is_module mlx4_core || is_module ib_ipath || is_module ib_ehca || is_module iw_cxgb3 || is_module iw_nes; then
                echo
                echo "  HCA driver loaded"
                echo
@@ -1426,7 +1432,6 @@
                 stop    
                 ;;
         restart)
-                RESTART=1
                 stop
                 start
                 ;;
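
[Editor's note: the openibd changes fall into four groups: every hard-coded /sbin/modprobe call goes through ${modprobe}, which on systems that block unsupported modules (modprobe -c reporting allow_unsupported_modules 0, as on SLES) appends --allow-unsupported-modules; iw_nes is added to the load, unload and status module lists next to iw_cxgb3, with NES_LOAD=yes honoured at start; ib_ipoib_sysctl is called on start (when SET_IPOIB_CM=yes) and on stop; and the RESTART/done bookkeeping in the stop path is dropped so the HCA unload message is printed unconditionally. When the wrapper is active a load amounts to (sketch):

    /sbin/modprobe --allow-unsupported-modules ib_mthca > /dev/null 2>&1
]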



