[pkg-nvidia-devel] r379 - in /packages/nvidia-graphics-drivers/trunk: debian.binary/ debian.binary/patches.old/ debian/ patches.dpatch.save/ patches.save/ patches/

rdonald@users.alioth.debian.org
Wed May 7 05:09:46 UTC 2008


Author: rdonald
Date: Wed May  7 05:09:46 2008
New Revision: 379

URL: http://svn.debian.org/wsvn/pkg-nvidia/?sc=1&rev=379
Log:
add Len's kernel module patch

Added:
    packages/nvidia-graphics-drivers/trunk/patches.dpatch.save/
    packages/nvidia-graphics-drivers/trunk/patches.dpatch.save/00list
    packages/nvidia-graphics-drivers/trunk/patches.dpatch.save/01_sysfs
    packages/nvidia-graphics-drivers/trunk/patches.dpatch.save/02_pcialias
    packages/nvidia-graphics-drivers/trunk/patches.dpatch.save/03_pci_get_class
    packages/nvidia-graphics-drivers/trunk/patches.dpatch.save/04_minion
    packages/nvidia-graphics-drivers/trunk/patches.save/
    packages/nvidia-graphics-drivers/trunk/patches.save/xenrt
Removed:
    packages/nvidia-graphics-drivers/trunk/debian.binary/patches.old/
    packages/nvidia-graphics-drivers/trunk/patches/xenrt
Modified:
    packages/nvidia-graphics-drivers/trunk/debian.binary/changelog
    packages/nvidia-graphics-drivers/trunk/debian.binary/conf.mk
    packages/nvidia-graphics-drivers/trunk/debian/rules

Modified: packages/nvidia-graphics-drivers/trunk/debian.binary/changelog
URL: http://svn.debian.org/wsvn/pkg-nvidia/packages/nvidia-graphics-drivers/trunk/debian.binary/changelog?rev=379&op=diff
==============================================================================
--- packages/nvidia-graphics-drivers/trunk/debian.binary/changelog (original)
+++ packages/nvidia-graphics-drivers/trunk/debian.binary/changelog Wed May  7 05:09:46 2008
@@ -1,3 +1,14 @@
+nvidia-kernel (169.12-1) unstable; urgency=low
+
+  * fix shlib dep
+  * New upstream.
+  * remove xserver-xorg-core dependency. 
+  * add cuda files (closes: #463776)
+  * add bzip2 to build-depends. (closes: #466139)
+  * add Uploaders field that went missing (closes: #454999, #472448)
+
+ -- Randall Donald <rdonald@debian.org>  Sat, 05 Apr 2008 17:59:56 -0700
+
 nvidia-kernel (169.09-1) unstable; urgency=low
 
   * New upstream. (with fan fix!)

Modified: packages/nvidia-graphics-drivers/trunk/debian.binary/conf.mk
URL: http://svn.debian.org/wsvn/pkg-nvidia/packages/nvidia-graphics-drivers/trunk/debian.binary/conf.mk?rev=379&op=diff
==============================================================================
--- packages/nvidia-graphics-drivers/trunk/debian.binary/conf.mk (original)
+++ packages/nvidia-graphics-drivers/trunk/debian.binary/conf.mk Wed May  7 05:09:46 2008
@@ -1,7 +1,4 @@
 #!/usr/bin/make -f
-
-dirname=nv
-
 
 # if you need to redefine these
 KSRC:=$(KSRC)
@@ -9,7 +6,7 @@
 
 
 # redefine the directory for kbuild to find the makefile.
-KBUILD_PARAMS := "-C $(KSRC) SUBDIRS=$(CURDIR)/$(dirname)"
+KBUILD_PARAMS := "-C $(KSRC) SUBDIRS=$(CURDIR)"
 
     
 #### COMPILER DETECTION ####
@@ -78,17 +75,13 @@
 
 %.Makefile :
 	# select which makefile to use.
-	rm -f $(CURDIR)/$(dirname)/Makefile || true
+	rm -f $(CURDIR)/Makefile || true
 	if [ $(PATCHLEVEL) = 6  ]; then \
-	     cd $(CURDIR)/$(dirname) ; \
 	     ln -s Makefile.kbuild Makefile ; \
-	     cd .. ; \
 	fi
 	
 	if [  $(PATCHLEVEL) = 4  ]; then \
-	     cd $(CURDIR)/$(dirname) ; \
 	     ln -s Makefile.nvidia Makefile ; \
-	     cd .. ; \
 	fi
 
 
@@ -108,7 +101,7 @@
 		cp  $(CURDIR)/debian/control.template $(CURDIR)/debian/control; \
 	fi
 	if [ "$(kernel_arch)" = "x86_64" ]; then \
-		cp $(CURDIR)/nv-kernel.o.x86_64 $(CURDIR)/nv/nv-kernel.o ; \
+		cp $(CURDIR)/nv-kernel.o.x86_64 $(CURDIR)/nv-kernel.o ; \
 	fi   
 	touch configure-stamp
 

Modified: packages/nvidia-graphics-drivers/trunk/debian/rules
URL: http://svn.debian.org/wsvn/pkg-nvidia/packages/nvidia-graphics-drivers/trunk/debian/rules?rev=379&op=diff
==============================================================================
--- packages/nvidia-graphics-drivers/trunk/debian/rules (original)
+++ packages/nvidia-graphics-drivers/trunk/debian/rules Wed May  7 05:09:46 2008
@@ -91,7 +91,7 @@
 
 	    # build kernel module source tarball
 	    mkdir -p $(CURDIR)/debian/temp/modules/nvidia-kernel/debian
-	    mkdir -p $(CURDIR)/debian/temp/modules/nvidia-kernel/nv
+	    
 	    cp -a $(CURDIR)/debian.binary/* $(CURDIR)/debian/temp/modules/nvidia-kernel/debian
 	       
 	    for f in `ls $(CURDIR)/debian.binary` ; do \
@@ -105,12 +105,13 @@
 		chmod 0644 $(CURDIR)/debian/temp/modules/nvidia-kernel/debian/$$f ; \
 	    done
 #	    chmod 755 $(CURDIR)/debian/temp/modules/nvidia-kernel/debian/patches	    
-	    cp $(CURDIR)/$(dirname)/usr/src/nv/* $(CURDIR)/debian/temp/modules/nvidia-kernel/nv || true
+	    cp $(CURDIR)/$(dirname)/usr/src/nv/* $(CURDIR)/debian/temp/modules/nvidia-kernel/ || true
 #not yet	    cp $(CURDIR)/Makefile $(CURDIR)/debian/temp/modules/nvidia-kernel/ || true
 	    cp $(CURDIR)/$(dirname_x86_64)/usr/src/nv/nv-kernel.o $(CURDIR)/debian/temp/modules/nvidia-kernel/nv-kernel.o.x86_64 || true
+	    cp $(CURDIR)/debian/temp/modules/nvidia-kernel/Makefile.kbuild $(CURDIR)/debian/temp/modules/nvidia-kernel/Makefile
 	    chmod 755 $(CURDIR)/debian/temp/modules/nvidia-kernel/debian/rules
 	    chown -R root:src $(CURDIR)/debian/temp/modules
-	    chmod 755 $(CURDIR)/debian/temp/modules/nvidia-kernel/debian/patches.old
+	    
 	    tar -jcvf $(CURDIR)/nvidia-kernel.tar.bz2 -C $(CURDIR)/debian/temp modules
 	    rm -rf debian/temp 
 	    touch build-kernel-stamp

Added: packages/nvidia-graphics-drivers/trunk/patches.dpatch.save/00list
URL: http://svn.debian.org/wsvn/pkg-nvidia/packages/nvidia-graphics-drivers/trunk/patches.dpatch.save/00list?rev=379&op=file
==============================================================================
--- packages/nvidia-graphics-drivers/trunk/patches.dpatch.save/00list (added)
+++ packages/nvidia-graphics-drivers/trunk/patches.dpatch.save/00list Wed May  7 05:09:46 2008
@@ -1,0 +1,4 @@
+01_sysfs
+02_pcialias
+03_pci_get_class
+04_minion

Added: packages/nvidia-graphics-drivers/trunk/patches.dpatch.save/01_sysfs
URL: http://svn.debian.org/wsvn/pkg-nvidia/packages/nvidia-graphics-drivers/trunk/patches.dpatch.save/01_sysfs?rev=379&op=file
==============================================================================
--- packages/nvidia-graphics-drivers/trunk/patches.dpatch.save/01_sysfs (added)
+++ packages/nvidia-graphics-drivers/trunk/patches.dpatch.save/01_sysfs Wed May  7 05:09:46 2008
@@ -1,0 +1,207 @@
+#!/bin/sh -e
+
+# Patch by Martin Schlemmer (I believe as posted to the LKML)
+
+if [ $# -lt 1 ]; then
+    echo >&2 "`basename $0`: script expects -patch|-unpatch as argument"
+    exit 1
+fi
+	
+patch_opts="-f -N --no-backup-if-mismatch -p3"
+
+echo $1
+
+case "$1" in
+	-patch) patch $patch_opts -p3 < $0;;
+	-unpatch) patch $patch_opts -p3 -R < $0;;
+	*)
+     		echo >&2 "`basename $0`: script expects -patch|-unpatch as argument"
+		exit 1;;
+esac
+
+exit 0
+
+@DPATCH@
+
+diff -urN NVIDIA-Linux-x86-1.0-5336/usr/src/nv/Makefile.kbuild NVIDIA-Linux-x86-1.0-5336.sysfs/usr/src/nv/Makefile.kbuild
+--- NVIDIA-Linux-x86-1.0-5336/usr/src/nv/Makefile.kbuild	2004-01-15 05:29:12.000000000 +0200
++++ NVIDIA-Linux-x86-1.0-5336.sysfs/usr/src/nv/Makefile.kbuild	2004-05-22 21:04:52.143088872 +0200
+@@ -117,6 +117,9 @@
+   MODULE_OBJECT := $(MODULE_NAME).o
+ else
+   MODULE_OBJECT := $(MODULE_NAME).ko
++
++  # We need this for the conftest.sh tests to work
++  KERNEL_HEADERS += -I$(KERNEL_SOURCES)/include/asm/mach-generic
+ endif
+ 
+ #
+@@ -146,6 +149,16 @@
+   EXTRA_CFLAGS += -DREMAP_PAGE_RANGE_4
+ endif
+ 
++ifeq ($(shell sh $(src)/conftest.sh remap_page_range $(KERNEL_HEADERS)),)
++  # Check if 'conftest.sh remap_page_range' has no output - if so, we
++  # can expect issues ... !
++  $(error Cannot compile tests!  Please check $$KERNEL_HEADERS)
++endif
++
++ifeq ($(shell sh $(src)/conftest.sh class_simple $(KERNEL_HEADERS)), yes)
++  EXTRA_CFLAGS += -DHAVE_CLASS_SIMPLE
++endif
++  
+ #
+ # NVIDIA binary object file includes .common section.
+ #
+diff -urN NVIDIA-Linux-x86-1.0-5336/usr/src/nv/conftest.sh NVIDIA-Linux-x86-1.0-5336.sysfs/usr/src/nv/conftest.sh
+--- NVIDIA-Linux-x86-1.0-5336/usr/src/nv/conftest.sh	2004-01-15 05:29:11.000000000 +0200
++++ NVIDIA-Linux-x86-1.0-5336.sysfs/usr/src/nv/conftest.sh	2004-05-22 20:58:19.382797512 +0200
+@@ -42,6 +42,29 @@
+         fi
+     ;;
+ 
++    class_simple)
++        shift
++        #
++        # Determine if we have struct class_simple needed for limited sysfs
++        # support in 2.6
++        #
++        echo "#include <linux/device.h>
++        void add_test_class(void) {
++           struct class_simple *test_class;
++           test_class = class_simple_create(THIS_MODULE, \"test\");
++        }" > conftest.c
++
++        gcc -Wall -c conftest.c -o conftest.o $* -D__KERNEL__  > output.log 2>&1
++
++        if test -f conftest.o && test -z "`cat output.log`"; then
++          echo "yes"
++        else
++          echo "no"
++        fi
++
++        rm -f conftest.{c,o} output.log
++    ;;
++
+     cc_sanity_check)
+         shift
+         #
+diff -urN NVIDIA-Linux-x86-1.0-5336/usr/src/nv/nv-linux.h NVIDIA-Linux-x86-1.0-5336.sysfs/usr/src/nv/nv-linux.h
+--- NVIDIA-Linux-x86-1.0-5336/usr/src/nv/nv-linux.h	2004-01-15 05:29:11.000000000 +0200
++++ NVIDIA-Linux-x86-1.0-5336.sysfs/usr/src/nv/nv-linux.h	2004-05-22 20:58:19.387796752 +0200
+@@ -448,6 +448,33 @@
+ #  endif // defined(KERNEL_2_4)
+ #endif // defined(CONFIG_DEVFS_FS)
+ 
++#if defined(KERNEL_2_6) && defined(HAVE_CLASS_SIMPLE)
++#  define NV_SYSFS_REGISTER						\
++    nvidia_class = class_simple_create(THIS_MODULE, "nvidia");
++
++#  define NV_SYSFS_ADD_CONTROL						\
++     class_simple_device_add(nvidia_class,				\
++                             MKDEV(NV_MAJOR_DEVICE_NUMBER, 255),	\
++                             NULL, "nvidiactl");
++
++#  define NV_SYSFS_ADD_DEVICE(_name, _minor)				\
++     class_simple_device_add(nvidia_class,				\
++                             MKDEV(NV_MAJOR_DEVICE_NUMBER, _minor),	\
++                             &nv_linux_devices[_minor].dev->dev, _name);
++
++#  define NV_SYSFS_REMOVE_DEVICE(i)					\
++            class_simple_device_remove(MKDEV(NV_MAJOR_DEVICE_NUMBER, i));
++
++#  define NV_SYSFS_UNREGISTER						\
++    class_simple_destroy(nvidia_class);
++#else
++#  define NV_SYSFS_REGISTER
++#  define NV_SYSFS_ADD_CONTROL
++#  define NV_SYSFS_ADD_DEVICE(_name, _minor)
++#  define NV_SYSFS_REMOVE_DEVICE(i)
++#  define NV_SYSFS_UNREGISTER
++#endif
++
+ 
+ /*
+  * Linux 2.5 introduced the five argument version of remap_page_range, all
+diff -urN NVIDIA-Linux-x86-1.0-5336/usr/src/nv/nv.c NVIDIA-Linux-x86-1.0-5336.sysfs/usr/src/nv/nv.c
+--- NVIDIA-Linux-x86-1.0-5336/usr/src/nv/nv.c	2004-01-15 05:29:11.000000000 +0200
++++ NVIDIA-Linux-x86-1.0-5336.sysfs/usr/src/nv/nv.c	2004-05-22 20:58:19.393795840 +0200
+@@ -47,6 +47,10 @@
+ devfs_handle_t nv_devfs_handles[NV_MAX_DEVICES+1];
+ #endif
+ 
++#if defined(KERNEL_2_6) && defined(HAVE_CLASS_SIMPLE)
++struct class_simple *nvidia_class;
++#endif
++
+ // #define NV_DBG_MEM 1
+ #undef NV_DBG_MEM
+ 
+@@ -874,23 +878,31 @@
+     rc = register_chrdev(nv_major, "nvidia", &nv_fops);
+ #endif
+ 
++    NV_SYSFS_REGISTER;
++
+     if (rc < 0)
+     {
+         nv_printf(NV_DBG_ERRORS, "nvidia_init_module: register failed\n");
+         return rc;
+     }
+ 
+-#ifdef CONFIG_DEVFS_FS
++#if defined(CONFIG_DEVFS_FS) || defined(KERNEL_2_6)
+     do
+     {
+         char name[10];
+ 
++# if defined(CONFIG_DEVFS_FS)
+         nv_devfs_handles[0] = NV_DEVFS_REGISTER("nvidiactl", 255);
++# endif
++        NV_SYSFS_ADD_CONTROL;
+ 
+         for (i = 0; i < num_nv_devices; i++)
+         {
+             sprintf(name, "nvidia%d", i);
++# if defined(CONFIG_DEVFS_FS)
+             nv_devfs_handles[i+1] = NV_DEVFS_REGISTER(name, i);
++# endif
++            NV_SYSFS_ADD_DEVICE(name, i);
+         }
+     } while(0);
+ #endif
+@@ -951,6 +963,7 @@
+ #else
+     unregister_chrdev(nv_major, "nvidia");
+ #endif
++    NV_SYSFS_UNREGISTER;
+     return rc;
+ }
+ 
+@@ -1011,15 +1024,24 @@
+         nv_printf(NV_DBG_ERRORS, "nvidia_exit_module: unregister nv failed\n");
+     }
+ 
+-#ifdef CONFIG_DEVFS_FS
++#if defined(CONFIG_DEVFS_FS) || defined(KERNEL_2_6)
+     do {
+         int i;
++# if defined(CONFIG_DEVFS_FS)
+         NV_DEVFS_REMOVE_CONTROL();
+-        for (i = 0; i < num_nv_devices; i++)
++# endif
++        NV_SYSFS_REMOVE_DEVICE(255);
++        for (i = 0; i < num_nv_devices; i++) {
++# if defined(CONFIG_DEVFS_FS)
+             NV_DEVFS_REMOVE_DEVICE(i);
++# endif
++            NV_SYSFS_REMOVE_DEVICE(i);
++        }
+     } while (0);
+ #endif
+ 
++    NV_SYSFS_UNREGISTER;
++
+ #if NV_ENABLE_MEM_TRACKING
+     nv_list_mem(vm_list);
+     nv_list_mem(km_list);

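For readers unfamiliar with the early-2.6 class_simple API that 01_sysfs builds
on, here is a minimal stand-alone sketch of the pattern the NV_SYSFS_* macros
wrap.  It is illustrative only: the module name and major number below are
placeholders, and class_simple_create()/class_simple_device_add() were removed
from later kernels, so this builds only against kernels of the era the patch
targets.

    /*
     * Hedged sketch, not driver code: register a class and a control node
     * through the early-2.6 class_simple interface, the same calls the
     * patch's NV_SYSFS_REGISTER / NV_SYSFS_ADD_CONTROL macros expand to.
     */
    #include <linux/module.h>
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/fs.h>

    #define EXAMPLE_MAJOR 195   /* placeholder; the patch uses NV_MAJOR_DEVICE_NUMBER */

    static struct class_simple *example_class;

    static int __init example_init(void)
    {
        example_class = class_simple_create(THIS_MODULE, "example");
        if (IS_ERR(example_class))
            return PTR_ERR(example_class);

        /* export a control node; udev/hotplug creates the /dev entry from it */
        class_simple_device_add(example_class,
                                MKDEV(EXAMPLE_MAJOR, 255), NULL, "examplectl");
        return 0;
    }

    static void __exit example_exit(void)
    {
        class_simple_device_remove(MKDEV(EXAMPLE_MAJOR, 255));
        class_simple_destroy(example_class);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");

The driver's macros do essentially this, keyed off NV_MAJOR_DEVICE_NUMBER, so
that 2.6 systems without devfs can still get their device nodes created.
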
Added: packages/nvidia-graphics-drivers/trunk/patches.dpatch.save/02_pcialias
URL: http://svn.debian.org/wsvn/pkg-nvidia/packages/nvidia-graphics-drivers/trunk/patches.dpatch.save/02_pcialias?rev=379&op=file
==============================================================================
--- packages/nvidia-graphics-drivers/trunk/patches.dpatch.save/02_pcialias (added)
+++ packages/nvidia-graphics-drivers/trunk/patches.dpatch.save/02_pcialias Wed May  7 05:09:46 2008
@@ -1,0 +1,48 @@
+#!/bin/sh -e
+
+# Patch by Juergen Kreileder <jk@blackdown.de>
+
+if [ $# -lt 1 ]; then
+    echo >&2 "`basename $0`: script expects -patch|-unpatch as argument"
+    exit 1
+fi
+	  
+patch_opts="-f -N --no-backup-if-mismatch -p3"
+	  
+echo $1
+	  
+case "$1" in
+     -patch) patch $patch_opts -p0 < $0;;
+     -unpatch) patch $patch_opts -p0 -R < $0;;
+     *)
+        echo >&2 "`basename $0`: script expects -patch|-unpatch as argument"
+        exit 1;;
+esac
+	       
+exit 0
+	       
+@DPATCH@
+--- /home/jk/nv.c	2004-03-25 07:24:42.000000000 +0100
++++ nv/nv.c	2004-03-25 07:44:24.000000000 +0100
+@@ -47,6 +47,21 @@
+ devfs_handle_t nv_devfs_handles[NV_MAX_DEVICES+1];
+ #endif
+ 
++#if defined(KERNEL_2_6) && defined(HAVE_CLASS_SIMPLE)
++static struct pci_device_id nvidia_pci_tbl[] = {
++    {
++        .class          = (PCI_CLASS_DISPLAY_VGA << 8),
++        .class_mask     = ~0,
++        .vendor         = PCI_VENDOR_ID_NVIDIA,
++        .device         = PCI_ANY_ID,
++        .subvendor      = PCI_ANY_ID,
++        .subdevice      = PCI_ANY_ID,
++    },
++    { }
++};
++MODULE_DEVICE_TABLE (pci, nvidia_pci_tbl);
++#endif
++
+ // #define NV_DBG_MEM 1
+ #undef NV_DBG_MEM
+ 

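A note on how the .class/.class_mask match in 02_pcialias behaves, with a
hedged sketch (the table name below is a placeholder): those fields in a
struct pci_device_id are compared against the device's full 24-bit class code,
and depmod turns the table into a module alias (roughly
pci:v000010DEd*sv*sd*bc03sc00i00*) so hotplug can autoload the module for any
NVIDIA VGA-class adapter.

    /*
     * Illustrative sketch of the same match the patch adds: vendor must be
     * NVIDIA, the whole class code must equal PCI_CLASS_DISPLAY_VGA << 8,
     * and everything else is wildcarded.
     */
    #include <linux/module.h>
    #include <linux/pci.h>

    static struct pci_device_id example_pci_tbl[] = {
        {
            .class      = (PCI_CLASS_DISPLAY_VGA << 8), /* base 0x03, sub 0x00 */
            .class_mask = ~0,                           /* compare all class bits */
            .vendor     = PCI_VENDOR_ID_NVIDIA,
            .device     = PCI_ANY_ID,                   /* any device ID */
            .subvendor  = PCI_ANY_ID,                   /* any subsystem */
            .subdevice  = PCI_ANY_ID,
        },
        { }                                             /* terminating entry */
    };
    MODULE_DEVICE_TABLE(pci, example_pci_tbl);
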
Added: packages/nvidia-graphics-drivers/trunk/patches.dpatch.save/03_pci_get_class
URL: http://svn.debian.org/wsvn/pkg-nvidia/packages/nvidia-graphics-drivers/trunk/patches.dpatch.save/03_pci_get_class?rev=379&op=file
==============================================================================
--- packages/nvidia-graphics-drivers/trunk/patches.dpatch.save/03_pci_get_class (added)
+++ packages/nvidia-graphics-drivers/trunk/patches.dpatch.save/03_pci_get_class Wed May  7 05:09:46 2008
@@ -1,0 +1,40 @@
+#!/bin/sh -e
+
+# Patch by Juergen Kreileder <jk@blackdown.de>
+
+if [ $# -lt 1 ]; then
+    echo >&2 "`basename $0`: script expects -patch|-unpatch as argument"
+    exit 1
+fi
+	  
+patch_opts="-f -N --no-backup-if-mismatch -p3"
+	  
+echo $1
+	  
+case "$1" in
+     -patch) patch $patch_opts -p0 < $0;;
+     -unpatch) patch $patch_opts -p0 -R < $0;;
+     *)
+        echo >&2 "`basename $0`: script expects -patch|-unpatch as argument"
+        exit 1;;
+esac
+	       
+exit 0
+	       
+@DPATCH@
+--- nv/nv.c.orig        2004-07-27 17:30:05.000000000 +0200
++++ nv/nv.c     2004-10-24 18:34:11.035105823 +0200
+@@ -15,6 +15,13 @@
+ #include "nv_compiler.h"
+ #include "os-agp.h"
+ 
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 8))
++#define __VMALLOC_RESERVE (128 << 20)
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9))
++#define pci_find_class(a, b) pci_get_class(a, b)
++#endif
++#endif
++
+ #ifdef MODULE_ALIAS_CHARDEV_MAJOR
+ MODULE_ALIAS_CHARDEV_MAJOR(NV_MAJOR_DEVICE_NUMBER);
+ #endif

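For context on 03_pci_get_class: pci_find_class() went away after 2.6.9 (hence
the patch's version check), and its replacement pci_get_class() additionally
takes a reference on the device it returns.  A hedged, stand-alone sketch of
the same compatibility shim; the compat_* names are invented for illustration:

    /*
     * Hedged sketch, not driver code.  pci_get_class() drops the reference
     * held on 'from' and takes one on the device it returns, so the caller
     * must eventually pci_dev_put() the result; pci_find_class() did no
     * refcounting at all.
     */
    #include <linux/pci.h>
    #include <linux/version.h>

    #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9)
    #define compat_pci_get_class(class, from)  pci_get_class(class, from)
    #define compat_pci_dev_put(dev)            pci_dev_put(dev)
    #else
    #define compat_pci_get_class(class, from)  pci_find_class(class, from)
    #define compat_pci_dev_put(dev)            do { } while (0)
    #endif

    static struct pci_dev *find_first_vga(void)
    {
        /* caller must compat_pci_dev_put() the result when done */
        return compat_pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
    }
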
Added: packages/nvidia-graphics-drivers/trunk/patches.dpatch.save/04_minion
URL: http://svn.debian.org/wsvn/pkg-nvidia/packages/nvidia-graphics-drivers/trunk/patches.dpatch.save/04_minion?rev=379&op=file
==============================================================================
--- packages/nvidia-graphics-drivers/trunk/patches.dpatch.save/04_minion (added)
+++ packages/nvidia-graphics-drivers/trunk/patches.dpatch.save/04_minion Wed May  7 05:09:46 2008
@@ -1,0 +1,5911 @@
+#!/bin/sh -e
+
+# Patch accumulated from http://www.minion.de/files/1.0-6629/
+
+if [ $# -lt 1 ]; then
+    echo >&2 "`basename $0`: script expects -patch|-unpatch as argument"
+    exit 1
+fi
+	
+patch_opts="-f -N --no-backup-if-mismatch -p2"
+
+echo $1
+
+case "$1" in
+	-patch) patch $patch_opts -p1 < $0;;
+	-unpatch) patch $patch_opts -p1 -R < $0;;
+	*)
+     		echo >&2 "`basename $0`: script expects -patch|-unpatch as argument"
+		exit 1;;
+esac
+
+exit 0
+
+@DPATCH@
+
+diff -ruN nvidia-kernel.orig/nv/nv-linux.h nvidia-kernel/nv/nv-linux.h
+--- nvidia-kernel.orig/nv/nv-linux.h	2005-01-11 17:19:49.000000000 -0800
++++ nvidia-kernel/nv/nv-linux.h	2005-02-18 15:08:12.951057467 -0800
+@@ -155,6 +155,10 @@
+ #endif
+ #endif /* defined(NVCPU_X86) */
+ 
++#ifndef get_cpu
++#define get_cpu() smp_processor_id()
++#define put_cpu()
++#endif
+ 
+ #if !defined (list_for_each)
+ #define list_for_each(pos, head) \
+@@ -429,6 +433,30 @@
+         free_pages(ptr, order); \
+     }
+ 
++#define NV_KMEM_CACHE_CREATE(kmem_cache, name, type)            \
++    {                                                           \
++        kmem_cache = kmem_cache_create(name, sizeof(type),      \
++                        0, 0, NULL, NULL);                      \
++    } 
++
++#define NV_KMEM_CACHE_DESTROY(kmem_cache)                       \
++    {                                                           \
++        kmem_cache_destroy(kmem_cache);                         \
++        kmem_cache = NULL;                                      \
++    } 
++
++#define NV_KMEM_CACHE_ALLOC(ptr, kmem_cache, type)              \
++    {                                                           \
++        (ptr) = kmem_cache_alloc(kmem_cache, GFP_KERNEL);       \
++        KM_ALLOC_RECORD(ptr, sizeof(type), "km_cache_alloc");   \
++    } 
++
++#define NV_KMEM_CACHE_FREE(ptr, type, kmem_cache)               \
++    {                                                           \
++        KM_FREE_RECORD(ptr, sizeof(type), "km_cache_free");     \
++        kmem_cache_free(kmem_cache, ptr);                       \
++    } 
++
+ #endif /* !defined NVWATCH */
+ 
+ 
+@@ -480,12 +508,22 @@
+ #define NV_PCI_RESOURCE_SIZE(dev, bar)  ((dev)->resource[(bar) - 1].end - (dev)->resource[(bar) - 1].start + 1)
+ 
+ #define NV_PCI_BUS_NUMBER(dev)        (dev)->bus->number
+-#define NV_PCI_SLOT_NUMBER(dev)       PCI_SLOT((dev)->devfn)
++#define NV_PCI_DEVFN(dev)             (dev)->devfn
++#define NV_PCI_SLOT_NUMBER(dev)       PCI_SLOT(NV_PCI_DEVFN(dev))
+ 
+ #ifdef NV_PCI_GET_CLASS_PRESENT
+ #define NV_PCI_DEV_PUT(dev)                    pci_dev_put(dev)
+ #define NV_PCI_GET_DEVICE(vendor,device,from)  pci_get_device(vendor,device,from)
+-#define NV_PCI_GET_SLOT(bus,devfn)             pci_get_slot(pci_find_bus(0,bus),devfn)
++#define NV_PCI_GET_SLOT(bus,devfn)                                       \
++   ({                                                                    \
++        struct pci_dev *__dev = NULL;                                    \
++        while ((__dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, __dev)))  \
++        {                                                                \
++            if (NV_PCI_BUS_NUMBER(__dev) == bus                          \
++                    && NV_PCI_DEVFN(__dev) == devfn) break;              \
++        }                                                                \
++        __dev;                                                           \
++    })
+ #define NV_PCI_GET_CLASS(class,from)           pci_get_class(class,from)
+ #else
+ #define NV_PCI_DEV_PUT(dev)
+@@ -533,6 +571,7 @@
+  */
+ #if defined(CONFIG_SWIOTLB)
+ #define NV_SWIOTLB 1
++#define NV_SWIOTLB_MAX_RETRIES 16
+ extern int nv_swiotlb;
+ #endif
+ 
+@@ -565,11 +604,6 @@
+ #define PCI_CAP_ID_EXP 0x10
+ #endif
+ 
+-#if defined(KERNEL_2_6) && defined(AGPGART)
+-typedef struct agp_kern_info agp_kern_info;
+-typedef struct agp_memory agp_memory;
+-#endif
+-
+ #if defined(CONFIG_DEVFS_FS)
+ #  if defined(KERNEL_2_6)
+      typedef void* devfs_handle_t;
+@@ -627,75 +661,109 @@
+ #define NV_REMAP_PAGE_RANGE(x...) remap_page_range(x)
+ #endif
+ 
++
++#define NV_PGD_OFFSET(address, kernel, mm)              \
++   ({                                                   \
++        pgd_t *__pgd;                                   \
++        if (!kernel)                                    \
++            __pgd = pgd_offset(mm, address);            \
++        else                                            \
++            __pgd = pgd_offset_k(address);              \
++        __pgd;                                          \
++    })
++
++#define NV_PGD_PRESENT(pgd)                             \
++   ({                                                   \
++         if ((pgd != NULL) &&                           \
++             (pgd_bad(*pgd) || pgd_none(*pgd)))         \
++            /* static */ pgd = NULL;                    \
++         pgd != NULL;                                   \
++    })
++
+ #if defined(pmd_offset_map)
+-#define NV_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \
+-    { \
+-        pg_mid_dir = pmd_offset_map(pg_dir, address); \
+-    }
+-#define NV_PMD_UNMAP(pg_mid_dir) \
+-    { \
+-        pmd_unmap(pg_mid_dir); \
+-    }
+-#else
+-#define NV_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \
+-    { \
+-        pg_mid_dir = pmd_offset(pg_dir, address); \
+-    }
+-#define NV_PMD_UNMAP(pg_mid_dir)
++#define NV_PMD_OFFSET(address, pgd)                     \
++   ({                                                   \
++        pmd_t *__pmd;                                   \
++        __pmd = pmd_offset_map(pgd, address);           \
++   })
++#define NV_PMD_UNMAP(pmd) pmd_unmap(pmd);
++#else
++#if defined(PUD_SHIFT) /* 4-level pgtable */
++#define NV_PMD_OFFSET(address, pgd)                     \
++   ({                                                   \
++        pmd_t *__pmd = NULL;                            \
++        pud_t *__pud;                                   \
++        __pud = pud_offset(pgd, address);               \
++        if ((__pud != NULL) &&                          \
++            !(pud_bad(*__pud) || pud_none(*__pud)))     \
++            __pmd = pmd_offset(__pud, address);         \
++        __pmd;                                          \
++    })
++#else /* 3-level pgtable */
++#define NV_PMD_OFFSET(address, pgd)                     \
++   ({                                                   \
++        pmd_t *__pmd;                                   \
++        __pmd = pmd_offset(pgd, address);               \
++    })
++#endif
++#define NV_PMD_UNMAP(pmd)
+ #endif
+ 
+-#define NV_PMD_PRESENT(pg_mid_dir) \
+-    ({ \
+-        if ( (pg_mid_dir) && (pmd_none(*pg_mid_dir))) { \
+-            NV_PMD_UNMAP(pg_mid_dir); pg_mid_dir = NULL; \
+-        } pg_mid_dir != NULL; \
++#define NV_PMD_PRESENT(pmd)                             \
++   ({                                                   \
++        if ((pmd != NULL) &&                            \
++            (pmd_bad(*pmd) || pmd_none(*pmd)))          \
++        {                                               \
++            NV_PMD_UNMAP(pmd);                          \
++            pmd = NULL; /* mark invalid */              \
++        }                                               \
++        pmd != NULL;                                    \
+     })
+ 
+ #if defined(pte_offset_atomic)
+-#define NV_PTE_OFFSET(addres, pg_mid_dir, pte) \
+-    { \
+-        pte = pte_offset_atomic(pg_mid_dir, address); \
+-        NV_PMD_UNMAP(pg_mid_dir); \
+-    }
+-#define NV_PTE_UNMAP(pte) \
+-    { \
+-        pte_kunmap(pte); \
+-    }
++#define NV_PTE_OFFSET(address, pmd)                     \
++   ({                                                   \
++        pte_t *__pte;                                   \
++        __pte = pte_offset_atomic(pmd, address);        \
++        NV_PMD_UNMAP(pmd); __pte;                       \
++    })
++#define NV_PTE_UNMAP(pte) pte_kunmap(pte);
+ #elif defined(pte_offset)
+-#define NV_PTE_OFFSET(addres, pg_mid_dir, pte) \
+-    { \
+-        pte = pte_offset(pg_mid_dir, address); \
+-        NV_PMD_UNMAP(pg_mid_dir); \
+-    }
++#define NV_PTE_OFFSET(address, pmd)                     \
++   ({                                                   \
++        pte_t *__pte;                                   \
++        __pte = pte_offset(pmd, address);               \
++        NV_PMD_UNMAP(pmd); __pte;                       \
++    })
+ #define NV_PTE_UNMAP(pte)
+ #else
+-#define NV_PTE_OFFSET(addres, pg_mid_dir, pte) \
+-    { \
+-        pte = pte_offset_map(pg_mid_dir, address); \
+-        NV_PMD_UNMAP(pg_mid_dir); \
+-    }
+-#define NV_PTE_UNMAP(pte) \
+-    { \
+-        pte_unmap(pte); \
+-    }
++#define NV_PTE_OFFSET(address, pmd)                     \
++   ({                                                   \
++        pte_t *__pte;                                   \
++        __pte = pte_offset_map(pmd, address);           \
++        NV_PMD_UNMAP(pmd); __pte;                       \
++    })
++#define NV_PTE_UNMAP(pte) pte_unmap(pte);
+ #endif
+ 
+-#define NV_PTE_PRESENT(pte) \
+-    ({ \
+-        if (pte) { \
+-            if (!pte_present(*pte)) { \
+-                NV_PTE_UNMAP(pte); pte = NULL; \
+-            } \
+-        } pte != NULL; \
++#define NV_PTE_PRESENT(pte)                             \
++   ({                                                   \
++        if ((pte != NULL) && !pte_present(*pte))        \
++        {                                               \
++            NV_PTE_UNMAP(pte);                          \
++            pte = NULL; /* mark invalid */              \
++        }                                               \
++        pte != NULL;                                    \
+     })
+ 
+-#define NV_PTE_VALUE(pte) \
+-    ({ \
+-        unsigned long __pte_value = pte_val(*pte); \
+-        NV_PTE_UNMAP(pte); \
+-        __pte_value; \
++#define NV_PTE_VALUE(pte)                               \
++   ({                                                   \
++        unsigned long __pte_value = pte_val(*pte);      \
++        NV_PTE_UNMAP(pte);                              \
++        __pte_value;                                    \
+     })
+ 
++
+ #define NV_PAGE_ALIGN(addr)             ( ((addr) + PAGE_SIZE - 1) / PAGE_SIZE)
+ #define NV_MASK_OFFSET(addr)            ( (addr) & (PAGE_SIZE - 1) )
+ 
+@@ -729,12 +797,21 @@
+         return order;
+     }
+ 
++/* mark memory UC-, rather than UC (don't use _PAGE_PWT) */
++static inline pgprot_t pgprot_noncached_weak(pgprot_t old_prot)
++    {
++        pgprot_t new_prot = old_prot;
++        if (boot_cpu_data.x86 > 3)
++            new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_PCD);
++        return new_prot;
++    }
++
+ #if !defined (pgprot_noncached)
+ static inline pgprot_t pgprot_noncached(pgprot_t old_prot)
+     {
+         pgprot_t new_prot = old_prot;
+         if (boot_cpu_data.x86 > 3)
+-            new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_PCD);
++            new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_PCD | _PAGE_PWT);
+         return new_prot;
+     }
+ #endif
+@@ -776,6 +853,9 @@
+     unsigned long   phys_addr;
+     unsigned long   virt_addr;
+     dma_addr_t      dma_addr;
++#ifdef NV_SG_MAP_BUFFERS
++    struct scatterlist sg_list;
++#endif
+ #ifdef CONFIG_SWIOTLB
+     unsigned long   orig_phys_addr;
+     unsigned long   orig_virt_addr;
+@@ -789,15 +869,11 @@
+     unsigned int   num_pages;
+     unsigned int   order;
+     unsigned int   size;
+-    nv_pte_t      *page_table;          /* list of physical pages allocated */
++    nv_pte_t     **page_table;          /* list of physical pages allocated */
+     void          *key_mapping;         /* mapping used as a key for finding this nv_alloc_t */
+                                         /*   may be the same as page_table                   */
+     unsigned int   class;
+     void          *priv_data;
+-#if defined(NV_SG_MAP_BUFFERS)
+-    struct pci_dev *dev;
+-    struct scatterlist *sg_list;        /* list of physical pages allocated */
+-#endif
+ } nv_alloc_t;
+ 
+ 
+@@ -939,21 +1015,60 @@
+ #if defined(NV_SG_MAP_BUFFERS)
+ static inline int nv_sg_map_buffer(
+     struct pci_dev     *dev,
+-    struct scatterlist *sg_ptr,
++    nv_pte_t          **page_list,
+     void               *base,
+     unsigned int        num_pages
+ )
+ {
+-    int i;
++    struct scatterlist *sg_ptr = &page_list[0]->sg_list;
++    unsigned int i;
+ 
+     sg_ptr->page = virt_to_page(base);
+     sg_ptr->offset = (unsigned long)base & ~PAGE_MASK;
+     sg_ptr->length  = num_pages * PAGE_SIZE;
+ 
++#if defined(NV_SWIOTLB)
++    i = NV_SWIOTLB_MAX_RETRIES;
++    do {
++        if (pci_map_sg(dev, sg_ptr, 1, PCI_DMA_BIDIRECTIONAL) == 0)
++            return 1;
++
++        if (sg_ptr->dma_address & ~PAGE_MASK)
++        {
++            struct scatterlist sg_tmp;
++            pci_unmap_sg(dev, sg_ptr, num_pages, PCI_DMA_BIDIRECTIONAL);
++
++            memset(&sg_tmp, 0, sizeof(struct scatterlist));
++            sg_tmp.page = sg_ptr->page;
++            sg_tmp.offset = sg_ptr->offset;
++            sg_tmp.length = 2048;
++
++            if (pci_map_sg(dev, &sg_tmp, 1, PCI_DMA_BIDIRECTIONAL) == 0)
++                return 1;
++
++            if (pci_map_sg(dev, sg_ptr, 1, PCI_DMA_BIDIRECTIONAL) == 0)
++            {
++                pci_unmap_sg(dev, &sg_tmp, num_pages, PCI_DMA_BIDIRECTIONAL);
++                return 1;
++            }
++
++            pci_unmap_sg(dev, &sg_tmp, num_pages, PCI_DMA_BIDIRECTIONAL);
++        }
++    } while (i-- && sg_ptr->dma_address & ~PAGE_MASK);
++#else
+     if (pci_map_sg(dev, sg_ptr, 1, PCI_DMA_BIDIRECTIONAL) == 0)
+     {
+         return 1;
+     }
++#endif
++
++    if (sg_ptr->dma_address & ~PAGE_MASK)
++    {
++        nv_printf(NV_DBG_ERRORS,
++            "NVRM: VM: nv_sg_map_buffer: failed to obtain aligned mapping\n");
++        pci_unmap_sg(dev, sg_ptr, num_pages, PCI_DMA_BIDIRECTIONAL);
++        return 1;
++    }
+ 
+     NV_FIXUP_SWIOTLB_VIRT_ADDR_BUG(sg_ptr->dma_address);
+ 
+@@ -966,7 +1081,7 @@
+     // note we start with index 1, since index 0 is already correct
+     for (i = 1; i < num_pages; i++)
+     {
+-        sg_ptr[i].dma_address = sg_ptr[0].dma_address + (i * PAGE_SIZE);
++        page_list[i]->sg_list.dma_address = sg_ptr->dma_address + (i * PAGE_SIZE);
+     }
+ 
+     return 0;
+diff -ruN nvidia-kernel.orig/nv/nv-linux.h.orig nvidia-kernel/nv/nv-linux.h.orig
+--- nvidia-kernel.orig/nv/nv-linux.h.orig	1969-12-31 16:00:00.000000000 -0800
++++ nvidia-kernel/nv/nv-linux.h.orig	2005-02-18 15:08:04.602166384 -0800
+@@ -0,0 +1,1165 @@
++/* _NVRM_COPYRIGHT_BEGIN_
++ *
++ * Copyright 2001 by NVIDIA Corporation.  All rights reserved.  All
++ * information contained herein is proprietary and confidential to NVIDIA
++ * Corporation.  Any use, reproduction, or disclosure without the written
++ * permission of NVIDIA Corporation is prohibited.
++ *
++ * _NVRM_COPYRIGHT_END_
++ */
++
++
++#ifndef _NV_LINUX_H_
++#define _NV_LINUX_H_
++
++#include "nv.h"
++
++#include <linux/config.h>
++#include <linux/version.h>
++
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
++#  error This driver does not support pre-2.4 kernels!
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
++#  define KERNEL_2_4
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
++#  error This driver does not support 2.5 kernels!
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 7, 0)
++#  define KERNEL_2_6
++#else
++#  error This driver does not support development kernels!
++#endif
++
++#if defined (__ia64)
++#  if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 13)
++#    error This driver does not support 2.4.12 or earlier kernels!
++#  endif
++#endif
++
++#if defined (CONFIG_SMP) && !defined (__SMP__)
++#define __SMP__
++#endif
++
++#if defined (CONFIG_MODVERSIONS) && !defined (MODVERSIONS)
++#  define MODVERSIONS
++#endif
++
++#if defined (MODVERSIONS) && !defined (KERNEL_2_6)
++#include <linux/modversions.h>
++#endif
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++
++#include <linux/init.h>             /* module_init, module_exit         */
++#include <linux/types.h>            /* pid_t, size_t, __u32, etc        */
++#include <linux/errno.h>            /* error codes                      */
++#include <linux/list.h>             /* circular linked list             */
++#include <linux/stddef.h>           /* NULL, offsetof                   */
++#include <linux/wait.h>             /* wait queues                      */
++
++#include <linux/slab.h>             /* kmalloc, kfree, etc              */
++#include <linux/vmalloc.h>          /* vmalloc, vfree, etc              */
++
++#include <linux/poll.h>             /* poll_wait                        */
++#include <linux/delay.h>            /* mdelay, udelay                   */
++
++#ifdef KERNEL_2_6
++#include <linux/sched.h>            /* suser(), capable() replacement   */
++#include <linux/moduleparam.h>      /* module_param()                   */
++#include <linux/smp_lock.h>         /* kernel_locked                    */
++#include <asm/tlbflush.h>           /* flush_tlb(), flush_tlb_all()     */
++#include <asm/kmap_types.h>         /* page table entry lookup          */
++#endif
++
++#include <linux/pci.h>              /* pci_find_class, etc              */
++#include <linux/interrupt.h>        /* tasklets, interrupt helpers      */
++#include <linux/timer.h>
++
++#include <asm/system.h>             /* cli, sli, save_flags             */
++#include <asm/io.h>                 /* ioremap, virt_to_phys            */
++#include <asm/uaccess.h>            /* access_ok                        */
++#include <asm/page.h>               /* PAGE_OFFSET                      */
++#include <asm/pgtable.h>            /* pte bit definitions              */
++
++#if defined(NVCPU_X86_64) && defined(KERNEL_2_6)
++#include <linux/syscalls.h>         /* sys_ioctl()                      */
++#include <linux/ioctl32.h>          /* register_ioctl32_conversion()    */
++#endif
++
++#if defined(NVCPU_X86_64) && defined(KERNEL_2_4)
++#include <asm/ioctl32.h>            /* sys_ioctl() (ioctl32)            */
++#endif
++
++#include <linux/spinlock.h>
++#include <asm/semaphore.h>
++#include <linux/highmem.h>
++
++#ifdef CONFIG_PROC_FS
++#include <linux/proc_fs.h>
++#endif
++
++#ifdef CONFIG_DEVFS_FS
++#include <linux/devfs_fs_kernel.h>
++#endif
++
++#ifdef CONFIG_KMOD
++#include <linux/kmod.h>
++#endif
++
++#ifdef CONFIG_PM
++#include <linux/pm.h>
++#endif
++
++#ifdef CONFIG_MTRR
++#include <asm/mtrr.h>
++#endif
++
++#ifdef CONFIG_KDB
++#include <linux/kdb.h>
++#include <asm/kdb.h>
++#endif
++
++#if defined (CONFIG_AGP) || defined (CONFIG_AGP_MODULE)
++#ifndef NOAGPGART
++#  define AGPGART
++#  include <linux/agp_backend.h>
++#  include <linux/agpgart.h>
++#endif
++#endif
++
++#if defined(NVCPU_X86) || defined(NVCPU_X86_64)
++#define NV_BUILD_NV_PAT_SUPPORT 1
++#endif
++
++#if defined(NV_BUILD_NV_PAT_SUPPORT)
++#include "pat.h"
++#endif
++
++#if defined(NVCPU_X86)
++#ifndef write_cr4
++#define write_cr4(x) __asm__ ("movl %0,%%cr4" :: "r" (x));
++#endif
++
++#ifndef read_cr4
++#define read_cr4()                                  \
++ ({                                                 \
++      unsigned int __cr4;                           \
++      __asm__ ("movl %%cr4,%0" : "=r" (__cr4));     \
++      __cr4;                                        \
++  })
++#endif
++
++#ifndef wbinvd
++#define wbinvd() __asm__ __volatile__("wbinvd" ::: "memory");
++#endif
++#endif /* defined(NVCPU_X86) */
++
++#ifndef get_cpu
++#define get_cpu() smp_processor_id()
++#define put_cpu()
++#endif
++
++#if !defined (list_for_each)
++#define list_for_each(pos, head) \
++        for (pos = (head)->next; pos != (head); pos = (pos)->next)
++#endif
++
++#ifdef EXPORT_NO_SYMBOLS
++/* don't clutter the kernel namespace with our symbols */
++EXPORT_NO_SYMBOLS;
++#endif
++
++#if !defined(DEBUG) && defined(__GFP_NOWARN)
++#define NV_GFP_KERNEL (GFP_KERNEL | __GFP_NOWARN)
++#define NV_GFP_ATOMIC (GFP_ATOMIC | __GFP_NOWARN)
++#else
++#define NV_GFP_KERNEL (GFP_KERNEL)
++#define NV_GFP_ATOMIC (GFP_ATOMIC)
++#endif
++
++#if defined(NVCPU_IA64)
++#define NV_GFP_DMA32 (NV_GFP_KERNEL | __GFP_DMA)
++#define CACHE_FLUSH()
++#else
++#define NV_GFP_DMA32 (NV_GFP_KERNEL)
++#define CACHE_FLUSH()  asm volatile("wbinvd":::"memory")
++#endif
++
++#ifndef NVWATCH
++
++/* various memory tracking/debugging techniques
++ * disabled for retail builds, enabled for debug builds
++ */
++
++// allow an easy way to convert all debug printfs related to memory
++// management back and forth between 'info' and 'errors'
++#if defined(NV_DBG_MEM)
++#define NV_DBG_MEMINFO NV_DBG_ERRORS
++#else
++#define NV_DBG_MEMINFO NV_DBG_INFO
++#endif
++
++#ifdef DEBUG
++#define NV_ENABLE_MEM_TRACKING 1
++#endif
++
++#if NV_ENABLE_MEM_TRACKING
++#define NV_MEM_TRACKING_PAD_SIZE(size)   ((size) += sizeof(void *))
++#define NV_MEM_TRACKING_HIDE_SIZE(ptr, size)            \
++    if ((ptr) && *(ptr)) {                              \
++        U008 *__ptr;                                    \
++        *(unsigned long *) *(ptr) = (size);             \
++        __ptr = *(ptr); __ptr += sizeof(void *);        \
++        *(ptr) = (void *) __ptr;                        \
++    }
++#define NV_MEM_TRACKING_RETRIEVE_SIZE(ptr, size)        \
++    {                                                   \
++        U008 *__ptr = (ptr); __ptr -= sizeof(void *);   \
++        (ptr) = (void *) __ptr;                         \
++        size = *(unsigned long *) (ptr);                \
++    }
++#else
++#define NV_MEM_TRACKING_PAD_SIZE(size)
++#define NV_MEM_TRACKING_HIDE_SIZE(ptr, size)
++#define NV_MEM_TRACKING_RETRIEVE_SIZE(ptr, size)  ((size) = 0)
++#endif
++
++
++/* poor man's memory allocation tracker.
++ * main intention is just to see how much memory is being used to recognize
++ * when memory usage gets out of control or if memory leaks are happening
++ */
++
++/* keep track of memory usage */
++#if NV_ENABLE_MEM_TRACKING
++extern int vm_usage;
++extern int km_usage;
++extern int fp_usage;
++
++/* print out a running tally of memory allocation amounts, disabled by default */
++// #define POOR_MANS_MEM_CHECK 1
++
++
++/* slightly more advanced memory allocation tracker.
++ * track who's allocating memory and print out a list of currently allocated
++ * memory at key points in the driver
++ */
++
++#define MEMDBG_ALLOC(a,b) (a = kmalloc(b, NV_GFP_ATOMIC))
++#define MEMDBG_FREE(a)    (kfree(a))
++
++#include "nv-memdbg.h"
++
++#undef MEMDBG_ALLOC
++#undef MEMDBG_FREE
++
++extern struct mem_track_t *vm_list;
++extern struct mem_track_t *km_list;
++extern struct mem_track_t *fp_list;
++
++/* print out list of memory allocations */
++/* default to enabled for now */
++#define LIST_MEM_CHECK 1
++
++/* decide which memory types to apply mem trackers to */
++#define VM_CHECKER 1
++#define KM_CHECKER 1
++#define FP_CHECKER 1
++
++#endif  /* NV_ENABLE_MEM_TRACKING */
++
++#define VM_PRINT(str, args...)
++#define KM_PRINT(str, args...)
++#define FP_PRINT(str, args...)
++
++#define VM_ADD_MEM(a,b,c,d)
++#define VM_FREE_MEM(a,b,c,d)
++#define KM_ADD_MEM(a,b,c,d)
++#define KM_FREE_MEM(a,b,c,d)
++#define FP_ADD_MEM(a,b,c,d)
++#define FP_FREE_MEM(a,b,c,d)
++
++#define VM_ALLOC_RECORD(a,b,c)
++#define VM_FREE_RECORD(a,b,c)
++#define KM_ALLOC_RECORD(a,b,c)
++#define KM_FREE_RECORD(a,b,c)
++
++
++#if defined(VM_CHECKER)
++#  if defined(POOR_MANS_MEM_CHECK)
++#    undef  VM_PRINT
++#    define VM_PRINT(str, args...)   printk(str, ##args)
++#  endif
++#  if defined(LIST_MEM_CHECK)
++#    undef  VM_ADD_MEM
++#    define VM_ADD_MEM(a,b,c,d)      nv_add_mem(&vm_list, a, b, c, d)
++#    undef  VM_FREE_MEM
++#    define VM_FREE_MEM(a,b,c,d)     nv_free_mem(&vm_list, a, b, c, d)
++#  endif
++#  undef VM_ALLOC_RECORD
++#  define VM_ALLOC_RECORD(ptr, size, name) \
++      if (ptr) { \
++            vm_usage += size; \
++            VM_PRINT("%s (0x%x: 0x%x): vm_usage is now 0x%x\n", \
++                name, ptr, size, vm_usage); \
++            VM_ADD_MEM(ptr, size, __FILE__, __LINE__); \
++        }
++#  undef VM_FREE_RECORD
++#  define VM_FREE_RECORD(ptr, size, name) \
++        if (ptr) { \
++            vm_usage -= size; \
++            VM_PRINT("%s (0x%x: 0x%x): vm_usage is now 0x%x\n", \
++                name, ptr, size, vm_usage); \
++            VM_FREE_MEM(ptr, size, __FILE__, __LINE__); \
++        }
++#endif
++
++#if defined(KM_CHECKER)
++#  if defined(POOR_MANS_MEM_CHECK)
++#    undef  KM_PRINT
++#    define KM_PRINT(str, args...)   printk(str, ##args)
++#  endif
++#  if defined(LIST_MEM_CHECK)
++#    undef  KM_ADD_MEM
++#    define KM_ADD_MEM(a,b,c,d)      nv_add_mem(&km_list, a, b, c, d)
++#    undef  KM_FREE_MEM
++#    define KM_FREE_MEM(a,b,c,d)     nv_free_mem(&km_list, a, b, c, d)
++#  endif
++#  undef KM_ALLOC_RECORD
++#  define KM_ALLOC_RECORD(ptr, size, name) \
++      if (ptr) { \
++            km_usage += size; \
++            KM_PRINT("%s (0x%x: 0x%x): km_usage is now 0x%x\n", \
++                name, ptr, size, km_usage); \
++            KM_ADD_MEM(ptr, size, __FILE__, __LINE__); \
++        }
++#  undef KM_FREE_RECORD
++#  define KM_FREE_RECORD(ptr, size, name) \
++        if (ptr) { \
++            km_usage -= size; \
++            KM_PRINT("%s (0x%x: 0x%x): km_usage is now 0x%x\n", \
++                name, ptr, size, km_usage); \
++            KM_FREE_MEM(ptr, size, __FILE__, __LINE__); \
++        }
++#endif
++
++#if defined(FP_CHECKER)
++#  if defined(POOR_MANS_MEM_CHECK)
++#    undef  FP_PRINT
++#    define FP_PRINT(str, args...)   printk(str, ##args)
++#  endif
++#if 0  // I had some problems tracking fp mem, so disable for now
++#  if defined(LIST_MEM_CHECK)
++#    undef  FP_ADD_MEM
++#    define FP_ADD_MEM(a,b,c,d)      nv_add_mem(&fp_list, a, b, c, d)
++#    undef  FP_FREE_MEM
++#    define FP_FREE_MEM(a,b,c,d)     nv_free_mem(&fp_list, a, b, c, d)
++#  endif
++#endif
++#endif
++
++/* NV_VMALLOC has to allocate memory under the 4GB mark suitable for
++ * DMA use with the hardware.  This has to be done differently for
++ * different platforms and kernel versions.
++ */
++#if defined(NVCPU_IA64)
++#define NV_VMALLOC(ptr, size) \
++    { \
++        (ptr) = vmalloc_dma(size); \
++        VM_ALLOC_RECORD(ptr, size, "vm_alloc"); \
++    }
++#else
++#define NV_VMALLOC(ptr, size) \
++    { \
++        (ptr) = vmalloc_32(size); \
++        VM_ALLOC_RECORD(ptr, size, "vm_alloc"); \
++    }
++#endif
++
++#define NV_VFREE(ptr, size) \
++    { \
++        VM_FREE_RECORD(ptr, size, "vm_alloc"); \
++        vfree((void *) (ptr)); \
++    }
++
++#define NV_IOREMAP(ptr, physaddr, size) \
++    { \
++        (ptr) = ioremap(physaddr, size); \
++        VM_ALLOC_RECORD(ptr, size, "vm_ioremap"); \
++    }
++
++#define NV_IOREMAP_NOCACHE(ptr, physaddr, size) \
++    { \
++        (ptr) = ioremap_nocache(physaddr, size); \
++        VM_ALLOC_RECORD(ptr, size, "vm_ioremap_nocache"); \
++    }
++
++#define NV_IOUNMAP(ptr, size) \
++    { \
++        VM_FREE_RECORD(ptr, size, "vm_iounmap"); \
++        iounmap(ptr); \
++    }
++
++/* only use this because GFP_KERNEL may sleep..
++ * GFP_ATOMIC is ok, it won't sleep
++ */
++#define NV_KMALLOC(ptr, size) \
++    { \
++        (ptr) = kmalloc(size, NV_GFP_KERNEL); \
++        KM_ALLOC_RECORD(ptr, size, "km_alloc"); \
++    }
++
++#define NV_KMALLOC_ATOMIC(ptr, size) \
++    { \
++        (ptr) = kmalloc(size, NV_GFP_ATOMIC); \
++        KM_ALLOC_RECORD(ptr, size, "km_alloc_atomic"); \
++    }  
++
++
++#define NV_KFREE(ptr, size) \
++    { \
++        KM_FREE_RECORD(ptr, size, "km_free"); \
++        kfree((void *) (ptr)); \
++    }
++
++#define NV_GET_FREE_PAGES(ptr, order) \
++    { \
++        (ptr) = __get_free_pages(NV_GFP_DMA32, order); \
++    }
++        
++#define NV_FREE_PAGES(ptr, order) \
++    { \
++        free_pages(ptr, order); \
++    }
++
++#define NV_KMEM_CACHE_CREATE(kmem_cache, name, type)            \
++    {                                                           \
++        kmem_cache = kmem_cache_create(name, sizeof(type),      \
++                        0, 0, NULL, NULL);                      \
++    } 
++
++#define NV_KMEM_CACHE_DESTROY(kmem_cache)                       \
++    {                                                           \
++        kmem_cache_destroy(kmem_cache);                         \
++        kmem_cache = NULL;                                      \
++    } 
++
++#define NV_KMEM_CACHE_ALLOC(ptr, kmem_cache, type)              \
++    {                                                           \
++        (ptr) = kmem_cache_alloc(kmem_cache, GFP_KERNEL);       \
++        KM_ALLOC_RECORD(ptr, sizeof(type), "km_cache_alloc");   \
++    } 
++
++#define NV_KMEM_CACHE_FREE(ptr, type, kmem_cache)               \
++    {                                                           \
++        KM_FREE_RECORD(ptr, sizeof(type), "km_cache_free");     \
++        kmem_cache_free(kmem_cache, ptr);                       \
++    } 
++
++#endif /* !defined NVWATCH */
++
++
++#if defined (KERNEL_2_4)
++#  define NV_IS_SUSER()                 suser()
++#  define NV_PCI_DEVICE_NAME(dev)       ((dev)->name)
++#  define NV_NUM_CPUS()                 smp_num_cpus
++#  define NV_CLI()                      __cli()
++#  define NV_SAVE_FLAGS(eflags)         __save_flags(eflags)
++#  define NV_RESTORE_FLAGS(eflags)      __restore_flags(eflags)
++#  define NV_MAY_SLEEP()                (!in_interrupt())
++#  define NV_MODULE_PARAMETER(x)        MODULE_PARM(x, "i")
++#endif
++
++#if defined (KERNEL_2_6)
++#  define NV_IS_SUSER()                 capable(CAP_SYS_ADMIN)
++#  define NV_PCI_DEVICE_NAME(dev)       ((dev)->pretty_name)
++#  define NV_NUM_CPUS()                 num_online_cpus()
++#  define NV_CLI()                      local_irq_disable()
++#  define NV_SAVE_FLAGS(eflags)         local_save_flags(eflags)
++#  define NV_RESTORE_FLAGS(eflags)      local_irq_restore(eflags)
++#  define NV_MAY_SLEEP()                (!in_interrupt() && !in_atomic())
++#  define NV_MODULE_PARAMETER(x)        module_param(x, int, 0)
++
++   // the following macro causes problems when used in the same module
++   // as module_param(); undef it so we don't accidentally mix the two
++#  undef  MODULE_PARM
++#endif
++
++   // Earlier 2.4.x kernels don't have pci_disable_device()
++#ifdef NV_PCI_DISABLE_DEVICE_PRESENT
++#define NV_PCI_DISABLE_DEVICE(dev)      pci_disable_device(dev)
++#else
++#define NV_PCI_DISABLE_DEVICE(dev)
++#endif
++
++/* common defines */
++#define GET_MODULE_SYMBOL(mod,sym)    (const void *) inter_module_get(sym)
++#define PUT_MODULE_SYMBOL(sym)        inter_module_put((char *) sym)
++
++#define NV_GET_PAGE_STRUCT(phys_page) virt_to_page(__va(phys_page))
++#define NV_VMA_OFFSET(vma)            (((vma)->vm_pgoff) << PAGE_SHIFT)
++#define NV_VMA_PRIVATE(vma)           ((vma)->vm_private_data)
++
++#define NV_DEVICE_NUMBER(x)           minor((x)->i_rdev)
++#define NV_IS_CONTROL_DEVICE(x)       (minor((x)->i_rdev) == 255)
++
++#define NV_PCI_RESOURCE_START(dev, bar) ((dev)->resource[(bar) - 1].start)
++#define NV_PCI_RESOURCE_SIZE(dev, bar)  ((dev)->resource[(bar) - 1].end - (dev)->resource[(bar) - 1].start + 1)
++
++#define NV_PCI_BUS_NUMBER(dev)        (dev)->bus->number
++#define NV_PCI_DEVFN(dev)             (dev)->devfn
++#define NV_PCI_SLOT_NUMBER(dev)       PCI_SLOT(NV_PCI_DEVFN(dev))
++
++#ifdef NV_PCI_GET_CLASS_PRESENT
++#define NV_PCI_DEV_PUT(dev)                    pci_dev_put(dev)
++#define NV_PCI_GET_DEVICE(vendor,device,from)  pci_get_device(vendor,device,from)
++#define NV_PCI_GET_SLOT(bus,devfn)                                       \
++   ({                                                                    \
++        struct pci_dev *__dev = NULL;                                    \
++        while ((__dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, __dev)))  \
++        {                                                                \
++            if (NV_PCI_BUS_NUMBER(__dev) == bus                          \
++                    && NV_PCI_DEVFN(__dev) == devfn) break;              \
++        }                                                                \
++        __dev;                                                           \
++    })
++#define NV_PCI_GET_CLASS(class,from)           pci_get_class(class,from)
++#else
++#define NV_PCI_DEV_PUT(dev)
++#define NV_PCI_GET_DEVICE(vendor,device,from)  pci_find_device(vendor,device,from)
++#define NV_PCI_GET_SLOT(bus,devfn)             pci_find_slot(bus,devfn)
++#define NV_PCI_GET_CLASS(class,from)           pci_find_class(class,from)
++#endif
++
++#if defined(DEBUG)
++#define NV_PRINT_AT(at)                                                    \
++    nv_printf(NV_DBG_ERRORS, "nvidia lost alloc_t: %4d  %2d  %05x 0x%08x 0x%08x\n", \
++        at->num_pages, at->usage_count, \
++        at->flags, at->page_table, at->key_mapping)
++#else
++#define NV_PRINT_AT(at)
++#endif
++
++// acpi support has been back-ported to the 2.4 kernel, but the 2.4 driver
++// model is not sufficient for full acpi support. it may work in some cases,
++// but not enough for us to officially support this configuration.
++#if defined(CONFIG_ACPI) && defined(KERNEL_2_6)
++#define NV_PM_SUPPORT_ACPI
++#endif
++
++#if defined(CONFIG_APM) || defined(CONFIG_APM_MODULE)
++#define NV_PM_SUPPORT_APM
++#endif
++
++/* add support for iommu.
++ * on x86_64 platforms, this uses the gart to remap pages that are > 32-bits
++ * to < 32-bits.
++ */
++#if defined(NVCPU_X86_64)
++#define NV_SG_MAP_BUFFERS 1
++#endif
++
++/* add support for software i/o tlb support.
++ * normally, you'd expect this to be transparent, but unfortunately this is not
++ * the case. for starters, the sw io tlb is a pool of pre-allocated pages that
++ * are < 32-bits. when we ask to remap a page through this sw io tlb, we are
++ * returned one of these pages, which means we have 2 different pages, rather
++ * than 2 mappings to the same page. secondly, this pre-allocated pool is very
++ * tiny, and the kernel panics when it is exhausted. try to warn the user that
++ * they need to boost the size of their pool.
++ */
++#if defined(CONFIG_SWIOTLB)
++#define NV_SWIOTLB 1
++#define NV_SWIOTLB_MAX_RETRIES 16
++extern int nv_swiotlb;
++#endif
++
++/*
++ * early 2.6 kernels changed their swiotlb codepath, running into a
++ * latent bug that returns virtual addresses when it should return
++ * physical addresses. we try to gracefully account for that, by 
++ * comparing the returned address to what should be it's virtual
++ * equivalent. this should hopefully account for when the bug is 
++ * fixed in the core kernel.
++ */
++#if defined(NV_SWIOTLB) && defined(KERNEL_2_6)
++#define NV_FIXUP_SWIOTLB_VIRT_ADDR_BUG(dma_addr) \
++    if ((dma_addr) == ((dma_addr) | PAGE_OFFSET)) \
++        (dma_addr) = __pa((dma_addr))
++#else
++#define NV_FIXUP_SWIOTLB_VIRT_ADDR_BUG(dma_addr)
++#endif
++
++#ifndef minor
++# define minor(x) MINOR(x)
++#endif
++
++#ifndef IRQ_HANDLED
++typedef void irqreturn_t;
++#define IRQ_HANDLED
++#endif
++
++#ifndef PCI_CAP_ID_EXP
++#define PCI_CAP_ID_EXP 0x10
++#endif
++
++#if defined(KERNEL_2_6) && defined(AGPGART)
++typedef struct agp_kern_info agp_kern_info;
++typedef struct agp_memory agp_memory;
++#endif
++
++#if defined(CONFIG_DEVFS_FS)
++#  if defined(KERNEL_2_6)
++     typedef void* devfs_handle_t;
++#    define NV_DEVFS_REGISTER(_name, _minor)                             \
++     ({                                                                  \
++         devfs_handle_t __handle = NULL;                                 \
++         if (devfs_mk_cdev(MKDEV(NV_MAJOR_DEVICE_NUMBER, _minor),        \
++                 S_IFCHR | S_IRUGO | S_IWUGO, _name) == 0)               \
++            __handle = (void *) 1; /* XXX Fix me! (boolean) */           \
++         __handle;                                                       \
++     })
++
++#    define NV_DEVFS_REMOVE_DEVICE(i) devfs_remove("nvidia%d", i)
++#    define NV_DEVFS_REMOVE_CONTROL() devfs_remove("nvidiactl")
++#  else // defined(KERNEL_2_4)
++#    define NV_DEVFS_REGISTER(_name, _minor)                             \
++     ({                                                                  \
++         devfs_handle_t __handle =                                       \
++             devfs_register(NULL, _name, DEVFS_FL_DEFAULT,               \
++                     NV_MAJOR_DEVICE_NUMBER, _minor,                     \
++                     S_IFCHR | S_IRUGO | S_IWUGO, &nv_fops, NULL);       \
++         __handle;                                                       \
++     })
++
++#    define NV_DEVFS_REMOVE_DEVICE(i)                                    \
++     ({                                                                  \
++         if (nv_devfs_handles[i+1] != NULL)                              \
++             devfs_unregister(nv_devfs_handles[i+1]);                    \
++      })
++#    define NV_DEVFS_REMOVE_CONTROL()                                    \
++     ({                                                                  \
++         if (nv_devfs_handles[0] != NULL)                                \
++             devfs_unregister(nv_devfs_handles[0]);                      \
++      })
++#  endif // defined(KERNEL_2_4)
++#endif // defined(CONFIG_DEVFS_FS)
++
++#if defined(CONFIG_DEVFS_FS) && !defined(KERNEL_2_6)
++#define NV_REGISTER_CHRDEV(x...)    devfs_register_chrdev(x)
++#define NV_UNREGISTER_CHRDEV(x...)  devfs_unregister_chrdev(x)
++#else
++#define NV_REGISTER_CHRDEV(x...)    register_chrdev(x)
++#define NV_UNREGISTER_CHRDEV(x...)  unregister_chrdev(x)
++#endif
++
++#if defined(NV_REMAP_PFN_RANGE_PRESENT)
++#define NV_REMAP_PAGE_RANGE(from, offset, x...) \
++     remap_pfn_range(vma, from, ((offset) >> PAGE_SHIFT), x)
++#elif defined(NV_REMAP_PAGE_RANGE_5)
++#define NV_REMAP_PAGE_RANGE(x...) remap_page_range(vma, x)
++#elif defined(NV_REMAP_PAGE_RANGE_4)
++#define NV_REMAP_PAGE_RANGE(x...) remap_page_range(x)
++#else
++#warning "conftest.sh failed, assuming remap_page_range(4)!"
++#define NV_REMAP_PAGE_RANGE(x...) remap_page_range(x)
++#endif
++
++
++#define NV_PGD_OFFSET(address, kernel, mm)              \
++   ({                                                   \
++        pgd_t *__pgd;                                   \
++        if (!kernel)                                    \
++            __pgd = pgd_offset(mm, address);            \
++        else                                            \
++            __pgd = pgd_offset_k(address);              \
++        __pgd;                                          \
++    })
++
++#define NV_PGD_PRESENT(pgd)                             \
++   ({                                                   \
++         if ((pgd != NULL) &&                           \
++             (pgd_bad(*pgd) || pgd_none(*pgd)))         \
++            /* static */ pgd = NULL;                    \
++         pgd != NULL;                                   \
++    })
++
++#if defined(pmd_offset_map)
++#define NV_PMD_OFFSET(address, pgd)                     \
++   ({                                                   \
++        pmd_t *__pmd;                                   \
++        __pmd = pmd_offset_map(pgd, address);           \
++   })
++#define NV_PMD_UNMAP(pmd) pmd_unmap(pmd);
++#else
++#if defined(PUD_SHIFT) /* 4-level pgtable */
++#define NV_PMD_OFFSET(address, pgd)                     \
++   ({                                                   \
++        pmd_t *__pmd = NULL;                            \
++        pud_t *__pud;                                   \
++        __pud = pud_offset(pgd, address);               \
++        if ((__pud != NULL) &&                          \
++            !(pud_bad(*__pud) || pud_none(*__pud)))     \
++            __pmd = pmd_offset(__pud, address);         \
++        __pmd;                                          \
++    })
++#else /* 3-level pgtable */
++#define NV_PMD_OFFSET(address, pgd)                     \
++   ({                                                   \
++        pmd_t *__pmd;                                   \
++        __pmd = pmd_offset(pgd, address);               \
++    })
++#endif
++#define NV_PMD_UNMAP(pmd)
++#endif
++
++#define NV_PMD_PRESENT(pmd)                             \
++   ({                                                   \
++        if ((pmd != NULL) &&                            \
++            (pmd_bad(*pmd) || pmd_none(*pmd)))          \
++        {                                               \
++            NV_PMD_UNMAP(pmd);                          \
++            pmd = NULL; /* mark invalid */              \
++        }                                               \
++        pmd != NULL;                                    \
++    })
++
++#if defined(pte_offset_atomic)
++#define NV_PTE_OFFSET(address, pmd)                     \
++   ({                                                   \
++        pte_t *__pte;                                   \
++        __pte = pte_offset_atomic(pmd, address);        \
++        NV_PMD_UNMAP(pmd); __pte;                       \
++    })
++#define NV_PTE_UNMAP(pte) pte_kunmap(pte);
++#elif defined(pte_offset)
++#define NV_PTE_OFFSET(address, pmd)                     \
++   ({                                                   \
++        pte_t *__pte;                                   \
++        __pte = pte_offset(pmd, address);               \
++        NV_PMD_UNMAP(pmd); __pte;                       \
++    })
++#define NV_PTE_UNMAP(pte)
++#else
++#define NV_PTE_OFFSET(address, pmd)                     \
++   ({                                                   \
++        pte_t *__pte;                                   \
++        __pte = pte_offset_map(pmd, address);           \
++        NV_PMD_UNMAP(pmd); __pte;                       \
++    })
++#define NV_PTE_UNMAP(pte) pte_unmap(pte);
++#endif
++
++#define NV_PTE_PRESENT(pte)                             \
++   ({                                                   \
++        if ((pte != NULL) && !pte_present(*pte))        \
++        {                                               \
++            NV_PTE_UNMAP(pte);                          \
++            pte = NULL; /* mark invalid */              \
++        }                                               \
++        pte != NULL;                                    \
++    })
++
++#define NV_PTE_VALUE(pte)                               \
++   ({                                                   \
++        unsigned long __pte_value = pte_val(*pte);      \
++        NV_PTE_UNMAP(pte);                              \
++        __pte_value;                                    \
++    })
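++/*
++ * Taken together, the macros above implement a pgd -> pmd -> pte walk
++ * that works across 2-, 3- and 4-level page table layouts. A typical
++ * (sketched) use, for a kernel-space address:
++ *
++ *     pgd = NV_PGD_OFFSET(address, 1, &init_mm);
++ *     if (NV_PGD_PRESENT(pgd)) {
++ *         pmd = NV_PMD_OFFSET(address, pgd);
++ *         if (NV_PMD_PRESENT(pmd)) {
++ *             pte = NV_PTE_OFFSET(address, pmd);
++ *             if (NV_PTE_PRESENT(pte))
++ *                 value = NV_PTE_VALUE(pte);
++ *         }
++ *     }
++ */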
++
++
++#define NV_PAGE_ALIGN(addr)             ( ((addr) + PAGE_SIZE - 1) / PAGE_SIZE)
++#define NV_MASK_OFFSET(addr)            ( (addr) & (PAGE_SIZE - 1) )
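++/*
++ * NV_PAGE_ALIGN() returns the number of pages needed to back 'addr'
++ * bytes, not an aligned address: with 4 KB pages, NV_PAGE_ALIGN(8193)
++ * is 3 and NV_MASK_OFFSET(8193) is 1.
++ */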
++
++#if defined(NVCPU_X86) || defined(NVCPU_X86_64)
++/* this isn't defined in some older kernel header files */
++#define NV_CPU_INTERRUPT_FLAGS_BIT (1<<9)
++#elif defined(NVCPU_IA64)
++/* For whatever reason this is not defined in any header file I could
++ * find.  From the Intel IA64 Architecture Software Developer's Manual,
++ * Volume 2: IA64 System Architecture, page 3-7:
++ */
++#define NV_CPU_INTERRUPT_FLAGS_BIT (1<<14)
++#else
++#error define NV_CPU_INTERRUPT_FLAGS_BIT
++#endif
++
++static inline int NV_IRQL_IS_RAISED(void)
++    {
++        unsigned long int eflags;
++        NV_SAVE_FLAGS(eflags);
++        return !(eflags & NV_CPU_INTERRUPT_FLAGS_BIT);
++    }
++ 
++static inline int nv_calc_order(int size)
++    {
++        int order = 0;
++        while ( ((1 << order) * PAGE_SIZE) < (size))
++        {
++            order++;
++        }
++        return order;
++    }
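++/*
++ * nv_calc_order() returns the smallest allocation order whose
++ * (1 << order) pages cover 'size' bytes: with 4 KB pages,
++ * nv_calc_order(4096) == 0 and nv_calc_order(3 * 4096) == 2
++ * (four pages).
++ */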
++
++/* mark memory UC-, rather than UC (don't use _PAGE_PWT) */
++static inline pgprot_t pgprot_noncached_weak(pgprot_t old_prot)
++    {
++        pgprot_t new_prot = old_prot;
++        if (boot_cpu_data.x86 > 3)
++            new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_PCD);
++        return new_prot;
++    }
++
++#if !defined (pgprot_noncached)
++static inline pgprot_t pgprot_noncached(pgprot_t old_prot)
++    {
++        pgprot_t new_prot = old_prot;
++        if (boot_cpu_data.x86 > 3)
++            new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_PCD | _PAGE_PWT);
++        return new_prot;
++    }
++#endif
++
++#if defined(NV_BUILD_NV_PAT_SUPPORT) && !defined (pgprot_writecombined)
++static inline pgprot_t pgprot_writecombined(pgprot_t old_prot)
++    {
++        pgprot_t new_prot = old_prot;
++        if (boot_cpu_data.x86 > 3)
++        {
++            pgprot_val(old_prot) &= ~(_PAGE_PCD | _PAGE_PWT);
++            new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_WRTCOMB);
++        }
++        return new_prot;
++    }
++#endif
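++/*
++ * On x86 the helpers above select the memory type through the PCD and
++ * PWT page table bits: PCD=1/PWT=0 yields UC- (which PAT may relax),
++ * PCD=1/PWT=1 yields strict UC, and with PAT support built in,
++ * _PAGE_WRTCOMB selects the write-combined PAT entry.
++ */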
++
++#if !defined(page_to_pfn)
++#define page_to_pfn(page)  ((page) - mem_map)
++#endif
++
++/* On IA64 physical memory is partitioned into a cached and an
++ * uncached view controlled by bit 63.  Set this bit when remapping
++ * page ranges.  
++ */
++#if defined(NVCPU_IA64)
++#define phys_to_uncached(addr) ((addr) | ((unsigned long) 1<<63))
++#else
++/* Some other scheme must be used on this platform */
++#define phys_to_uncached(addr) (addr)
++#endif
++
++/*
++ * A block of memory allocated with NV_MEMORY_ALLOCATION_OFFSET is
++ * described in the driver by the following structures:
++ */
++
++typedef struct nv_pte_t {
++    unsigned long   phys_addr;
++    unsigned long   virt_addr;
++    dma_addr_t      dma_addr;
++#ifdef NV_SG_MAP_BUFFERS
++    struct scatterlist sg_list;
++#endif
++#ifdef CONFIG_SWIOTLB
++    unsigned long   orig_phys_addr;
++    unsigned long   orig_virt_addr;
++#endif
++} nv_pte_t;
++
++typedef struct nv_alloc_s {
++    struct nv_alloc_s *next;    
++    atomic_t       usage_count;
++    unsigned int   flags;
++    unsigned int   num_pages;
++    unsigned int   order;
++    unsigned int   size;
++    nv_pte_t     **page_table;          /* list of physical pages allocated */
++    void          *key_mapping;         /* mapping used as a key for finding this nv_alloc_t */
++                                        /*   may be the same as page_table                   */
++    unsigned int   class;
++    void          *priv_data;
++} nv_alloc_t;
++
++
++#define NV_ALLOC_TYPE_PCI      (1<<0)
++#define NV_ALLOC_TYPE_AGP      (1<<1)
++#define NV_ALLOC_TYPE_CONTIG   (1<<2)
++#define NV_ALLOC_TYPE_KERNEL   (1<<3)
++#define NV_ALLOC_TYPE_VMALLOC  (1<<4)
++
++#define NV_ALLOC_MAPPING_SHIFT      16
++#define NV_ALLOC_MAPPING(flags)     (((flags)>>NV_ALLOC_MAPPING_SHIFT)&0xff)
++#define NV_ALLOC_ENC_MAPPING(flags) ((flags)<<NV_ALLOC_MAPPING_SHIFT)
++
++#define NV_ALLOC_MAPPING_CACHED(flags) ((NV_ALLOC_MAPPING(flags) == NV_MEMORY_DEFAULT) || \
++                                        (NV_ALLOC_MAPPING(flags) == NV_MEMORY_WRITEBACK))
++
++#define NV_ALLOC_MAPPING_CONTIG(flags) ((flags) & NV_ALLOC_TYPE_CONTIG)
++#define NV_ALLOC_MAPPING_VMALLOC(flags) ((flags) & NV_ALLOC_TYPE_VMALLOC)
++
++static inline U032 nv_alloc_init_flags(int cached, int agp, int contig, int kernel)
++{
++    U032 flags = NV_ALLOC_ENC_MAPPING(cached);
++    if (agp)    flags |= NV_ALLOC_TYPE_AGP;
++    else        flags |= NV_ALLOC_TYPE_PCI;
++    if (kernel) flags |= NV_ALLOC_TYPE_KERNEL; 
++    if (kernel && !contig) flags |= NV_ALLOC_TYPE_VMALLOC;
++    if (contig && !agp) flags |= NV_ALLOC_TYPE_CONTIG;
++    return flags;
++}
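++/*
++ * For example (assuming NV_MEMORY_WRITEBACK from the core resource
++ * manager headers), nv_alloc_init_flags(NV_MEMORY_WRITEBACK, 0, 0, 1)
++ * describes a cached, non-contiguous PCI allocation for kernel use:
++ * NV_ALLOC_TYPE_PCI | NV_ALLOC_TYPE_KERNEL | NV_ALLOC_TYPE_VMALLOC,
++ * with the mapping type stored in bits 16-23 of the flags word.
++ */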
++
++/* linux-specific version of the old nv_state_t */
++/* this is the general os-specific state structure. the first element *must*
++   be the general state structure, for the generic unix-based code */
++typedef struct {
++    nv_state_t nv_state;
++    atomic_t usage_count;
++
++    struct pci_dev *dev;
++    nv_alloc_t *alloc_queue;
++
++    /* keep track of any pending bottom halves */
++    struct tasklet_struct tasklet;
++
++    /* get a timer callback every second */
++    struct timer_list rc_timer;
++
++    /* per-device locking mechanism for access to core rm */
++    spinlock_t rm_lock;
++    int rm_lock_cpu;
++    int rm_lock_count;
++
++    /* lock for linux-specific data, not used by core rm */
++    struct semaphore ldata_lock;
++
++    /* lock for linux-specific alloc queue */
++    struct semaphore at_lock;
++} nv_linux_state_t;
++
++
++/*
++ * file-private data
++ * We hide a pointer to our data structures in the file's private data
++ * pointer; at times we need to retrieve this data back from the file
++ * structure.
++ */
++
++#define NV_EVENT_FIFO_SIZE 6
++
++typedef struct
++{
++    void *nvptr;
++    U032 num_events;
++    U032 put, get;
++    spinlock_t fp_lock;
++    wait_queue_head_t waitqueue;
++    nv_event_t *event_fifo;     // fifo for storing events
++} nv_file_private_t;
++
++#define FILE_PRIVATE(filep)     ((filep)->private_data)
++
++#define NV_GET_NVFP(filep)      ((nv_file_private_t *) FILE_PRIVATE(filep))
++
++/* for the card devices */
++#define NVL_FROM_FILEP(filep)   (NV_GET_NVFP(filep)->nvptr)
++
++#define NV_GET_NVL_FROM_NV_STATE(nv) \
++    ((nv_linux_state_t *) nv->os_state)
++
++#define NV_STATE_PTR(nvl)   (&((nvl)->nv_state))
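++/*
++ * nv_state_t is embedded as the first member of nv_linux_state_t, so
++ * NV_STATE_PTR(nvl) hands the generic code a nv_state_t; the generic
++ * state's os_state field points back at the enclosing structure, which
++ * is how NV_GET_NVL_FROM_NV_STATE(nv) recovers it.
++ */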
++
++
++#define NV_ATOMIC_SET(data,val)         atomic_set(&(data), (val))
++#define NV_ATOMIC_INC(data)             atomic_inc(&(data))
++#define NV_ATOMIC_DEC(data)             atomic_dec(&(data))
++#define NV_ATOMIC_DEC_AND_TEST(data)    atomic_dec_and_test(&(data))
++#define NV_ATOMIC_READ(data)            atomic_read(&(data))
++
++/*
++ * AMD Athlon processors expose a subtle bug in the Linux
++ * kernel that may lead to AGP memory corruption. Recent
++ * kernel versions had a workaround for this problem, but
++ * 2.4.20 is the first kernel to address it properly. The
++ * page_attr API provides the means to solve the problem.
++ */
++#if defined(NVCPU_X86) && defined(NV_CHANGE_PAGE_ATTR_PRESENT)
++static inline void NV_SET_PAGE_ATTRIB_UNCACHED(nv_pte_t *page_ptr)
++    {
++        struct page *page = virt_to_page(__va(page_ptr->phys_addr));
++        change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
++    }
++static inline void NV_SET_PAGE_ATTRIB_CACHED(nv_pte_t *page_ptr)
++    {
++        struct page *page = virt_to_page(__va(page_ptr->phys_addr));
++#ifdef CONFIG_SWIOTLB
++        if (page_ptr->orig_phys_addr)
++        {
++            nv_printf(NV_DBG_ERRORS, "NVRM: trying to set page attrib on wrong page!\n");
++            os_dbg_breakpoint();
++        }
++#endif
++        change_page_attr(page, 1, PAGE_KERNEL);
++    }
++#else
++#define NV_SET_PAGE_ATTRIB_UNCACHED(page_list)
++#define NV_SET_PAGE_ATTRIB_CACHED(page_list)
++#endif
++
++static inline void NV_UNLOCK_PAGE(nv_pte_t *page_ptr)
++    {
++        unsigned long phys_addr;
++#ifdef CONFIG_SWIOTLB
++        if (page_ptr->orig_phys_addr) phys_addr = page_ptr->orig_phys_addr;
++        else
++#endif
++            phys_addr = page_ptr->phys_addr;
++        ClearPageReserved(NV_GET_PAGE_STRUCT(phys_addr));
++    }
++
++#if defined(NV_SG_MAP_BUFFERS)
++static inline int nv_sg_map_buffer(
++    struct pci_dev     *dev,
++    nv_pte_t          **page_list,
++    void               *base,
++    unsigned int        num_pages
++)
++{
++    struct scatterlist *sg_ptr = &page_list[0]->sg_list;
++    unsigned int i;
++
++    sg_ptr->page = virt_to_page(base);
++    sg_ptr->offset = (unsigned long)base & ~PAGE_MASK;
++    sg_ptr->length  = num_pages * PAGE_SIZE;
++
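++    /*
++     * With the software I/O TLB, pci_map_sg() may hand back a bounce
++     * buffer that is not page aligned. The retry loop below nudges the
++     * swiotlb allocator: it maps a small (2048 byte) throwaway entry to
++     * shift the allocation point, then retries the real mapping until an
++     * aligned address comes back or the retry budget is exhausted.
++     */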
++#if defined(NV_SWIOTLB)
++    i = NV_SWIOTLB_MAX_RETRIES;
++    do {
++        if (pci_map_sg(dev, sg_ptr, 1, PCI_DMA_BIDIRECTIONAL) == 0)
++            return 1;
++
++        if (sg_ptr->dma_address & ~PAGE_MASK)
++        {
++            struct scatterlist sg_tmp;
++            pci_unmap_sg(dev, sg_ptr, num_pages, PCI_DMA_BIDIRECTIONAL);
++
++            memset(&sg_tmp, 0, sizeof(struct scatterlist));
++            sg_tmp.page = sg_ptr->page;
++            sg_tmp.offset = sg_ptr->offset;
++            sg_tmp.length = 2048;
++
++            if (pci_map_sg(dev, &sg_tmp, 1, PCI_DMA_BIDIRECTIONAL) == 0)
++                return 1;
++
++            if (pci_map_sg(dev, sg_ptr, 1, PCI_DMA_BIDIRECTIONAL) == 0)
++            {
++                pci_unmap_sg(dev, &sg_tmp, num_pages, PCI_DMA_BIDIRECTIONAL);
++                return 1;
++            }
++
++            pci_unmap_sg(dev, &sg_tmp, num_pages, PCI_DMA_BIDIRECTIONAL);
++        }
++    } while (i-- && sg_ptr->dma_address & ~PAGE_MASK);
++#else
++    if (pci_map_sg(dev, sg_ptr, 1, PCI_DMA_BIDIRECTIONAL) == 0)
++    {
++        return 1;
++    }
++#endif
++
++    if (sg_ptr->dma_address & ~PAGE_MASK)
++    {
++        nv_printf(NV_DBG_ERRORS,
++            "NVRM: VM: nv_sg_map_buffer: failed to obtain aligned mapping\n");
++        pci_unmap_sg(dev, sg_ptr, num_pages, PCI_DMA_BIDIRECTIONAL);
++        return 1;
++    }
++
++    NV_FIXUP_SWIOTLB_VIRT_ADDR_BUG(sg_ptr->dma_address);
++
++    // this is a bit of a hack to make contiguous allocations easier to handle
++    // nv_sg_load below relies on the page_ptr addresses being filled in, as
++    // well as the sg_ptr having a valid dma_address. most allocations call
++    // nv_sg_map_buffers page-by-page, but contiguous allocations will make
++    // one call for the whole allocation. make sure we correctly propagate
++    // our dma_address through the rest of the sg_ptrs for these allocations.
++    // note we start with index 1, since index 0 is already correct
++    for (i = 1; i < num_pages; i++)
++    {
++        page_list[i]->sg_list.dma_address = sg_ptr->dma_address + (i * PAGE_SIZE);
++    }
++
++    return 0;
++}
++
++static inline int nv_sg_load(
++    struct scatterlist *sg_ptr,
++    nv_pte_t           *page_ptr
++)
++{
++    page_ptr->dma_addr = sg_ptr->dma_address;
++
++#if defined(NV_SWIOTLB)
++    // with the sw io tlb, we've actually switched to different physical pages;
++    // wire in the new page's addresses, but save the originals to free later
++    if (nv_swiotlb)
++    {
++        page_ptr->orig_phys_addr = page_ptr->phys_addr;
++        page_ptr->phys_addr      = page_ptr->dma_addr;
++        page_ptr->orig_virt_addr = page_ptr->virt_addr;
++        page_ptr->virt_addr      = (unsigned long) __va(page_ptr->dma_addr);
++    }
++#endif
++
++    return 0;
++}
++
++// make sure we only unmap the page if it was really mapped through the iommu,
++// in which case the dma_addr and phys_addr will not match.
++static inline void nv_sg_unmap_buffer(
++    struct pci_dev     *dev,
++    struct scatterlist *sg_ptr,
++    nv_pte_t           *page_ptr
++)
++{
++#ifdef CONFIG_SWIOTLB
++    // for sw io tlbs, dma_addr == phys_addr currently, so the check below fails
++    // restore the original settings first, then the following check will work
++    if (nv_swiotlb && page_ptr->orig_phys_addr)
++    {
++        page_ptr->phys_addr      = page_ptr->orig_phys_addr;
++        page_ptr->virt_addr      = page_ptr->orig_virt_addr;
++        page_ptr->orig_phys_addr = 0;
++        page_ptr->orig_virt_addr = 0;
++    }
++#endif
++
++    if (page_ptr->dma_addr != page_ptr->phys_addr)
++    {
++        pci_unmap_sg(dev, sg_ptr, 1, PCI_DMA_BIDIRECTIONAL);
++        page_ptr->dma_addr = 0;
++    }
++}
++#endif  /* NV_SG_MAP_BUFFERS */
++
++/*
++ * Basic support for kgdb assertions.
++ */
++#if defined(CONFIG_X86_REMOTE_DEBUG)
++#include <linux/gdb.h>
++
++#define NV_ASSERT(message, condition) KGDB_ASSERT(message, condition)
++#else
++#if defined(DEBUG)
++#define NV_ASSERT(message, condition) \
++do { \
++    if (!(condition)) { \
++        nv_printf(NV_DBG_ERRORS, "NVRM: ASSERT: %s\n", message); \
++        os_dbg_breakpoint(); \
++    } \
++} while (0)
++#else
++#define NV_ASSERT(message, condition)
++#endif /* DEBUG */
++#endif
++
++
++#endif  /* _NV_LINUX_H_ */
+diff -ruN nvidia-kernel.orig/nv/nv-vm.c nvidia-kernel/nv/nv-vm.c
+--- nvidia-kernel.orig/nv/nv-vm.c	2005-01-11 17:19:49.000000000 -0800
++++ nvidia-kernel/nv/nv-vm.c	2005-02-18 15:08:04.603166251 -0800
+@@ -53,12 +53,13 @@
+  * conflicts. we try to rely on the kernel's provided interfaces when possible,
+  * but need additional flushing on earlier kernels.
+  */
+-
++#if defined(KERNEL_2_4)
+ /* wrap CACHE_FLUSH so we can pass it to smp_call_function */
+ static void cache_flush(void *p)
+ {
+     CACHE_FLUSH();
+ }
++#endif
+ 
+ /*
+  * 2.4 kernels handle flushing in the change_page_attr() call, but kernels 
+@@ -138,13 +139,18 @@
+  */
+ 
+ int nv_vm_malloc_pages(
+-    nv_alloc_t       *at
++    nv_state_t *nv,
++    nv_alloc_t *at
+ )
+ {
+     /* point page_ptr at the start of the actual page list */
+-    nv_pte_t *page_ptr = at->page_table;
++    nv_pte_t *page_ptr = *at->page_table;
+     int i;
+     unsigned long virt_addr = 0, phys_addr;
++#if defined(NV_SG_MAP_BUFFERS)
++    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
++    struct pci_dev *dev = nvl->dev;
++#endif
+ 
+     nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_vm_malloc_pages: %d pages\n",
+         at->num_pages);
+@@ -175,8 +181,7 @@
+         // for amd 64-bit platforms, remap pages to make them 32-bit addressable
+         // in this case, we need the final remapping to be contiguous, so we
+         // have to do the whole mapping at once, instead of page by page
+-        if (nv_sg_map_buffer(at->dev, &at->sg_list[0],
+-                             (void *) virt_addr, at->num_pages))
++        if (nv_sg_map_buffer(dev, at->page_table, (void *) virt_addr, at->num_pages))
+         {
+             nv_printf(NV_DBG_ERRORS,
+                 "NVRM: VM: nv_vm_malloc_pages: failed to sg map contiguous pages\n");
+@@ -224,7 +229,7 @@
+         /* lock the page for dma purposes */
+         SetPageReserved(NV_GET_PAGE_STRUCT(phys_addr));
+ 
+-        page_ptr = &at->page_table[i];
++        page_ptr = at->page_table[i];
+         page_ptr->phys_addr = phys_addr;
+         page_ptr->virt_addr = virt_addr;
+         page_ptr->dma_addr = page_ptr->phys_addr;
+@@ -235,7 +240,7 @@
+ #if defined(NV_SG_MAP_BUFFERS)
+         if (!NV_ALLOC_MAPPING_CONTIG(at->flags))
+         {
+-            if (nv_sg_map_buffer(at->dev, &at->sg_list[i], 
++            if (nv_sg_map_buffer(dev, &at->page_table[i],
+                                  __va(page_ptr->phys_addr), 1))
+             {
+                 nv_printf(NV_DBG_ERRORS,
+@@ -243,7 +248,7 @@
+                 goto failed;
+             }
+         }
+-        nv_sg_load(&at->sg_list[i], page_ptr);
++        nv_sg_load(&at->page_table[i]->sg_list, page_ptr);
+ #endif
+         virt_addr += PAGE_SIZE;
+     }
+@@ -258,7 +263,7 @@
+ 
+     for (; i >= 0; i--)
+     {
+-        page_ptr = &at->page_table[i];
++        page_ptr = at->page_table[i];
+ 
+         // if we failed when allocating this page, skip over it
+         // but if we failed pci_map_sg, make sure to free this page
+@@ -267,7 +272,7 @@
+             NV_UNLOCK_PAGE(page_ptr);
+ #if defined(NV_SG_MAP_BUFFERS)
+             if (!NV_ALLOC_MAPPING_CONTIG(at->flags))
+-                nv_sg_unmap_buffer(at->dev, &at->sg_list[i], page_ptr);
++                nv_sg_unmap_buffer(dev, &at->page_table[i]->sg_list, page_ptr);
+ #endif
+             if (!NV_ALLOC_MAPPING_CACHED(at->flags))
+                 NV_SET_PAGE_ATTRIB_CACHED(page_ptr);
+@@ -279,15 +284,15 @@
+ 
+     if (NV_ALLOC_MAPPING_CONTIG(at->flags))
+     {
+-        page_ptr = at->page_table;
++        page_ptr = *at->page_table;
+ #if defined(NV_SG_MAP_BUFFERS)
+-        nv_sg_unmap_buffer(at->dev, &at->sg_list[0], page_ptr);
++        nv_sg_unmap_buffer(dev, &at->page_table[0]->sg_list, page_ptr);
+ #endif
+         NV_FREE_PAGES(page_ptr->virt_addr, at->order);
+     }
+     else if (NV_ALLOC_MAPPING_VMALLOC(at->flags))
+     {
+-        page_ptr = at->page_table;
++        page_ptr = *at->page_table;
+         NV_VFREE((void *) page_ptr->virt_addr, at->size);
+     }
+ 
+@@ -296,7 +301,7 @@
+ 
+ // unlock the pages we've locked down for dma purposes
+ void nv_vm_unlock_pages(
+-    nv_alloc_t       *at
++    nv_alloc_t *at
+ )
+ {
+     nv_pte_t *page_ptr;
+@@ -315,17 +320,22 @@
+ 
+     for (i = 0; i < at->num_pages; i++)
+     {
+-        page_ptr = &at->page_table[i];
++        page_ptr = at->page_table[i];
+         NV_UNLOCK_PAGE(page_ptr);
+     }
+ }
+ 
+ void nv_vm_free_pages(
+-    nv_alloc_t       *at
++    nv_state_t *nv,
++    nv_alloc_t *at
+ )
+ {
+     nv_pte_t *page_ptr;
+     int i;
++#if defined(NV_SG_MAP_BUFFERS)
++    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
++    struct pci_dev *dev = nvl->dev;
++#endif
+ 
+     nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_vm_free_pages: %d pages\n",
+         at->num_pages);
+@@ -339,10 +349,10 @@
+ 
+     for (i = 0; i < at->num_pages; i++)
+     {
+-        page_ptr = &at->page_table[i];
++        page_ptr = at->page_table[i];
+ #if defined(NV_SG_MAP_BUFFERS)
+         if (!NV_ALLOC_MAPPING_CONTIG(at->flags))
+-            nv_sg_unmap_buffer(at->dev, &at->sg_list[i], page_ptr);
++            nv_sg_unmap_buffer(dev, &at->page_table[i]->sg_list, page_ptr);
+ #endif
+         if (!NV_ALLOC_MAPPING_CACHED(at->flags))
+             NV_SET_PAGE_ATTRIB_CACHED(page_ptr);
+@@ -353,15 +363,15 @@
+ 
+     if (NV_ALLOC_MAPPING_CONTIG(at->flags))
+     {
+-        page_ptr = at->page_table;
++        page_ptr = *at->page_table;
+ #if defined(NV_SG_MAP_BUFFERS)
+-        nv_sg_unmap_buffer(at->dev, &at->sg_list[0], page_ptr);
++        nv_sg_unmap_buffer(dev, &at->page_table[0]->sg_list, page_ptr);
+ #endif
+         NV_FREE_PAGES(page_ptr->virt_addr, at->order);
+     }
+     else if (NV_ALLOC_MAPPING_VMALLOC(at->flags))
+     {
+-        page_ptr = at->page_table;
++        page_ptr = *at->page_table;
+         NV_VFREE((void *) page_ptr->virt_addr, at->size);
+     }
+ }
+diff -ruN nvidia-kernel.orig/nv/nv-vm.h nvidia-kernel/nv/nv-vm.h
+--- nvidia-kernel.orig/nv/nv-vm.h	2005-01-11 17:19:49.000000000 -0800
++++ nvidia-kernel/nv/nv-vm.h	2005-02-18 15:07:19.974094183 -0800
+@@ -11,9 +11,9 @@
+ #ifndef _NV_VM_H_
+ #define _NV_VM_H_
+ 
+-int      nv_vm_malloc_pages(nv_alloc_t *);
++int      nv_vm_malloc_pages(nv_state_t *, nv_alloc_t *);
+ void     nv_vm_unlock_pages(nv_alloc_t *);
+-void     nv_vm_free_pages(nv_alloc_t *);
++void     nv_vm_free_pages(nv_state_t *, nv_alloc_t *);
+ 
+ #if defined(NV_DBG_MEM)
+ void     nv_vm_list_page_count(nv_pte_t *, unsigned long);
+@@ -21,11 +21,12 @@
+ #define  nv_vm_list_page_count(page_ptr, num_pages)
+ #endif
+ 
+-#define nv_vm_unlock_and_free_pages(at_count, at) \
+-    if (at->page_table) {                         \
+-        if (at_count == 0)                        \
+-            nv_vm_unlock_pages(at);               \
+-        nv_vm_free_pages(at);                     \
++#define NV_VM_UNLOCK_AND_FREE_PAGES(nv, at_count, at)   \
++    if (at->page_table)                                 \
++    {                                                   \
++        if (at_count == 0)                              \
++            nv_vm_unlock_pages(at);                     \
++        nv_vm_free_pages(nv, at);                       \
+     }
+ 
+ #endif
+diff -ruN nvidia-kernel.orig/nv/nv.c nvidia-kernel/nv/nv.c
+--- nvidia-kernel.orig/nv/nv.c	2005-01-11 17:19:49.000000000 -0800
++++ nvidia-kernel/nv/nv.c	2005-02-18 15:08:12.955056936 -0800
+@@ -63,6 +63,8 @@
+ int nv_swiotlb = 0;
+ #endif
+ 
++static kmem_cache_t *nv_pte_t_cache = NULL;
++
+ // allow an easy way to convert all debug printfs related to events
+ // back and forth between 'info' and 'errors'
+ #if defined(NV_DBG_EVENTS)
+@@ -266,42 +268,41 @@
+ )
+ {
+     nv_alloc_t *at;
+-    int pt_size;
++    unsigned int pt_size, i;
+ 
+     NV_KMALLOC(at, sizeof(nv_alloc_t));
+     if (at == NULL)
+     {
+-        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate alloc_t\n");
++        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate alloc info\n");
+         return NULL;
+     }
+ 
+     memset(at, 0, sizeof(nv_alloc_t));
+ 
+-    pt_size = num_pages *  sizeof(nv_pte_t);
+-    NV_KMALLOC(at->page_table, pt_size);
+-    if (at->page_table == NULL)
++    pt_size = num_pages *  sizeof(nv_pte_t *);
++    if (os_alloc_mem((void **)&at->page_table, pt_size) != RM_OK)
+     {
+         nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate page table\n");
+         NV_KFREE(at, sizeof(nv_alloc_t));
+         return NULL;
+     }
++
+     memset(at->page_table, 0, pt_size);
+     at->num_pages = num_pages;
+     NV_ATOMIC_SET(at->usage_count, 0);
+ 
+-#if defined(NV_SG_MAP_BUFFERS)
+-    at->dev = dev;
+-    pt_size = num_pages * sizeof(struct scatterlist);
+-    NV_KMALLOC(at->sg_list, pt_size);
+-    if (at->sg_list == NULL)
++    for (i = 0; i < at->num_pages; i++)
+     {
+-        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate scatter gather list\n");
+-        NV_KFREE(at->page_table, pt_size);
+-        NV_KFREE(at, sizeof(nv_alloc_t));
+-        return NULL;
++        NV_KMEM_CACHE_ALLOC(at->page_table[i], nv_pte_t_cache, nv_pte_t);
++        if (at->page_table[i] == NULL)
++        {
++            nv_printf(NV_DBG_ERRORS,
++                      "NVRM: failed to allocate page table entry\n");
++            nvos_free_alloc(at);
++            return NULL;
++        }
++        memset(at->page_table[i], 0, sizeof(nv_pte_t));
+     }
+-    memset(at->sg_list, 0, pt_size);
+-#endif
+ 
+     return at;
+ }
+@@ -311,6 +312,8 @@
+     nv_alloc_t *at
+ )
+ {
++    unsigned int pt_size, i;
++
+     if (at == NULL)
+         return -1;
+ 
+@@ -320,13 +323,16 @@
+     // we keep the page_table around after freeing the pages
+     // for bookkeeping reasons. Free the page_table and assume
+     // the underlying pages are already unlocked and freed.
+-    if (at->page_table)
+-        NV_KFREE(at->page_table, at->num_pages * sizeof(nv_pte_t));
+-
+-#if defined(NV_SG_MAP_BUFFERS)
+-    if (at->sg_list)
+-        NV_KFREE(at->sg_list, at->num_pages * sizeof(struct scatterlist));
+-#endif
++    if (at->page_table != NULL)
++    {
++        for (i = 0; i < at->num_pages; i++)
++        {
++            if (at->page_table[i] != NULL)
++                NV_KMEM_CACHE_FREE(at->page_table[i], nv_pte_t, nv_pte_t_cache);
++        }
++        pt_size = at->num_pages * sizeof(nv_pte_t *);
++        os_free_mem(at->page_table);
++    }
+ 
+     NV_KFREE(at, sizeof(nv_alloc_t));
+ 
+@@ -594,7 +600,7 @@
+             int i;
+             for (i = 0; i < at->num_pages; i++)
+             {
+-                unsigned long offset = at->page_table[i].phys_addr;
++                unsigned long offset = at->page_table[i]->phys_addr;
+                 if ((address >= offset) &&
+                     (address < (offset + PAGE_SIZE)))
+                     return at;
+@@ -931,6 +937,13 @@
+     }
+ #endif
+ 
++    NV_KMEM_CACHE_CREATE(nv_pte_t_cache, "nv_pte_t", nv_pte_t);
++    if (nv_pte_t_cache == NULL)
++    {
++        nv_printf(NV_DBG_ERRORS, "NVRM: pte cache allocation failed\n");
++        goto failed;
++    }
++ 
+     // Init the resource manager
+     if (!rm_init_rm())
+     {
+@@ -972,6 +985,14 @@
+     return 0;
+ 
+ failed:
++    if (nv_pte_t_cache != NULL)
++        NV_KMEM_CACHE_DESTROY(nv_pte_t_cache);
++
++#if defined(NV_PM_SUPPORT_APM)
++    for (i = 0; i < num_nv_devices; i++)
++        if (apm_nv_dev[i] != NULL) pm_unregister(apm_nv_dev[i]);
++#endif
++
+ #ifdef CONFIG_DEVFS_FS
+     NV_DEVFS_REMOVE_CONTROL();
+     for (i = 0; i < num_nv_devices; i++)
+@@ -1101,6 +1122,8 @@
+     nv_printf(NV_DBG_ERRORS, "NVRM: final mem usage: vm 0x%x km 0x%x fp 0x%x\n",
+         vm_usage, km_usage, fp_usage);
+ #endif
++
++    NV_KMEM_CACHE_DESTROY(nv_pte_t_cache);
+ }
+ 
+ module_init(nvidia_init_module);
+@@ -1249,15 +1272,15 @@
+     index = (address - vma->vm_start)>>PAGE_SHIFT;
+ 
+     // save that index into our page list (make sure it doesn't already exist)
+-    if (at->page_table[index].phys_addr)
++    if (at->page_table[index]->phys_addr)
+     {
+         nv_printf(NV_DBG_ERRORS, "NVRM: page slot already filled in nopage handler!\n");
+         os_dbg_breakpoint();
+     }
+ 
+-    at->page_table[index].phys_addr = (page_to_pfn(page_ptr) << PAGE_SHIFT);
+-    at->page_table[index].dma_addr  = (page_to_pfn(page_ptr) << PAGE_SHIFT);
+-    at->page_table[index].virt_addr = (unsigned long) __va(page_to_pfn(page_ptr) << PAGE_SHIFT);
++    at->page_table[index]->phys_addr = (page_to_pfn(page_ptr) << PAGE_SHIFT);
++    at->page_table[index]->dma_addr  = (page_to_pfn(page_ptr) << PAGE_SHIFT);
++    at->page_table[index]->virt_addr = (unsigned long) __va(page_to_pfn(page_ptr) << PAGE_SHIFT);
+ 
+     return page_ptr;
+ #endif
+@@ -1551,7 +1574,7 @@
+     /* NV fb space */
+     else if (IS_FB_OFFSET(nv, NV_VMA_OFFSET(vma), vma->vm_end - vma->vm_start))
+     {
+-        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++        vma->vm_page_prot = pgprot_noncached_weak(vma->vm_page_prot);
+         if (NV_REMAP_PAGE_RANGE(vma->vm_start,
+                              NV_VMA_OFFSET(vma),
+                              vma->vm_end - vma->vm_start,
+@@ -1604,9 +1627,8 @@
+         }
+         nv_vm_list_page_count(at->page_table, at->num_pages);
+ 
+-        /* prevent the swapper from swapping it out */
+-        /* mark the memory i/o so the buffers aren't dumped on core dumps */
+-        vma->vm_flags |= (VM_LOCKED | VM_IO);
++        // mark it as IO so that we don't dump it on core dump
++        vma->vm_flags |= VM_IO;
+     }
+ 
+     /* Magic allocator */
+@@ -1670,7 +1692,7 @@
+         start = vma->vm_start;
+         while (pages--)
+         {
+-            page = (unsigned long) at->page_table[i++].phys_addr;
++            page = (unsigned long) at->page_table[i++]->phys_addr;
+             if (NV_REMAP_PAGE_RANGE(start, page, PAGE_SIZE, vma->vm_page_prot))
+                 return -EAGAIN;
+             start += PAGE_SIZE;
+@@ -2368,8 +2390,8 @@
+ 
+         for (i = 0; i < at->num_pages; i++)
+         {
+-            if (address == at->page_table[i].phys_addr)
+-                return (void *)(at->page_table[i].virt_addr + offset);
++            if (address == at->page_table[i]->phys_addr)
++                return (void *)(at->page_table[i]->virt_addr + offset);
+         }
+     }
+ 
+@@ -2400,8 +2422,8 @@
+ 
+         for (i = 0; i < at->num_pages; i++)
+         {
+-            if (address == at->page_table[i].phys_addr)
+-                return (unsigned long)at->page_table[i].dma_addr + offset;
++            if (address == at->page_table[i]->phys_addr)
++                return (unsigned long)at->page_table[i]->dma_addr + offset;
+         }
+     }
+ 
+@@ -2427,9 +2449,9 @@
+             unsigned long address = dma_address & PAGE_MASK;
+             for (i = 0; i < at->num_pages; i++)
+             {
+-                if (address == at->page_table[i].dma_addr)
++                if (address == at->page_table[i]->dma_addr)
+                 {
+-                    return at->page_table[i].phys_addr + offset;
++                    return at->page_table[i]->phys_addr + offset;
+                 }
+             }
+         }
+@@ -2466,7 +2488,7 @@
+         int i;
+         for (i = 0; i < at->num_pages; i++)
+         {
+-            if (address == (unsigned long) at->page_table[i].dma_addr)
++            if (address == (unsigned long) at->page_table[i]->dma_addr)
+             {
+                 return (void *)((unsigned long) at->key_mapping + 
+                     (i * PAGE_SIZE));
+@@ -2492,26 +2514,23 @@
+ )
+ {
+     struct mm_struct *mm;
+-    pgd_t *pg_dir;
+-    pmd_t *pg_mid_dir;
+-    pte_t *pte;
++    pgd_t *pgd = NULL;
++    pmd_t *pmd = NULL;
++    pte_t *pte = NULL;
+     unsigned long retval;
+ 
+     mm = (kern) ? &init_mm : current->mm;
+     spin_lock(&mm->page_table_lock);
+ 
+-    if (kern) pg_dir = pgd_offset_k(address);
+-    else pg_dir = pgd_offset(mm, address);
+-
+-    if (!pg_dir || pgd_none(*pg_dir))
++    pgd = NV_PGD_OFFSET(address, kern, mm);
++    if (!NV_PGD_PRESENT(pgd))
+         goto failed;
+ 
+-    NV_PMD_OFFSET(address, pg_dir, pg_mid_dir);
+-    if (!NV_PMD_PRESENT(pg_mid_dir))
++    pmd = NV_PMD_OFFSET(address, pgd);
++    if (!NV_PMD_PRESENT(pmd))
+         goto failed;
+ 
+-    NV_PTE_OFFSET(address, pg_mid_dir, pte);
+-
++    pte = NV_PTE_OFFSET(address, pmd);
+     if (!NV_PTE_PRESENT(pte))
+         goto failed;
+ 
+@@ -2630,7 +2649,7 @@
+             nvl_add_alloc(nvl, at);
+         } else {
+             /* use nvidia's nvagp support */
+-            if (nv_vm_malloc_pages(at))
++            if (nv_vm_malloc_pages(nv, at))
+                 goto failed;
+ 
+             at->class = class;
+@@ -2654,7 +2673,7 @@
+             if (rm_status)
+             {
+                 nvl_remove_alloc(nvl, at);
+-                nv_vm_unlock_and_free_pages(NV_ATOMIC_READ(at->usage_count), at);
++                NV_VM_UNLOCK_AND_FREE_PAGES(nv, NV_ATOMIC_READ(at->usage_count), at);
+                 goto failed;
+             }
+             at->priv_data = *priv_data;
+@@ -2666,12 +2685,12 @@
+     else 
+     {
+ 
+-        if (nv_vm_malloc_pages(at))
++        if (nv_vm_malloc_pages(nv, at))
+             goto failed;
+ 
+         if (kernel)
+         {
+-            *pAddress = (void *) at->page_table[0].virt_addr;
++            *pAddress = (void *) at->page_table[0]->virt_addr;
+         }
+         else
+         {
+@@ -2679,7 +2698,7 @@
+              * so use the first page, which is page-aligned. this way, our 
+              * allocated page table does not need to be page-aligned
+              */
+-            *pAddress = (void *) at->page_table[0].phys_addr;
++            *pAddress = (void *) at->page_table[0]->phys_addr;
+         }
+ 
+         nvl_add_alloc(nvl, at);
+@@ -2743,7 +2762,7 @@
+             rmStatus = rm_free_agp_pages(nv, pAddress, priv_data);
+             if (rmStatus == RM_OK)
+             {
+-                nv_vm_unlock_and_free_pages(NV_ATOMIC_READ(at->usage_count), at);
++                NV_VM_UNLOCK_AND_FREE_PAGES(nv, NV_ATOMIC_READ(at->usage_count), at);
+             }
+         }
+     } else {
+@@ -2759,7 +2778,7 @@
+ 
+         NV_ATOMIC_DEC(at->usage_count);
+ 
+-        nv_vm_unlock_and_free_pages(NV_ATOMIC_READ(at->usage_count), at);
++        NV_VM_UNLOCK_AND_FREE_PAGES(nv, NV_ATOMIC_READ(at->usage_count), at);
+     }
+ 
+     if (NV_ATOMIC_READ(at->usage_count) == 0)
+@@ -2769,21 +2788,13 @@
+ }
+ 
+ 
+-/* avoid compiler warnings on UP kernels, 
+- * when spinlock macros are defined away 
+- */
+-#define NO_COMPILER_WARNINGS(nvl) \
+-    if (nvl == NULL) return
+-
+-
+ static void nv_lock_init_locks
+ ( 
+     nv_state_t *nv
+ )
+ {
+-    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+-
+-    NO_COMPILER_WARNINGS(nvl);
++    nv_linux_state_t *nvl;
++    nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+ 
+     spin_lock_init(&nvl->rm_lock);
+ 
+@@ -2799,28 +2810,33 @@
+     nv_state_t *nv
+ )
+ {
+-    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+-    NO_COMPILER_WARNINGS(nvl);
++    nv_linux_state_t *nvl;
++    int cpu;
++
++    nvl = NV_GET_NVL_FROM_NV_STATE(nv);
++    cpu = get_cpu();
+ 
+-    if (nvl->rm_lock_cpu == smp_processor_id())
++    if (nvl->rm_lock_cpu == cpu)
+     {
+         nvl->rm_lock_count++;
++        put_cpu();
+         return;
+     }
+ 
++    put_cpu();
+     spin_unlock_wait(&nvl->rm_lock);
+     spin_lock_irq(&nvl->rm_lock);
+ 
+-   nvl->rm_lock_cpu = smp_processor_id();
+-   nvl->rm_lock_count = 1;
++    nvl->rm_lock_cpu = smp_processor_id();
++    nvl->rm_lock_count = 1;
+ }
+ 
+ void NV_API_CALL nv_unlock_rm(
+     nv_state_t *nv
+ )
+ {
+-    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+-    NO_COMPILER_WARNINGS(nvl);
++    nv_linux_state_t *nvl;
++    nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+ 
+     if (--nvl->rm_lock_count)
+         return;
+@@ -2987,32 +3003,47 @@
+      */
+     if ( (!NV_AGP_ENABLED(nv)) && (config & NVOS_AGP_CONFIG_NVAGP) )
+     {
+-        /* make sure the user does not have agpgart loaded */
+-        if (inter_module_get("drm_agp")) {
++#if defined(KERNEL_2_4)
++        if (inter_module_get("drm_agp"))
++        {
+             inter_module_put("drm_agp");
+-            nv_printf(NV_DBG_WARNINGS, "NVRM: not using NVAGP, AGPGART is loaded!!\n");
+-        } else {
+-#if defined(CONFIG_X86_64) && defined(CONFIG_GART_IOMMU)
++            nv_printf(NV_DBG_WARNINGS, "NVRM: not using NVAGP, AGPGART is loaded!\n");
++            return status;
++        }
++#elif defined(AGPGART)
++        int error;
++        /*
++         * We can only safely use NvAGP when no backend has been
++         * registered with the AGPGART frontend. This condition
++         * is only met when the acquire function returns -EINVAL.
++         *
++         * Other return codes indicate that a backend is present
++         * and was either acquired, busy or else unavailable.
++         */
++        if ((error = agp_backend_acquire()) != -EINVAL)
++        {
++            if (!error) agp_backend_release();
+             nv_printf(NV_DBG_WARNINGS,
+-                "NVRM: not using NVAGP, kernel was compiled with GART_IOMMU support!!\n");
+-#else
+-            status = rm_init_agp(nv);
+-            if (status == RM_OK)
+-            {
+-                nv->agp_config = NVOS_AGP_CONFIG_NVAGP;
+-                nv->agp_status = NV_AGP_STATUS_ENABLED;
+-            }
++                      "NVRM: not using NVAGP, an AGPGART backend is loaded!\n");
++            return status;
++        }
+ #endif
++#if defined(CONFIG_X86_64) && defined(CONFIG_GART_IOMMU)
++        nv_printf(NV_DBG_WARNINGS,
++            "NVRM: not using NVAGP, kernel was compiled with GART_IOMMU support!\n");
++#else
++        status = rm_init_agp(nv);
++        if (status == RM_OK)
++        {
++            nv->agp_config = NVOS_AGP_CONFIG_NVAGP;
++            nv->agp_status = NV_AGP_STATUS_ENABLED;
+         }
++#endif
+     }
+ 
+     if (NV_AGP_ENABLED(nv))
+         old_error = 0; /* report new errors */
+ 
+-    nv_printf(NV_DBG_SETUP, 
+-        "NVRM: agp_init finished with status 0x%x and config %d\n",
+-        status, nv->agp_config);
+-
+     return status;
+ }
+ 
+@@ -3036,9 +3067,6 @@
+     nv->agp_config = NVOS_AGP_CONFIG_DISABLE_AGP;
+     nv->agp_status = NV_AGP_STATUS_DISABLED;
+ 
+-    nv_printf(NV_DBG_SETUP, "NVRM: teardown finished with status 0x%x\n", 
+-        status);
+-
+     return status;
+ }
+ 
+@@ -3065,7 +3093,7 @@
+     }
+ 
+     /* get the physical address of this page */
+-    *paddr = (U032) ((NV_UINTPTR_T)at->page_table[index].dma_addr);
++    *paddr = (U032) ((NV_UINTPTR_T)at->page_table[index]->dma_addr);
+ 
+     return RM_OK;
+ }
+diff -ruN nvidia-kernel.orig/nv/nv.c.orig nvidia-kernel/nv/nv.c.orig
+--- nvidia-kernel.orig/nv/nv.c.orig	1969-12-31 16:00:00.000000000 -0800
++++ nvidia-kernel/nv/nv.c.orig	2005-02-18 15:08:04.612165056 -0800
+@@ -0,0 +1,3396 @@
++/* _NVRM_COPYRIGHT_BEGIN_
++ *
++ * Copyright 1999-2001 by NVIDIA Corporation.  All rights reserved.  All
++ * information contained herein is proprietary and confidential to NVIDIA
++ * Corporation.  Any use, reproduction, or disclosure without the written
++ * permission of NVIDIA Corporation is prohibited.
++ *
++ * _NVRM_COPYRIGHT_END_
++ */
++
++
++#include "nv-misc.h"
++#include "os-interface.h"
++#include "nv-linux.h"
++#include "nv_compiler.h"
++#include "os-agp.h"
++#include "nv-vm.h"
++
++#ifdef MODULE_ALIAS_CHARDEV_MAJOR
++MODULE_ALIAS_CHARDEV_MAJOR(NV_MAJOR_DEVICE_NUMBER);
++#endif
++
++/*
++ * our global state; one per device
++ */
++
++static int num_nv_devices = 0;
++
++nv_linux_state_t nv_linux_devices[NV_MAX_DEVICES] = { { { 0 } } };
++
++#if defined(NV_PM_SUPPORT_APM)
++static struct pm_dev *apm_nv_dev[NV_MAX_DEVICES] = { 0 };
++#endif
++
++#if defined(NV_BUILD_NV_PAT_SUPPORT)
++static int pat_enabled = 0;
++#endif
++
++/*
++ * And one for the control device
++ */
++
++nv_linux_state_t nv_ctl_device = { { 0 } };
++wait_queue_head_t nv_ctl_waitqueue;
++
++// keep track of opened clients and their process id so they
++//   can be freed on abnormal close
++nv_client_t       nv_clients[NV_MAX_CLIENTS];
++
++#ifdef CONFIG_PROC_FS
++struct proc_dir_entry *proc_nvidia;
++#endif
++
++#ifdef CONFIG_DEVFS_FS
++devfs_handle_t nv_devfs_handles[NV_MAX_DEVICES+1];
++#endif
++
++#ifdef NV_CLASS_SIMPLE_CREATE_PRESENT
++struct class_simple *class_nvidia;
++#endif
++
++#ifdef NV_SWIOTLB
++int nv_swiotlb = 0;
++#endif
++
++static kmem_cache_t *nv_pte_t_cache = NULL;
++
++// allow an easy way to convert all debug printfs related to events
++// back and forth between 'info' and 'errors'
++#if defined(NV_DBG_EVENTS)
++#define NV_DBG_EVENTINFO NV_DBG_ERRORS
++#else
++#define NV_DBG_EVENTINFO NV_DBG_INFO
++#endif
++
++// verify access to pci config space wasn't disabled behind our back
++// unfortunately, XFree86 enables/disables memory access in pci config space at
++// various times (such as restoring initial pci config space settings during vt
++// switches or when doing multicard). As a result, all of our register accesses
++// are garbage at this point. add a check to see if access was disabled and
++// reenable any such access.
++#define NV_CHECK_PCI_CONFIG(nv) \
++    nv_check_pci_config(nv, __LINE__)
++
++static inline void nv_check_pci_config(nv_state_t *nv, int line)
++{
++    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
++    unsigned short cmd, flag = 0;
++
++    // don't do this on the control device, only the actual devices
++    if (nv->flags & NV_FLAG_CONTROL)
++        return;
++
++    pci_read_config_word(nvl->dev, PCI_COMMAND, &cmd);
++    if (!(cmd & PCI_COMMAND_MASTER))
++    {
++        nv_printf(NV_DBG_USERERRORS, "NVRM: restoring bus mastering! (%d)\n", line);
++        cmd |= PCI_COMMAND_MASTER;
++        flag = 1;
++    }
++
++    if (!(cmd & PCI_COMMAND_MEMORY))
++    {
++        nv_printf(NV_DBG_USERERRORS, "NVRM: restoring MEM access! (%d)\n", line);
++        cmd |= PCI_COMMAND_MEMORY;
++        flag = 1;
++    }
++
++    if (flag)
++        pci_write_config_word(nvl->dev, PCI_COMMAND, cmd);
++}
++
++/***
++ *** STATIC functions, only in this file
++ ***/
++
++/* nvos_ functions.. do not take a state device parameter  */
++static int      nvos_post_vbios(nv_ioctl_post_vbios_t *info);
++static void     nvos_proc_create(void);
++static void     nvos_proc_remove_all(struct proc_dir_entry *);
++static void     nvos_proc_remove(void);
++static int      nvos_count_devices(void);
++
++static nv_alloc_t  *nvos_create_alloc(struct pci_dev *, int);
++static int          nvos_free_alloc(nv_alloc_t *);
++
++/* nvl_ functions.. take a linux state device pointer */
++static nv_alloc_t  *nvl_find_alloc(nv_linux_state_t *, unsigned long, unsigned long);
++static int          nvl_add_alloc(nv_linux_state_t *, nv_alloc_t *);
++static int          nvl_remove_alloc(nv_linux_state_t *, nv_alloc_t *);
++
++/* lock-related functions that should only be called from this file */
++static void nv_lock_init_locks(nv_state_t *nv);
++
++#define nv_init_lock(lock)  spin_lock_init(&lock)
++#define nv_lock(lock)       spin_lock(&lock)
++#define nv_unlock(lock)     spin_unlock(&lock)
++#define nv_down(lock)       down(&lock)
++#define nv_up(lock)         up(&lock)
++
++#define nv_lock_irq(lock,flags)    spin_lock_irqsave(&lock,flags)
++#define nv_unlock_irq(lock,flags)  spin_unlock_irqrestore(&lock,flags)
++
++
++/***
++ *** EXPORTS to Linux Kernel
++ ***/
++
++/* nv_kern_ functions, interfaces used by linux kernel */
++void          nv_kern_vma_open(struct vm_area_struct *vma);
++void          nv_kern_vma_release(struct vm_area_struct *vma);
++
++int           nv_kern_open(struct inode *, struct file *);
++int           nv_kern_close(struct inode *, struct file *);
++int           nv_kern_mmap(struct file *, struct vm_area_struct *);
++unsigned int  nv_kern_poll(struct file *, poll_table *);
++int           nv_kern_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
++void          nv_kern_isr_bh(unsigned long);
++irqreturn_t   nv_kern_isr(int, void *, struct pt_regs *);
++void          nv_kern_rc_timer(unsigned long);
++#if defined(NV_PM_SUPPORT_APM)
++int           nv_kern_apm_event(struct pm_dev *dev, pm_request_t rqst, void *data);
++#endif
++
++int           nv_kern_read_cardinfo(char *, char **, off_t off, int, int *, void *);
++int           nv_kern_read_status(char *, char **, off_t off, int, int *, void *);
++int           nv_kern_read_agpinfo(char *, char **, off_t off, int, int *, void *);
++int           nv_kern_read_version(char *, char **, off_t off, int, int *, void *);
++
++int           nv_kern_ctl_open(struct inode *, struct file *);
++int           nv_kern_ctl_close(struct inode *, struct file *);
++unsigned int  nv_kern_ctl_poll(struct file *, poll_table *);
++
++int nv_kern_probe(struct pci_dev *, const struct pci_device_id *);
++#if defined(NV_PM_SUPPORT_ACPI)
++int nv_kern_acpi_standby(struct pci_dev *, u32);
++int nv_kern_acpi_resume(struct pci_dev *);
++#endif
++
++/***
++ *** see nv.h for functions exported to other parts of resman
++ ***/
++
++static struct pci_device_id nv_pci_table[] = {
++    { 
++        .vendor      = PCI_VENDOR_ID_NVIDIA,
++        .device      = PCI_ANY_ID,
++        .subvendor   = PCI_ANY_ID,
++        .subdevice   = PCI_ANY_ID,
++        .class       = (PCI_CLASS_DISPLAY_VGA << 8),
++        .class_mask  = ~0,
++    },
++    { }
++};
++
++MODULE_DEVICE_TABLE(pci, nv_pci_table);
++
++static struct pci_driver nv_pci_driver = {
++    .name     = "nvidia",
++    .id_table = nv_pci_table,
++    .probe    = nv_kern_probe,
++#if defined(NV_PM_SUPPORT_ACPI)
++    .suspend  = nv_kern_acpi_standby,
++    .resume   = nv_kern_acpi_resume,
++#endif
++};
++
++/* character driver entry points */
++
++static struct file_operations nv_fops = {
++    .owner     = THIS_MODULE,
++    .poll      = nv_kern_poll,
++    .ioctl     = nv_kern_ioctl,
++    .mmap      = nv_kern_mmap,
++    .open      = nv_kern_open,
++    .release   = nv_kern_close,
++};
++
++// Our reserved major device number.
++int nv_major = NV_MAJOR_DEVICE_NUMBER;
++
++// pull in the pointer to the NVID stamp from the binary module
++extern const char *pNVRM_ID;
++
++#if NV_ENABLE_MEM_TRACKING
++// poor man's memory tracker, see nv-linux.h
++int vm_usage = 0;
++int km_usage = 0;
++int fp_usage = 0;
++
++struct mem_track_t *vm_list = NULL;
++struct mem_track_t *km_list = NULL;
++struct mem_track_t *fp_list = NULL;
++#endif /* NV_ENABLE_MEM_TRACKING */
++
++/***
++ *** STATIC functions
++ ***/
++
++/* specify that this card needs its vbios posted */
++static int nvos_post_vbios(nv_ioctl_post_vbios_t *info)
++{
++    int i;
++
++    for (i = 0; i < NV_MAX_DEVICES; i++)
++    {
++        nv_state_t *nv = NV_STATE_PTR(&nv_linux_devices[i]);
++        if (nv->bus == info->bus && nv->slot == info->slot)
++        {
++            // we assume any device was already posted and rely on
++            // X to tell us which cards need posting. But if we've
++            // already manually posted a card, it doesn't need to
++            // be posted again.
++            if (!(nv->flags & NV_FLAG_WAS_POSTED))
++            {
++                nv->flags |= NV_FLAG_NEEDS_POSTING;
++            }
++        }
++    }
++
++    return 0;
++}
++
++static 
++nv_alloc_t *nvos_create_alloc(
++    struct pci_dev *dev,
++    int num_pages
++)
++{
++    nv_alloc_t *at;
++    unsigned int pt_size, i;
++
++    NV_KMALLOC(at, sizeof(nv_alloc_t));
++    if (at == NULL)
++    {
++        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate alloc info\n");
++        return NULL;
++    }
++
++    memset(at, 0, sizeof(nv_alloc_t));
++
++    pt_size = num_pages *  sizeof(nv_pte_t *);
++    if (os_alloc_mem((void **)&at->page_table, pt_size) != RM_OK)
++    {
++        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate page table\n");
++        NV_KFREE(at, sizeof(nv_alloc_t));
++        return NULL;
++    }
++
++    memset(at->page_table, 0, pt_size);
++    at->num_pages = num_pages;
++    NV_ATOMIC_SET(at->usage_count, 0);
++
++    for (i = 0; i < at->num_pages; i++)
++    {
++        NV_KMEM_CACHE_ALLOC(at->page_table[i], nv_pte_t_cache, nv_pte_t);
++        if (at->page_table[i] == NULL)
++        {
++            nv_printf(NV_DBG_ERRORS,
++                      "NVRM: failed to allocate page table entry\n");
++            nvos_free_alloc(at);
++            return NULL;
++        }
++        memset(at->page_table[i], 0, sizeof(nv_pte_t));
++    }
++
++    return at;
++}
++
++static 
++int nvos_free_alloc(
++    nv_alloc_t *at
++)
++{
++    unsigned int pt_size, i;
++
++    if (at == NULL)
++        return -1;
++
++    if (NV_ATOMIC_READ(at->usage_count))
++        return 1;
++
++    // we keep the page_table around after freeing the pages
++    // for bookkeeping reasons. Free the page_table and assume
++    // the underlying pages are already unlocked and freed.
++    if (at->page_table != NULL)
++    {
++        for (i = 0; i < at->num_pages; i++)
++        {
++            if (at->page_table[i] != NULL)
++                NV_KMEM_CACHE_FREE(at->page_table[i], nv_pte_t, nv_pte_t_cache);
++        }
++        pt_size = at->num_pages * sizeof(nv_pte_t *);
++        os_free_mem(at->page_table);
++    }
++
++    NV_KFREE(at, sizeof(nv_alloc_t));
++
++    return 0;
++}
++
++static u8 nvos_find_agp_capability(struct pci_dev *dev)
++{
++    u16 status;
++    u8  cap_ptr, cap_id;
++
++    pci_read_config_word(dev, PCI_STATUS, &status);
++    status &= PCI_STATUS_CAP_LIST;
++    if (!status)
++        return 0;
++
++    switch (dev->hdr_type) {
++        case PCI_HEADER_TYPE_NORMAL:
++        case PCI_HEADER_TYPE_BRIDGE:
++            pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &cap_ptr);
++            break;
++        default:
++            return 0;
++    }
++
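++    /*
++     * Walk the PCI capability list: pointers are dword aligned (hence
++     * the 0xfc mask), a pointer of 0 terminates the list, and an id of
++     * 0xff guards against adapters that return all-ones.
++     */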
++    do {
++        cap_ptr &= 0xfc;
++        pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_ID, &cap_id);
++        if (cap_id == PCI_CAP_ID_AGP)
++            return cap_ptr;
++        pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_NEXT, &cap_ptr);
++    } while (cap_ptr && cap_id != 0xff);
++
++    return 0;
++}
++
++static u8 nvos_find_pci_express_capability(struct pci_dev *dev)
++{
++    u16 status;
++    u8  cap_ptr, cap_id;
++
++    pci_read_config_word(dev, PCI_STATUS, &status);
++    status &= PCI_STATUS_CAP_LIST;
++    if (!status)
++        return 0;
++
++    switch (dev->hdr_type) {
++        case PCI_HEADER_TYPE_NORMAL:
++        case PCI_HEADER_TYPE_BRIDGE:
++            pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &cap_ptr);
++            break;
++        default:
++            return 0;
++    }
++
++    do {
++        cap_ptr &= 0xfc;
++        pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_ID, &cap_id);
++        if (cap_id == PCI_CAP_ID_EXP)
++            return cap_ptr;
++        pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_NEXT, &cap_ptr);
++    } while (cap_ptr && cap_id != 0xff);
++
++    return 0;
++}
++
++static struct pci_dev* nvos_get_agp_device_by_class(unsigned int class)
++{
++    struct pci_dev *dev, *fdev;
++    u32 slot, func, fn;
++
++    dev = NV_PCI_GET_CLASS(class << 8, NULL);
++    while (dev) {
++        slot = NV_PCI_SLOT_NUMBER(dev);
++        for (func = 0; func < 8; func++) {
++            fn = PCI_DEVFN(slot, func);
++            fdev = NV_PCI_GET_SLOT(NV_PCI_BUS_NUMBER(dev), fn);
++            if (!fdev)
++                continue;
++            if (nvos_find_agp_capability(fdev)) {
++                NV_PCI_DEV_PUT(dev);
++                return fdev;
++            }
++            NV_PCI_DEV_PUT(fdev);
++        }
++        dev = NV_PCI_GET_CLASS(class << 8, dev);
++    }
++
++    return NULL;
++}
++
++static struct pci_dev* nv_get_pci_device(nv_state_t *nv)
++{
++    struct pci_dev *dev;
++
++    dev = NV_PCI_GET_DEVICE(nv->vendor_id, nv->device_id, NULL);
++    while (dev) {
++        if (NV_PCI_SLOT_NUMBER(dev) == nv->slot
++                && NV_PCI_BUS_NUMBER(dev) == nv->bus)
++            return dev;
++        dev = NV_PCI_GET_DEVICE(nv->vendor_id, nv->device_id, dev);
++    }
++
++    return NULL;
++}
++
++static void nvos_proc_create(void)
++{
++#ifdef CONFIG_PROC_FS
++    struct pci_dev *dev;
++    int i = 0;
++    char name[6];
++
++    struct proc_dir_entry *entry;
++    struct proc_dir_entry *proc_nvidia_agp, *proc_nvidia_cards;
++
++    /* world readable directory */
++    int flags = S_IFDIR | S_IRUGO | S_IXUGO;
++
++    nv_state_t *nv;
++    nv_linux_state_t *nvl;
++    nv_linux_state_t *nv_max_devices;
++
++    proc_nvidia = create_proc_entry("nvidia", flags, proc_root_driver);
++    if (!proc_nvidia)
++        goto failed;
++
++    proc_nvidia_cards = create_proc_entry("cards", flags, proc_nvidia);
++    if (!proc_nvidia_cards)
++        goto failed;
++
++    proc_nvidia_agp = create_proc_entry("agp", flags, proc_nvidia);
++    if (!proc_nvidia_agp)
++        goto failed;
++
++    /*
++     * Set the module owner to ensure that the reference
++     * count reflects accesses to the proc files.
++     */
++    proc_nvidia->owner       = THIS_MODULE;
++    proc_nvidia_cards->owner = THIS_MODULE;
++    proc_nvidia_agp->owner   = THIS_MODULE;
++
++    nv_max_devices = nv_linux_devices + NV_MAX_DEVICES;
++    for (nvl = nv_linux_devices; nvl < nv_max_devices; nvl++) 
++    {
++        nv = NV_STATE_PTR(nvl);
++
++        if (nv->device_id == 0)
++            break;
++
++        /* world readable file */
++        flags = S_IFREG | S_IRUGO;
++
++        dev = nv_get_pci_device(nv);
++        if (!dev)
++            break;
++
++        sprintf(name, "%d", i++);
++        entry = create_proc_entry(name, flags, proc_nvidia_cards);
++        if (!entry) {
++            NV_PCI_DEV_PUT(dev);
++            goto failed;
++        }
++
++        entry->data = nv;
++        entry->read_proc = nv_kern_read_cardinfo;
++        entry->owner = THIS_MODULE;
++
++        if (nvos_find_agp_capability(dev)) {
++            entry = create_proc_entry("status", flags, proc_nvidia_agp);
++            if (!entry) {
++                NV_PCI_DEV_PUT(dev);
++                goto failed;
++            }
++
++            entry->data = nv;
++            entry->read_proc = nv_kern_read_status;
++            entry->owner = THIS_MODULE;
++
++            entry = create_proc_entry("card", flags, proc_nvidia_agp);
++            if (!entry) {
++                NV_PCI_DEV_PUT(dev);
++                goto failed;
++            }
++
++            entry->data = nv;
++            entry->read_proc = nv_kern_read_agpinfo;
++            entry->owner = THIS_MODULE;
++        }
++
++        NV_PCI_DEV_PUT(dev);
++    }
++
++    entry = create_proc_entry("version", flags, proc_nvidia);
++    if (!entry)
++        goto failed;
++
++    entry->read_proc = nv_kern_read_version;
++    entry->owner = THIS_MODULE;
++
++    entry = create_proc_entry("host-bridge", flags, proc_nvidia_agp);
++    if (!entry)
++        goto failed;
++
++    entry->data = NULL;
++    entry->read_proc = nv_kern_read_agpinfo;
++    entry->owner = THIS_MODULE;
++
++    return;
++
++failed:
++    nv_printf(NV_DBG_ERRORS, "NVRM: failed to create /proc entries!\n");
++    nvos_proc_remove_all(proc_nvidia);
++#endif
++}
++
++#ifdef CONFIG_PROC_FS
++static void nvos_proc_remove_all(struct proc_dir_entry *entry)
++{
++    while (entry) {
++        struct proc_dir_entry *next = entry->next;
++        if (entry->subdir)
++            nvos_proc_remove_all(entry->subdir);
++        remove_proc_entry(entry->name, entry->parent);
++        if (entry == proc_nvidia)
++            break;
++        entry = next;
++    }
++}
++#endif
++
++static void nvos_proc_remove(void)
++{
++#ifdef CONFIG_PROC_FS
++    nvos_proc_remove_all(proc_nvidia);
++#endif
++}
++
++/*
++ * Given a virtual address, find the 'at' that owns it.
++ * Uses the physical address as the key.
++ */
++static nv_alloc_t *nvl_find_alloc(
++    nv_linux_state_t    *nvl,
++    unsigned long  address,
++    unsigned long  flags
++)
++{
++    nv_alloc_t *at;
++
++    for (at = nvl->alloc_queue; at; at = at->next)
++    {
++        // make sure this 'at' matches the flags the caller provided
++        // i.e., don't mistake a pci allocation for an agp allocation
++        if (!(at->flags & flags))
++            continue;
++
++        // most mappings will be found based on the 'key'
++        if (address == ((unsigned long) at->key_mapping))
++            return at;
++
++        if (at->page_table)
++        {
++            int i;
++            for (i = 0; i < at->num_pages; i++)
++            {
++                unsigned long offset = at->page_table[i]->phys_addr;
++                if ((address >= offset) &&
++                    (address < (offset + PAGE_SIZE)))
++                    return at;
++            }
++        }
++
++    }
++
++    /* failure is not necessarily an error if the caller
++       was just probing an address */
++    nv_printf(NV_DBG_INFO, "NVRM: could not find map for vm 0x%lx\n", address);
++    return NULL;
++}
++
++static int nvl_add_alloc(
++    nv_linux_state_t *nvl, 
++    nv_alloc_t *at
++)
++{
++    nv_down(nvl->at_lock);
++    at->next = nvl->alloc_queue;
++    nvl->alloc_queue = at;
++    nv_up(nvl->at_lock);
++    return 0;
++}
++
++static int nvl_remove_alloc(
++    nv_linux_state_t *nvl, 
++    nv_alloc_t *at
++)
++{
++    nv_alloc_t *tmp, *prev;
++
++    if (nvl->alloc_queue == at)
++    {
++        nvl->alloc_queue = nvl->alloc_queue->next;
++        return 0;
++    }
++
++    for (tmp = prev = nvl->alloc_queue; tmp; prev = tmp, tmp = tmp->next)
++    {
++        if (tmp == at)
++        {
++            prev->next = tmp->next;
++            return 0;
++        }
++    }
++
++    return -1;
++}
++
++#if defined(NV_BUILD_NV_PAT_SUPPORT)
++/*
++ * Private PAT support for use by the NVIDIA driver. This is an
++ * interim solution until the kernel offers PAT support.
++ */
++static int   __check_pat_support       (void);
++static void  __nv_setup_pat_entries    (void *);
++static void  __nv_restore_pat_entries  (void *);
++static int   __nv_enable_pat_support   (void);
++static void  __nv_disable_pat_support  (void);
++
++#define NV_READ_PAT_ENTRIES(pat1, pat2)   rdmsr(IA32_CR_PAT, (pat1), (pat2))
++#define NV_WRITE_PAT_ENTRIES(pat1, pat2)  wrmsr(IA32_CR_PAT, (pat1), (pat2))
++#define NV_PAT_ENTRY(pat, index)          (((pat) & (0xff<<((index)*8)))>>((index)*8))
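++
++/*
++ * The IA32_CR_PAT MSR holds eight one-byte entries; rdmsr/wrmsr split them
++ * into a low dword (entries 0-3, 'pat1') and a high dword (entries 4-7,
++ * 'pat2').  The memory type encoding 0x01 is Write-Combining, which is why
++ * the code below tests each entry against 1.
++ */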
++
++static inline void __nv_disable_caches(unsigned long *cr4)
++{
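++    /*
++     * CR0 bit 30 is CD (cache disable) and bit 29 is NW (not write-through);
++     * setting CD and clearing NW turns the caches off.  CR4 bit 7 is PGE
++     * (page global enable); toggling it off ensures the TLB flush below
++     * also drops global entries.
++     */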
++    unsigned long cr0 = read_cr0();
++    write_cr0(((cr0 & (0xdfffffff)) | 0x40000000));
++    wbinvd();
++    *cr4 = read_cr4();
++    if (*cr4 & 0x80) write_cr4(*cr4 & ~0x80);
++    __flush_tlb();
++}
++
++static inline void __nv_enable_caches(unsigned long cr4)
++{
++    unsigned long cr0 = read_cr0();
++    wbinvd();
++    __flush_tlb();
++    write_cr0((cr0 & 0x9fffffff));
++    if (cr4 & 0x80) write_cr4(cr4);
++}
++
++static int __check_pat_support()
++{
++    unsigned int pat1, pat2, i;
++
++    if (!test_bit(X86_FEATURE_PAT, (volatile unsigned long *)&boot_cpu_data.x86_capability))
++    {
++        nv_printf(NV_DBG_ERRORS, "NVRM: cpu does not support PAT, aborting..\n");
++        return 0;
++    }
++
++    NV_READ_PAT_ENTRIES(pat1, pat2);
++
++    for (i = 0; i < 4; i++)
++    {
++         if (NV_PAT_ENTRY(pat1, i) == 1)
++         {
++             nv_printf(NV_DBG_ERRORS, "NVRM: PAT index %d already configured for Write-Combining!\n", i);
++             nv_printf(NV_DBG_ERRORS, "NVRM: Aborting, due to PAT already being configured\n");
++             return 0;
++         }
++    }
++
++    for (i = 0; i < 4; i++)
++    {
++         if (NV_PAT_ENTRY(pat2, i) == 1)
++         {
++             nv_printf(NV_DBG_ERRORS, "NVRM: PAT index %d already configured for Write-Combining!\n", i + 4);
++             nv_printf(NV_DBG_ERRORS, "NVRM: Aborting, due to PAT already being configured\n");
++             return 0;
++         }
++    }
++
++    return 1;
++}
++
++static unsigned long orig_pat1, orig_pat2;
++
++static void __nv_setup_pat_entries(void *info)
++{
++    unsigned long pat1, pat2, cr4;
++    unsigned long eflags;
++
++    NV_SAVE_FLAGS(eflags);
++    NV_CLI();
++    __nv_disable_caches(&cr4);
++
++    NV_READ_PAT_ENTRIES(pat1, pat2);
++
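++    /* program PAT entry 1 (byte 1 of the low dword) to 0x01, Write-Combining */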
++    pat1 &= 0xffff00ff;
++    pat1 |= 0x00000100;
++
++    NV_WRITE_PAT_ENTRIES(pat1, pat2);
++
++    __nv_enable_caches(cr4);
++    NV_RESTORE_FLAGS(eflags);
++}
++
++static void __nv_restore_pat_entries(void *info)
++{
++    unsigned long cr4;
++    unsigned long eflags;
++
++    NV_SAVE_FLAGS(eflags);
++    NV_CLI();
++    __nv_disable_caches(&cr4);
++
++    NV_WRITE_PAT_ENTRIES(orig_pat1, orig_pat2);
++
++    __nv_enable_caches(cr4);
++    NV_RESTORE_FLAGS(eflags);
++}
++
++static int __nv_enable_pat_support()
++{
++    unsigned long pat1, pat2;
++
++    if (pat_enabled)
++        return 1;
++
++    if (!__check_pat_support())
++        return 0;
++
++    NV_READ_PAT_ENTRIES(orig_pat1, orig_pat2);
++    nv_printf(NV_DBG_SETUP, "saved orig pats as 0x%lx 0x%lx\n", orig_pat1, orig_pat2);
++
++#ifdef CONFIG_SMP
++    if (smp_call_function(__nv_setup_pat_entries, NULL, 1, 1) != 0)
++        return 0;
++#endif
++
++    __nv_setup_pat_entries(NULL);
++
++    pat_enabled = 1;
++
++    NV_READ_PAT_ENTRIES(pat1, pat2);
++    nv_printf(NV_DBG_SETUP, "changed pats to 0x%lx 0x%lx\n", pat1, pat2);
++
++    return 1;
++}
++
++static void __nv_disable_pat_support()
++{
++    unsigned long pat1, pat2;
++
++    if (!pat_enabled)
++        return;
++
++#ifdef CONFIG_SMP
++    if (smp_call_function(__nv_restore_pat_entries, NULL, 1, 1) != 0)
++        return;
++#endif
++
++    __nv_restore_pat_entries(NULL);
++
++    pat_enabled = 0;
++
++    NV_READ_PAT_ENTRIES(pat1, pat2);
++    nv_printf(NV_DBG_SETUP, "restored orig pats as 0x%lx 0x%lx\n", pat1, pat2);
++}
++
++#endif /* defined(NV_BUILD_NV_PAT_SUPPORT) */
++
++/***
++ *** EXPORTS to Linux Kernel
++ ***/
++
++static int __init nvidia_init_module(void)
++{
++    int rc, i, count;
++
++    memset(nv_linux_devices, 0, sizeof(nv_linux_devices));
++
++    if (pci_register_driver(&nv_pci_driver) < 0)
++    {
++        pci_unregister_driver(&nv_pci_driver); // XXX ???
++        nv_printf(NV_DBG_ERRORS, "NVRM: no NVIDIA graphics adapter found\n");
++        return -ENODEV;
++    }
++
++    count = nvos_count_devices();
++    if (num_nv_devices != count)
++    {
++        nv_printf(NV_DBG_ERRORS, "NVRM: the NVIDIA probe routine was not "
++            "called for %d device(s)!!\n", count - num_nv_devices);
++        if (!num_nv_devices)
++        {
++            nv_printf(NV_DBG_ERRORS, "NVRM: no devices probed, aborting!\n");
++            nv_printf(NV_DBG_ERRORS, "NVRM: this often occurs when rivafb is "
++                "loaded and claims the device's resources.\n");
++            nv_printf(NV_DBG_ERRORS, "NVRM: try removing the rivafb module "
++                "(or reconfiguring your kernel to remove\n");
++            nv_printf(NV_DBG_ERRORS, "NVRM: rivafb support) and then try "
++                "loading the NVIDIA kernel module again.\n");
++            pci_unregister_driver(&nv_pci_driver);
++            return -ENODEV;
++        }
++    }
++
++    nv_printf(NV_DBG_ERRORS, "NVRM: loading %s\n", pNVRM_ID);
++
++    rc = NV_REGISTER_CHRDEV(nv_major, "nvidia", &nv_fops);
++    if (rc < 0)
++    {
++        nv_printf(NV_DBG_ERRORS, "NVRM: register chrdev failed\n");
++        pci_unregister_driver(&nv_pci_driver);
++        return rc;
++    }
++
++#ifdef NV_CLASS_SIMPLE_CREATE_PRESENT
++    class_nvidia = class_simple_create(THIS_MODULE, "nvidia");
++    if (IS_ERR(class_nvidia))
++    {
++        rc = PTR_ERR(class_nvidia);
++        nv_printf(NV_DBG_ERRORS, "NVRM: class_simple creation failed\n");
++        /* goto failed; */
++    }
++#endif
++
++#ifdef CONFIG_DEVFS_FS
++    do
++    {
++        char name[10];
++
++        nv_devfs_handles[0] = NV_DEVFS_REGISTER("nvidiactl", 255);
++
++        for (i = 0; i < num_nv_devices; i++)
++        {
++            sprintf(name, "nvidia%d", i);
++            nv_devfs_handles[i+1] = NV_DEVFS_REGISTER(name, i);
++        }
++
++        for (i = 0; i <= num_nv_devices; i++)
++        {
++            if (nv_devfs_handles[i] == NULL)
++            {
++                rc = -ENOMEM; /* XXX Fix me? (rc) */
++                nv_printf(NV_DBG_ERRORS, "NVRM: devfs register failed\n");
++                goto failed;
++            }
++        }
++    } while(0);
++#endif
++
++#ifdef NV_CLASS_SIMPLE_CREATE_PRESENT
++    if (!IS_ERR(class_nvidia))
++    {
++        /*
++         * XXX We don't consider class_simple errors fatal for
++         * now to avoid unexpected failures.
++         */
++        char name[10];
++
++        class_simple_device_add(class_nvidia,
++            MKDEV(NV_MAJOR_DEVICE_NUMBER, 255), NULL, "nvidiactl");
++
++        for (i = 0; i < num_nv_devices; i++)
++        {
++            sprintf(name, "nvidia%d", i);
++            class_simple_device_add(class_nvidia,
++                MKDEV(NV_MAJOR_DEVICE_NUMBER, i), &nv_linux_devices[i].dev->dev, name);
++        }
++    }
++#endif
++
++    nv_printf(NV_DBG_INFO, "NVRM: major number %d\n", nv_major);
++
++    /* instantiate tasklets */
++    for (i = 0; i < NV_MAX_DEVICES; i++)
++    {
++        /*
++         * We keep one tasklet per card to avoid latency issues with more
++         * than one device; no two instances of a single tasklet are ever
++         * executed concurrently.
++         */
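++        /* a count of 1 leaves the tasklet disabled until nv_kern_open
++         * enables it with tasklet_enable(). */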
++        NV_ATOMIC_SET(nv_linux_devices[i].tasklet.count, 1);
++    }
++
++    // init the nvidia control device
++    {
++        nv_state_t *nv_ctl = NV_STATE_PTR(&nv_ctl_device);
++        nv_ctl->os_state = (void *) &nv_ctl_device;
++        nv_lock_init_locks(nv_ctl);
++    }
++
++#if defined(NV_PM_SUPPORT_APM)
++    for (i = 0; i < num_nv_devices; i++)
++    {
++        apm_nv_dev[i] = pm_register(PM_PCI_DEV, PM_SYS_VGA, nv_kern_apm_event);
++    }
++#endif
++
++    NV_KMEM_CACHE_CREATE(nv_pte_t_cache, "nv_pte_t", nv_pte_t);
++    if (nv_pte_t_cache == NULL)
++    {
++        nv_printf(NV_DBG_ERRORS, "NVRM: pte cache allocation failed\n");
++        goto failed;
++    }
++ 
++    // Init the resource manager
++    if (!rm_init_rm())
++    {
++        nv_printf(NV_DBG_ERRORS, "NVRM: rm_init_rm() failed\n");
++        rc = -EIO;
++        goto failed;
++    }
++
++    // load our local registry entries into the registry
++    {
++        extern nv_parm_t nv_parms[];
++        rm_load_registry(nv_parms);
++    }
++
++    /* create /proc/driver/nvidia */
++    nvos_proc_create();
++
++#if defined(DEBUG)
++    inter_module_register("nv_linux_devices", THIS_MODULE, nv_linux_devices);
++#endif
++
++    /* Register ioctl conversions for 32 bit clients */
++    rm_register_ioctl_conversions();
++
++#ifdef NV_SWIOTLB
++    // may need a better test than this eventually
++    if (swiotlb)
++    {
++        nv_printf(NV_DBG_ERRORS, "NVRM: WARNING:  You are probably using the kernel's swiotlb interface.\n");
++        nv_printf(NV_DBG_ERRORS, "NVRM: Be very careful with this interface, as it is easy\n");
++        nv_printf(NV_DBG_ERRORS, "NVRM: to exhaust this interface's memory buffer, at which\n");
++        nv_printf(NV_DBG_ERRORS, "NVRM: point it panics the kernel.  Please increase the size\n");
++        nv_printf(NV_DBG_ERRORS, "NVRM: of this buffer by specifying a larger buffer size with\n");
++        nv_printf(NV_DBG_ERRORS, "NVRM: the swiotlb kernel option, eg: \"swiotlb=16384\"\n");
++        nv_swiotlb = 1;
++    }
++#endif
++
++    return 0;
++
++failed:
++    if (nv_pte_t_cache != NULL)
++        NV_KMEM_CACHE_DESTROY(nv_pte_t_cache);
++
++#if defined(NV_PM_SUPPORT_APM)
++    for (i = 0; i < num_nv_devices; i++)
++        if (apm_nv_dev[i] != NULL) pm_unregister(apm_nv_dev[i]);
++#endif
++
++#ifdef CONFIG_DEVFS_FS
++    NV_DEVFS_REMOVE_CONTROL();
++    for (i = 0; i < num_nv_devices; i++)
++        NV_DEVFS_REMOVE_DEVICE(i);
++#endif
++
++    if (NV_UNREGISTER_CHRDEV(nv_major, "nvidia") < 0)
++        nv_printf(NV_DBG_ERRORS, "NVRM: unregister nv chrdev failed\n");
++
++#ifdef NV_CLASS_SIMPLE_CREATE_PRESENT
++    class_simple_device_remove(MKDEV(NV_MAJOR_DEVICE_NUMBER, 255));
++    for (i = 0; i < num_nv_devices; i++)
++        class_simple_device_remove(MKDEV(NV_MAJOR_DEVICE_NUMBER, i));
++    class_simple_destroy(class_nvidia);
++#endif
++
++    for (i = 0; i < num_nv_devices; i++)
++    {
++        if (nv_linux_devices[i].dev)
++        {
++            struct pci_dev *dev = nv_linux_devices[i].dev;
++            release_mem_region(NV_PCI_RESOURCE_START(dev, 1),
++                               NV_PCI_RESOURCE_SIZE(dev, 1));
++        }
++    }
++
++    pci_unregister_driver(&nv_pci_driver);
++    return rc;
++}
++
++static void __exit nvidia_exit_module(void)
++{
++    int i;
++    nv_linux_state_t *nvl, *max_devices;
++
++    nv_printf(NV_DBG_INFO, "NVRM: nvidia_exit_module\n");
++
++#ifdef CONFIG_DEVFS_FS
++    NV_DEVFS_REMOVE_CONTROL();
++    for (i = 0; i < num_nv_devices; i++)
++        NV_DEVFS_REMOVE_DEVICE(i);
++#endif
++
++    if (NV_UNREGISTER_CHRDEV(nv_major, "nvidia") < 0)
++        nv_printf(NV_DBG_ERRORS, "NVRM: unregister nv chrdev failed\n");
++
++#ifdef NV_CLASS_SIMPLE_CREATE_PRESENT
++    class_simple_device_remove(MKDEV(NV_MAJOR_DEVICE_NUMBER, 255));
++    for (i = 0; i < num_nv_devices; i++)
++        class_simple_device_remove(MKDEV(NV_MAJOR_DEVICE_NUMBER, i));
++    class_simple_destroy(class_nvidia);
++#endif
++
++    for (i = 0; i < num_nv_devices; i++)
++    {
++        if (nv_linux_devices[i].dev)
++        {
++            struct pci_dev *dev = nv_linux_devices[i].dev;
++            release_mem_region(NV_PCI_RESOURCE_START(dev, 1),
++                               NV_PCI_RESOURCE_SIZE(dev, 1));
++            NV_PCI_DISABLE_DEVICE(dev);
++        }
++    }
++
++    pci_unregister_driver(&nv_pci_driver);
++
++    /* remove /proc/driver/nvidia */
++    nvos_proc_remove();
++
++#if defined(DEBUG)
++    inter_module_unregister("nv_linux_devices");
++#endif
++
++#if defined(NV_PM_SUPPORT_APM)
++    for (i = 0; i < num_nv_devices; i++)
++    {
++        pm_unregister(apm_nv_dev[i]);
++    }
++#endif
++
++    // Shutdown the resource manager
++    rm_shutdown_rm();
++
++    /*
++     * Make sure we freed up all the mappings. The kernel should
++     * do this automatically before calling close.
++     */
++    max_devices = nv_linux_devices + NV_MAX_DEVICES;
++    for (nvl = nv_linux_devices; nvl < max_devices; nvl++)
++    {
++        nv_state_t *nv = NV_STATE_PTR(nvl);
++
++        if (nvl->alloc_queue)
++        {
++            nv_alloc_t *at = nvl->alloc_queue;
++            while (at)
++            {
++                NV_PRINT_AT(at);
++                at = at->next;
++            }
++        }
++
++        if (nv->flags & NV_FLAG_MAP_REGS_EARLY)
++        {
++            NV_ASSERT("regs not mapped when they should be!",
++                      nv->bar.regs.map != NULL);
++            os_unmap_kernel_space(nv->bar.regs.map, nv->bar.regs.size);
++            nv->bar.regs.map = NULL;
++            nv->bar.regs.map_u = (nv_phwreg_t) NULL;
++        }
++    }
++
++    /* Unregister ioctl conversions for 32 bit clients */
++    rm_unregister_ioctl_conversions();
++
++#if defined(NV_BUILD_NV_PAT_SUPPORT)
++    if (pat_enabled)
++    {
++        __nv_disable_pat_support();
++    }
++#endif
++
++#if NV_ENABLE_MEM_TRACKING
++    nv_list_mem(vm_list);
++    nv_list_mem(km_list);
++    nv_list_mem(fp_list);
++    nv_printf(NV_DBG_ERRORS, "NVRM: final mem usage: vm 0x%x km 0x%x fp 0x%x\n",
++        vm_usage, km_usage, fp_usage);
++#endif
++
++    NV_KMEM_CACHE_DESTROY(nv_pte_t_cache);
++}
++
++module_init(nvidia_init_module);
++module_exit(nvidia_exit_module);
++
++
++/* this is only called when the vmas are duplicated.
++ * this appears to happen only when the process is cloned to create
++ * a new process, and not when the process is threaded.
++ *
++ * increment the usage count for the physical pages, so that when this
++ * clone unmaps the mappings, the pages are not deallocated out from
++ * under the original process.
++ */
++void
++nv_kern_vma_open(struct vm_area_struct *vma)
++{
++    nv_printf(NV_DBG_MEMINFO, "NVRM: VM: vma_open for 0x%x - 0x%x, offset 0x%x\n",
++        vma->vm_start, vma->vm_end, NV_VMA_OFFSET(vma));
++
++    if (NV_VMA_PRIVATE(vma))
++    {
++        nv_alloc_t *at = (nv_alloc_t *) NV_VMA_PRIVATE(vma);
++        NV_ATOMIC_INC(at->usage_count);
++
++        nv_printf(NV_DBG_MEMINFO,
++            "NVRM: VM:   at 0x%x, usage count %d, page_table 0x%x\n",
++            at, at->usage_count, at->page_table);
++
++        nv_vm_list_page_count(at->page_table, at->num_pages);
++    }
++}
++
++
++void
++nv_kern_vma_release(struct vm_area_struct *vma)
++{
++    nv_printf(NV_DBG_MEMINFO,
++        "NVRM: VM: vma_release for 0x%x - 0x%x, offset 0x%x\n",
++        vma->vm_start, vma->vm_end, NV_VMA_OFFSET(vma));
++
++    if (NV_VMA_PRIVATE(vma))
++    {
++        nv_alloc_t *at = (nv_alloc_t *) NV_VMA_PRIVATE(vma);
++
++        NV_ATOMIC_DEC(at->usage_count);
++
++        nv_printf(NV_DBG_MEMINFO,
++            "NVRM: VM:  at 0x%x, usage count %d, page_table 0x%x\n",
++            at, at->usage_count, at->page_table);
++
++        nv_vm_list_page_count(at->page_table, at->num_pages);
++
++        // if usage_count is down to 0, the kernel virtual mapping was freed
++        // but the underlying physical pages were not, due to the reserved bit
++        // being set. We need to clear the reserved bit, then munmap will
++        // zap the pages and free the physical pages.
++        if (NV_ATOMIC_READ(at->usage_count) == 0)
++        {
++            if (at->page_table)
++                nv_vm_unlock_pages(at);
++            nvos_free_alloc(at);
++            NV_VMA_PRIVATE(vma) = NULL;
++        }
++    }
++}
++
++
++/* at this point, this code just plain won't work with 2.2 kernels.
++ * additionally, only ia64 & the 460GX need a nopage handler, and 2.2 doesn't
++ * work on ia64 anyway. It's expected that at some point other agp chipsets
++ * will work similarly to the 460GX (AGP 3.0 spec), so pre-emptively make sure
++ * this works on our standard ia32 driver.
++ */
++
++/* AGP allocations under the 460GX are not mapped to the aperture
++ * addresses by the CPU.  This nopage handler will fault on CPU
++ * accesses to AGP memory and map the address to the correct page.
++ */
++struct page *nv_kern_vma_nopage(
++    struct vm_area_struct *vma,
++    unsigned long address,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1))
++    int *type
++#else
++    int write_access
++#endif
++)
++{
++#if defined(NVCPU_IA64) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 9))
++    nv_alloc_t *at, *tmp;
++    nv_linux_state_t *nvl;
++    nv_state_t *nv;
++    struct page *page_ptr;
++    int rm_status, index;
++
++    at = NV_VMA_PRIVATE(vma);
++    if (at == NULL)
++    {
++        nv_printf(NV_DBG_ERRORS, "NVRM: nopage handler called without an at: "
++                  "vm_start 0x%x, at 0x%x\n", vma->vm_start, at);
++        return NOPAGE_SIGBUS;
++    }
++
++    // let's verify this 'at' is valid
++    // I can imagine cases where something went wrong, the 'at' and underlying
++    // pages were freed, but the virtual mapping still exists and this 'at'
++    // pointer is potentially pointing to freed memory. Let's make sure we can
++    // still find the 'at' in our alloc_queue.
++    nvl = NVL_FROM_FILEP(vma->vm_file);
++    if (nvl == NULL)
++        return NOPAGE_SIGBUS;
++
++    nv = (nv_state_t *) nvl;
++
++    rm_status = RM_ERROR;
++    tmp = nvl->alloc_queue;
++    while (tmp)
++    {
++        if (tmp == at)
++        {
++            rm_status = RM_OK;
++            break;
++        }
++        tmp = tmp->next;
++    }
++
++    if (rm_status != RM_OK)
++    {
++        // we didn't find the 'at' (and haven't dereferenced it yet).
++        // let's bail before something bad happens, but first print an
++        // error message and NULL the pointer out so we don't come this
++        // far again
++        nv_printf(NV_DBG_ERRORS, "NVRM: nopage handler called on a freed"
++                  "address: vm_start 0x%x, at 0x%x\n", vma->vm_start, at);
++        NV_VMA_PRIVATE(vma) = NULL;
++        return NOPAGE_SIGBUS;
++    }
++
++    rm_status = KernMapAGPNopage((void *)address, vma, at->priv_data, 
++                                 (void **)&page_ptr);
++    if (rm_status)
++        return NOPAGE_SIGBUS;
++
++    // get the index of this page into the allocation
++    index = (address - vma->vm_start)>>PAGE_SHIFT;
++
++    // save that index into our page list (make sure it doesn't already exist)
++    if (at->page_table[index]->phys_addr)
++    {
++        nv_printf(NV_DBG_ERRORS, "NVRM: page slot already filled in nopage handler!\n");
++        os_dbg_breakpoint();
++    }
++
++    at->page_table[index]->phys_addr = (page_to_pfn(page_ptr) << PAGE_SHIFT);
++    at->page_table[index]->dma_addr  = (page_to_pfn(page_ptr) << PAGE_SHIFT);
++    at->page_table[index]->virt_addr = (unsigned long) __va(page_to_pfn(page_ptr) << PAGE_SHIFT);
++
++    return page_ptr;
++#endif
++    return NOPAGE_SIGBUS;
++}
++
++struct vm_operations_struct nv_vm_ops = {
++    .open   = nv_kern_vma_open,
++    .close  = nv_kern_vma_release,  /* "close" */
++    .nopage = nv_kern_vma_nopage,
++};
++
++static nv_file_private_t *
++nv_alloc_file_private(void)
++{
++    nv_file_private_t *nvfp;
++
++    NV_KMALLOC(nvfp, sizeof(nv_file_private_t));
++    if (!nvfp)
++        return NULL;
++
++    memset(nvfp, 0, sizeof(nv_file_private_t));
++
++    // initialize this file's event queue
++    init_waitqueue_head(&nvfp->waitqueue);
++
++    nv_init_lock(nvfp->fp_lock);
++
++    NV_KMALLOC(nvfp->event_fifo, sizeof(nv_event_t) * NV_EVENT_FIFO_SIZE);
++    if (nvfp->event_fifo == NULL)
++    {
++        NV_KFREE(nvfp, sizeof(nv_file_private_t));
++        return NULL;
++    }
++
++    return nvfp;
++}
++
++static void
++nv_free_file_private(nv_file_private_t *nvfp)
++{
++    if (nvfp == NULL)
++        return;
++
++    NV_KFREE(nvfp->event_fifo, sizeof(nv_event_t) * NV_EVENT_FIFO_SIZE);
++    NV_KFREE(nvfp, sizeof(nv_file_private_t));
++}
++
++
++/*
++** nv_kern_open
++**
++** nv driver open entry point.  Sessions are created here.
++*/
++int nv_kern_open(
++    struct inode *inode,
++    struct file *file
++)
++{
++    nv_state_t *nv = NULL;
++    nv_linux_state_t *nvl = NULL;
++    int devnum;
++    int rc = 0, status;
++
++    nv_printf(NV_DBG_INFO, "NVRM: nv_kern_open...\n");
++
++    FILE_PRIVATE(file) = nv_alloc_file_private();
++    if (FILE_PRIVATE(file) == NULL)
++        return -ENOMEM;
++
++    /* for the control device, just jump to its open routine
++       after setting up the private data */
++    if (NV_IS_CONTROL_DEVICE(inode))
++        return nv_kern_ctl_open(inode, file);
++
++    /* what device are we talking about? */
++    devnum = NV_DEVICE_NUMBER(inode);
++    if (devnum >= NV_MAX_DEVICES)
++    {
++        nv_free_file_private(FILE_PRIVATE(file));
++        FILE_PRIVATE(file) = NULL;
++        return -ENODEV;
++    }
++
++    nvl = &nv_linux_devices[devnum];
++    nv = NV_STATE_PTR(nvl);
++
++    nv_printf(NV_DBG_INFO, "NVRM: nv_kern_open on device %d\n", devnum);
++    nv_down(nvl->ldata_lock);
++
++    NV_CHECK_PCI_CONFIG(nv);
++
++    NVL_FROM_FILEP(file) = nvl;
++
++    /*
++     * map the memory and allocate isr on first open
++     */
++
++    if ( ! (nv->flags & NV_FLAG_OPEN))
++    {
++        if (nv->device_id == 0)
++        {
++            nv_printf(NV_DBG_ERRORS, "NVRM: open of nonexistent device %d\n",
++                devnum);
++            rc = -ENXIO;
++            goto failed;
++        }
++
++        status = request_irq(nv->interrupt_line, nv_kern_isr,
++                             SA_INTERRUPT | SA_SHIRQ, "nvidia",
++                             (void *) nvl);
++        if (status != 0)
++        {
++            if ( nv->interrupt_line && (status == -EBUSY) )
++            {
++                nv_printf(NV_DBG_ERRORS,
++                    "NVRM: Tried to get irq %d, but another driver",
++                    (unsigned int) nv->interrupt_line);
++                nv_printf(NV_DBG_ERRORS, "NVRM: has it and is not sharing it.\n");
++                nv_printf(NV_DBG_ERRORS, "NVRM: you may want to verify that an audio driver");
++                nv_printf(NV_DBG_ERRORS, " isn't using the irq\n");
++            }
++            nv_printf(NV_DBG_ERRORS, "NVRM: isr request failed 0x%x\n", status);
++            rc = -EIO;
++            goto failed;
++        }
++
++        if ( ! rm_init_adapter(nv))
++        {
++            free_irq(nv->interrupt_line, (void *) nvl);
++            nv_printf(NV_DBG_ERRORS, "NVRM: rm_init_adapter failed\n");
++            rc = -EIO;
++            goto failed;
++        }
++
++        nvl->tasklet.func = nv_kern_isr_bh;
++        nvl->tasklet.data = (unsigned long) nv;
++        tasklet_enable(&nvl->tasklet);
++
++        nv->flags |= NV_FLAG_OPEN;
++    }
++
++    NV_ATOMIC_INC(nvl->usage_count);
++
++ failed:
++    nv_up(nvl->ldata_lock);
++
++    if ((rc) && FILE_PRIVATE(file))
++    {
++        nv_free_file_private(FILE_PRIVATE(file));
++        FILE_PRIVATE(file) = NULL;
++    }
++
++    return rc;
++}
++
++
++/*
++** nv_kern_close
++**
++** Master driver close entry point.
++*/
++
++int nv_kern_close(
++    struct inode *inode,
++    struct file *file
++)
++{
++    nv_linux_state_t *nvl = NVL_FROM_FILEP(file);
++    nv_state_t *nv = NV_STATE_PTR(nvl);
++
++    NV_CHECK_PCI_CONFIG(nv);
++
++    /* for the control device, just jump to its close routine */
++    if (NV_IS_CONTROL_DEVICE(inode))
++        return nv_kern_ctl_close(inode, file);
++
++    nv_printf(NV_DBG_INFO, "NVRM: nv_kern_close on device %d\n",
++        NV_DEVICE_NUMBER(inode));
++
++    rm_free_unused_clients(nv, current->pid, (void *) file);
++
++    nv_down(nvl->ldata_lock);
++    if (NV_ATOMIC_DEC_AND_TEST(nvl->usage_count))
++    {
++        /*
++         * The usage count for this device has dropped to zero, it can be shut
++         * down safely; disable its interrupts.
++         */
++        rm_disable_adapter(nv);
++
++        /*
++         * Disable this device's tasklet to make sure that no bottom half will
++         * run with undefined device state.
++         */
++        tasklet_disable(&nvl->tasklet);
++
++        /*
++         * Free the IRQ, which may block until all pending interrupt processing
++         * has completed.
++         */
++        free_irq(nv->interrupt_line, (void *) nvl);
++
++        rm_shutdown_adapter(nv);
++
++        /*
++         * Make sure we have freed up all the mappings. The kernel
++         * should do this automagically before calling close
++         */
++        if (nvl->alloc_queue)
++        {
++            nv_alloc_t *at = nvl->alloc_queue;
++            while (at)
++            {
++                NV_PRINT_AT(at);
++                at = at->next;
++            }
++        }
++
++        /* leave INIT flag alone so we don't reinit every time */
++        nv->flags &= ~NV_FLAG_OPEN;
++    }
++    nv_up(nvl->ldata_lock);
++
++    if (FILE_PRIVATE(file))
++    {
++        nv_free_file_private(FILE_PRIVATE(file));
++        FILE_PRIVATE(file) = NULL;
++    }
++
++    return 0;
++}
++
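++/*
++** nv_kern_mmap
++**
++** nv driver mmap entry point.  The mmap offset selects what backs the
++** mapping: the register BAR, the framebuffer, a previously allocated
++** AGP region, or driver-allocated system pages.
++*/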
++int nv_kern_mmap(
++    struct file  *file,
++    struct vm_area_struct *vma
++)
++{
++    int pages;
++    nv_alloc_t *at;
++    nv_linux_state_t *nvl = NVL_FROM_FILEP(file);
++    nv_state_t *nv = NV_STATE_PTR(nvl);
++
++    nv_printf(NV_DBG_MEMINFO, "NVRM: VM: mmap([0x%p-0x%p] off=0x%lx)\n",
++        vma->vm_start,
++        vma->vm_end,
++        NV_VMA_OFFSET(vma));
++
++    NV_CHECK_PCI_CONFIG(nv);
++
++    // be a bit paranoid for now
++    if ( NV_MASK_OFFSET(vma->vm_start) ||
++         NV_MASK_OFFSET(vma->vm_end))
++    {
++        nv_printf(NV_DBG_ERRORS, 
++            "NVRM: bad mmap range: %lx - %lx\n",
++            vma->vm_start, vma->vm_end);
++        return -ENXIO;
++    }
++
++#if defined(NVCPU_X86)
++    // check for addresses > 32-bits
++    if (vma->vm_pgoff & ~0xfffff)
++    {
++        nv_printf(NV_DBG_ERRORS, 
++            "NVRM: bad mmap offset: %lx\n", vma->vm_pgoff);
++        return -ENXIO;
++    }
++#endif
++
++    pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++
++    // we have our own version to keep the module count right
++    vma->vm_ops = &nv_vm_ops;
++
++    /* NV reg space */
++    if (IS_REG_OFFSET(nv, NV_VMA_OFFSET(vma), vma->vm_end - vma->vm_start))
++    {
++        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++        if (NV_REMAP_PAGE_RANGE(vma->vm_start,
++                             NV_VMA_OFFSET(vma),
++                             vma->vm_end - vma->vm_start,
++                             vma->vm_page_prot))
++            return -EAGAIN;
++
++        /* mark it as IO so that we don't dump it on core dump */
++        vma->vm_flags |= VM_IO;
++    }
++
++    /* NV fb space */
++    else if (IS_FB_OFFSET(nv, NV_VMA_OFFSET(vma), vma->vm_end - vma->vm_start))
++    {
++        vma->vm_page_prot = pgprot_noncached_weak(vma->vm_page_prot);
++        if (NV_REMAP_PAGE_RANGE(vma->vm_start,
++                             NV_VMA_OFFSET(vma),
++                             vma->vm_end - vma->vm_start,
++                             vma->vm_page_prot))
++            return -EAGAIN;
++
++        // mark it as IO so that we don't dump it on core dump
++        vma->vm_flags |= VM_IO;
++    }
++
++    /* AGP allocator */
++    else if (IS_AGP_OFFSET(nv, NV_VMA_OFFSET(vma), vma->vm_end - vma->vm_start))
++    {
++        nv_down(nvl->at_lock);
++        at = nvl_find_alloc(nvl, NV_VMA_OFFSET(vma), NV_ALLOC_TYPE_AGP);
++
++        if (at == NULL)
++        {
++            nv_printf(NV_DBG_ERRORS,
++                "NVRM: couldn't find pre-allocated agp memory!\n");
++            nv_up(nvl->at_lock);
++            return -EAGAIN;
++        }
++
++        if (at->num_pages != pages)
++        {
++            nv_printf(NV_DBG_ERRORS,
++                "NVRM: pre-allocated agp memory has wrong number of pages!\n");
++            nv_up(nvl->at_lock);
++            return -EAGAIN;
++        }
++
++        NV_VMA_PRIVATE(vma) = at;
++        NV_ATOMIC_INC(at->usage_count);
++        nv_up(nvl->at_lock);
++
++        if (NV_OSAGP_ENABLED(nv))
++        {
++#if !defined(NVCPU_IA64) || (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9))
++            KernMapAGPPages(vma, at->priv_data);
++#else
++            /* Note: on IA64 the AGP chipset is cache coherent, so we
++             * leave the AGP allocation mapped cached. */
++#endif
++        }
++        else
++        {
++            struct vm_area_struct *_vma = vma;
++            rm_map_agp_pages(nv, (void **) &_vma, at->class, at->priv_data);
++        }
++        nv_vm_list_page_count(at->page_table, at->num_pages);
++
++        // mark it as IO so that we don't dump it on core dump
++        vma->vm_flags |= VM_IO;
++    }
++
++    /* Magic allocator */
++    else // if (NV_VMA_OFFSET(vma) == NV_MMAP_ALLOCATION_OFFSET)
++    {
++        unsigned long page = 0, start;
++        int i = 0;
++
++        nv_down(nvl->at_lock);
++        at = nvl_find_alloc(nvl, NV_VMA_OFFSET(vma), NV_ALLOC_TYPE_PCI);
++
++        if (at == NULL)
++        {
++            nv_printf(NV_DBG_ERRORS,
++                "NVRM: couldn't find pre-allocated memory!\n");
++            nv_up(nvl->at_lock);
++            return -EAGAIN;
++        }
++
++        if (at->num_pages != pages)
++        {
++            nv_printf(NV_DBG_ERRORS,
++                "NVRM: pre-allocated sys memory has wrong number of pages!\n");
++            nv_up(nvl->at_lock);
++            return -EAGAIN;
++        }
++
++        // allow setting or refusal of specific caching types
++        switch (NV_ALLOC_MAPPING(at->flags))
++        {
++            case NV_MEMORY_DEFAULT:
++            case NV_MEMORY_WRITEBACK:
++                break;
++            case NV_MEMORY_UNCACHED:
++                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++                break;
++            case NV_MEMORY_WRITECOMBINED:
++#if defined(NV_BUILD_NV_PAT_SUPPORT)
++                if (pat_enabled)
++                {
++                    vma->vm_page_prot = pgprot_writecombined(vma->vm_page_prot);
++                    break;
++                }
++#endif
++            case NV_MEMORY_WRITETHRU:
++            case NV_MEMORY_WRITEPROTECT:
++            default:
++                nv_printf(NV_DBG_INFO,
++                    "NVRM: memory caching type 0x%x not supported!\n",
++                    NV_ALLOC_MAPPING(at->flags));
++                nv_up(nvl->at_lock);
++                return -EAGAIN;
++        }
++
++        NV_VMA_PRIVATE(vma) = at;
++        NV_ATOMIC_INC(at->usage_count);
++        nv_up(nvl->at_lock);
++
++        nv_printf(NV_DBG_INFO, "NVRM: remapping %d system pages for at 0x%x\n",
++            pages, at);
++        start = vma->vm_start;
++        while (pages--)
++        {
++            page = (unsigned long) at->page_table[i++]->phys_addr;
++            if (NV_REMAP_PAGE_RANGE(start, page, PAGE_SIZE, vma->vm_page_prot))
++                return -EAGAIN;
++            start += PAGE_SIZE;
++        }
++        nv_vm_list_page_count(at->page_table, at->num_pages);
++
++        /* prevent the swapper from swapping it out */
++        /* mark the memory i/o so the buffers aren't dumped on core dumps */
++        vma->vm_flags |= (VM_IO | VM_LOCKED);
++    }
++
++    vma->vm_file = file;
++
++    return 0;
++}
++
++
++unsigned int nv_kern_poll(
++    struct file *file,
++    poll_table *wait
++)
++{
++    unsigned int mask = 0;
++    nv_file_private_t *nvfp;
++    nv_linux_state_t *nvl;
++    unsigned long eflags;
++
++    nvl = NVL_FROM_FILEP(file);
++
++    if (NV_STATE_PTR(nvl)->device_number == NV_CONTROL_DEVICE_NUMBER)
++        return nv_kern_ctl_poll (file, wait);
++
++    nvfp = NV_GET_NVFP(file);
++
++    if ( !(file->f_flags & O_NONBLOCK))
++    {
++        // add us to the list
++        poll_wait(file, &nvfp->waitqueue, wait);
++    }
++
++    nv_lock_irq(nvfp->fp_lock, eflags);
++
++    // wake the user on any event
++    if (nvfp->num_events)
++    {
++        nv_printf(NV_DBG_EVENTINFO, "NVRM: Hey, an event occurred!\n");
++        // trigger the client, when they grab the event, 
++        // we'll decrement the event count
++        mask |= (POLLPRI|POLLIN);
++    }
++
++    nv_unlock_irq(nvfp->fp_lock, eflags);
++
++    return mask;
++}
++
++//
++// nv_kern_ioctl
++//
++// nv driver ioctl entry point.
++//
++
++/*
++ * some ioctls can only be done on the actual device, others only on the control device
++ */
++#define CTL_DEVICE_ONLY(nv) { if ( ! ((nv)->flags & NV_FLAG_CONTROL)) { status = -EINVAL; goto done; } }
++
++#define ACTUAL_DEVICE_ONLY(nv) { if ((nv)->flags & NV_FLAG_CONTROL) { status = -EINVAL; goto done; } }
++
++
++/* todo:
++   need an ioctl that lets a non-superuser thread raise its priority:
++       set its priority to SCHED_FIFO, which is simple
++       priority scheduling with timeslicing disabled
++*/
++
++int nv_kern_ioctl(
++    struct inode *inode,
++    struct file *file,
++    unsigned int cmd,
++    unsigned long i_arg)
++{
++    int status = 0;
++    nv_linux_state_t *nvl;
++    nv_state_t *nv;
++    void *arg = (void *) i_arg;
++    void *arg_copy;
++    int arg_size;
++
++    nvl = NVL_FROM_FILEP(file);
++    nv = NV_STATE_PTR(nvl);
++
++    nv_printf(NV_DBG_INFO, "NVRM: ioctl(0x%x, 0x%x, 0x%x)\n",
++        _IOC_NR(cmd), (unsigned int) i_arg, _IOC_SIZE(cmd));
++
++    NV_CHECK_PCI_CONFIG(nv);
++
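++    /* stage the user's argument in a kernel buffer (its size is encoded in
++       the ioctl command word); it is copied back to user space on exit */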
++    arg_size = _IOC_SIZE(cmd);
++    NV_KMALLOC(arg_copy, arg_size);
++    if (arg_copy == NULL)
++    {
++        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate ioctl memory\n");
++        return -ENOMEM;
++    }
++
++    if (copy_from_user(arg_copy, arg, arg_size))
++    {
++        nv_printf(NV_DBG_ERRORS, "NVRM: failed to copy ioctl data\n");
++        NV_KFREE(arg_copy, arg_size);
++        return -ENOMEM;
++    }
++
++    switch (_IOC_NR(cmd))
++    {
++        /* pass out info about the card */
++        case NV_ESC_CARD_INFO:
++        {
++            nv_ioctl_card_info_t *ci;
++            nv_linux_state_t *tnvl;
++            nv_ioctl_rm_api_version_t *rm_api;
++            int i;
++
++            CTL_DEVICE_ONLY(nv);
++
++            /* the first element of card info passed from the client will have
++             * the rm_api_version_magic value to show that the client is new
++             * enough to support versioning. If the client is too old to 
++             * support versioning, our mmap interfaces are probably different
++             * enough to cause serious damage.
++             * just copy in the one dword to check.
++             */
++            rm_api = arg_copy;
++            if ((rm_api->magic   != NV_RM_API_VERSION_MAGIC_REQ) ||
++                (rm_api->version != NV_RM_API_VERSION))
++            {
++                if (rm_api->magic != NV_RM_API_VERSION_MAGIC_REQ)
++                {
++                    nv_printf(NV_DBG_ERRORS, 
++                        "NVRM: client does not support versioning!!\n");
++                } else
++                if (rm_api->version != NV_RM_API_VERSION)
++                {
++                    nv_printf(NV_DBG_ERRORS, 
++                        "NVRM: client supports wrong rm api version!!\n");
++                }
++                nv_printf(NV_DBG_ERRORS,
++                    "NVRM:    aborting to avoid catastrophe!\n");
++                rm_api->magic   = NV_RM_API_VERSION_MAGIC_REP;
++                rm_api->version = NV_RM_API_VERSION;
++                rm_api->major   = NV_MAJOR_VERSION;
++                rm_api->minor   = NV_MINOR_VERSION;
++                rm_api->patch   = NV_PATCHLEVEL;
++                status = -EINVAL;
++                break;
++            }
++
++            ci = arg_copy;
++            memset(ci, 0, arg_size);
++            for (i = 0, tnvl = nv_linux_devices; tnvl < nv_linux_devices + NV_MAX_DEVICES; tnvl++, i++)
++            {
++                nv_state_t *tnv = NV_STATE_PTR(tnvl);
++                if (tnv->device_id)
++                {
++                    ci->flags = NV_IOCTL_CARD_INFO_FLAG_PRESENT;
++                    ci->bus = tnv->bus;
++                    ci->slot = tnv->slot;
++                    ci->vendor_id = tnv->vendor_id;
++                    ci->device_id = tnv->device_id;
++                    ci->interrupt_line = tnv->interrupt_line;
++                    ci->reg_address = tnv->bar.regs.address;
++                    ci->reg_size = tnv->bar.regs.size;
++                    ci->fb_address = tnv->bar.fb.address;
++                    ci->fb_size = tnv->bar.fb.size;
++                    ci++;
++                }
++            }
++            break;
++        }
++
++        /* set a card to be posted */
++        case NV_ESC_POST_VBIOS:
++        {
++            nv_ioctl_post_vbios_t *params = arg_copy;
++
++            CTL_DEVICE_ONLY(nv);
++
++            status = nvos_post_vbios(params);
++
++            break;
++        }
++
++        /* get the sim environment info for this setup */
++        case NV_ESC_SIM_ENV:
++        {
++            nv_ioctl_sim_env_t *simenv = arg_copy;
++
++            CTL_DEVICE_ONLY(nv);
++
++            simenv->sim_env = nv->sim_env;
++
++            break;
++        }
++
++        case NV_ESC_RM_API_VERSION:
++        {
++            nv_ioctl_rm_api_version_t *rm_api = arg_copy;
++
++            CTL_DEVICE_ONLY(nv);
++
++            rm_api->version = NV_RM_API_VERSION;
++            rm_api->major   = NV_MAJOR_VERSION;
++            rm_api->minor   = NV_MINOR_VERSION;
++            rm_api->patch   = NV_PATCHLEVEL;
++
++            break;
++        }
++
++
++        default:
++            status = rm_ioctl(nv, file, _IOC_NR(cmd), arg_copy) ? 0 : -EINVAL;
++            break;
++    }
++
++ done:
++    if (RM_OK != copy_to_user(arg, arg_copy, arg_size))
++        nv_printf(NV_DBG_ERRORS, "NVRM: failed to copyout ioctl data\n");
++    NV_KFREE(arg_copy, arg_size);
++    return status;
++}
++
++/*
++ * driver receives an interrupt
++ *    if someone waiting, then hand it off.
++ */
++irqreturn_t nv_kern_isr(
++    int   irq,
++    void *arg,
++    struct pt_regs *regs
++)
++{
++    nv_linux_state_t *nvl = (void *) arg;
++    nv_state_t *nv = NV_STATE_PTR(nvl);
++    U032 need_to_run_bottom_half = 0;
++
++    NV_CHECK_PCI_CONFIG(nv);
++    rm_isr(nv->device_number, &need_to_run_bottom_half);
++    if (need_to_run_bottom_half)
++    {
++        tasklet_schedule(&nvl->tasklet);
++    }
++
++    return IRQ_HANDLED;
++}
++
++void nv_kern_isr_bh(
++    unsigned long data
++)
++{
++    nv_state_t *nv = (nv_state_t *) data;
++    /*
++     * XXX: This level of indirection is necessary to work around
++     * problems with Linux kernels using a non-standard calling
++     * convention, i.e. Arjan van de Ven's/RedHat's 2.6.0 kernels.
++     */
++    NV_CHECK_PCI_CONFIG(nv);
++    rm_isr_bh(nv->pdev);
++}
++
++void nv_kern_rc_timer(
++    unsigned long data
++)
++{
++    nv_linux_state_t *nvl = (nv_linux_state_t *) data;
++
++    // nv_printf(NV_DBG_INFO, "NVRM: rc timer\n");
++
++    NV_CHECK_PCI_CONFIG((nv_state_t *) data);
++    rm_run_rc_callback((nv_state_t *) data);
++    mod_timer(&nvl->rc_timer, jiffies + HZ);  /* set another timeout in 1 second */
++}
++
++#if defined(NV_PM_SUPPORT_APM)
++/* kernel calls us with a power management event */
++int
++nv_kern_apm_event(
++    struct pm_dev *dev,
++    pm_request_t rqst,
++    void *data
++)
++{
++    nv_state_t *nv;
++    nv_linux_state_t *lnv;
++    int devnum;
++    int status = RM_OK;
++
++    nv_printf(NV_DBG_INFO, "NVRM: nv_kern_apm_event: %d (0x%p)\n", rqst, data);
++
++    for (devnum = 0; devnum < num_nv_devices; devnum++)
++    {
++        if (apm_nv_dev[devnum] == dev)
++        {
++            break;
++        }
++    }
++
++    if (devnum == num_nv_devices)
++    {
++        nv_printf(NV_DBG_WARNINGS, "NVRM: APM: invalid device!\n");
++        return 1;
++    }
++
++    lnv = &nv_linux_devices[devnum];
++    nv = NV_STATE_PTR(lnv);
++
++    if (nv->pdev == NULL)
++    {
++        nv_printf(NV_DBG_WARNINGS, "NVRM: APM: device not initialized!\n");
++        return 1;
++    }
++
++    NV_CHECK_PCI_CONFIG(NV_STATE_PTR(lnv));
++
++    switch (rqst)
++    {
++#if defined(CONFIG_APM) || defined(CONFIG_APM_MODULE)
++        case PM_RESUME:
++            nv_printf(NV_DBG_INFO, "NVRM: APM: received resume event\n");
++            status = rm_power_management(nv, 0, NV_PM_APM_RESUME);
++            break;
++
++        case PM_SUSPEND:
++            nv_printf(NV_DBG_INFO, "NVRM: APM: received suspend event\n");
++            status = rm_power_management(nv, 0, NV_PM_APM_SUSPEND);
++            break;
++#endif
++        default:
++            nv_printf(NV_DBG_WARNINGS, "NVRM: APM: unsupported event: %d\n", rqst);
++            return 1;
++    }
++
++    if (status != RM_OK)
++        nv_printf(NV_DBG_ERRORS, "NVRM: APM: failed event: %d\n", rqst);
++
++    return status;
++}
++#endif
++
++/*
++** nv_kern_ctl_open
++**
++** nv control driver open entry point.  Sessions are created here.
++*/
++int nv_kern_ctl_open(
++    struct inode *inode,
++    struct file *file
++)
++{
++    nv_state_t *nv;
++    nv_linux_state_t *nvl;
++    int rc = 0;
++
++    nvl = &nv_ctl_device;
++    nv = (nv_state_t *) nvl;
++
++    nv_printf(NV_DBG_INFO, "NVRM: nv_kern_ctl_open\n");
++
++    nv_down(nvl->ldata_lock);
++
++    nv->device_number = NV_CONTROL_DEVICE_NUMBER;
++
++    /* save the nv away in file->private_data */
++    NVL_FROM_FILEP(file) = nvl;
++
++    if (NV_ATOMIC_READ(nvl->usage_count) == 0)
++    {
++        init_waitqueue_head(&nv_ctl_waitqueue);
++    }
++
++    nv->flags |= NV_FLAG_OPEN | NV_FLAG_CONTROL;
++
++    /* turn off the hotkey occurred bit */
++    nv->flags &= ~NV_FLAG_HOTKEY_OCCURRED;
++
++    NV_ATOMIC_INC(nvl->usage_count);
++    nv_up(nvl->ldata_lock);
++
++    return rc;
++}
++
++
++/*
++** nv_kern_ctl_close
++*/
++int nv_kern_ctl_close(
++    struct inode *inode,
++    struct file *file
++)
++{
++    nv_linux_state_t *nvl =  NVL_FROM_FILEP(file);
++    nv_state_t *nv = NV_STATE_PTR(nvl);
++
++    nv_printf(NV_DBG_INFO, "NVRM: nv_kern_ctl_close\n");
++
++    nv_down(nvl->ldata_lock);
++    if (NV_ATOMIC_DEC_AND_TEST(nvl->usage_count))
++    {
++        nv->flags = 0;
++    }
++    nv_up(nvl->ldata_lock);
++
++    rm_free_unused_clients(nv, current->pid, (void *) file);
++
++    if (FILE_PRIVATE(file))
++    {
++        nv_free_file_private(FILE_PRIVATE(file));
++        FILE_PRIVATE(file) = NULL;
++    }
++
++    return 0;
++}
++
++
++/*
++ * nv_kern_ctl_poll() - add the process to the wait queue
++ */
++
++unsigned int nv_kern_ctl_poll(
++    struct file *file,
++    poll_table *wait
++)
++{
++    nv_linux_state_t *nvl;
++    nv_state_t *nv;
++    unsigned int ret = 0;
++
++    nvl = NVL_FROM_FILEP(file);
++    nv = NV_STATE_PTR(nvl);
++
++    if ( !(file->f_flags & O_NONBLOCK) )
++    {
++        poll_wait(file, &nv_ctl_waitqueue, wait);
++    }
++
++    nv_lock_rm(nv);
++    if (nv->flags & NV_FLAG_HOTKEY_OCCURRED)
++    {
++        nv_printf(NV_DBG_EVENTINFO, "NVRM: a hotkey event has occurred\n");
++        nv->flags &= ~NV_FLAG_HOTKEY_OCCURRED;
++        ret = POLLIN | POLLRDNORM;
++    }
++    nv_unlock_rm(nv);
++
++    return ret;
++}
++
++
++
++
++/*
++ * nv_set_hotkey_occurred_flag() - set the hotkey flag and wake up anybody
++ * waiting on the wait queue
++ */
++
++void NV_API_CALL nv_set_hotkey_occurred_flag(void)
++{
++    nv_state_t *nv = NV_STATE_PTR(&nv_ctl_device);
++
++    nv_printf(NV_DBG_EVENTINFO, "NVRM: setting the hotkey occurred flag!\n");
++
++    nv_lock_rm(nv);
++    nv_ctl_device.nv_state.flags |= NV_FLAG_HOTKEY_OCCURRED;
++    nv_unlock_rm(nv);
++
++    wake_up_interruptible(&nv_ctl_waitqueue);
++}
++
++int nv_kern_read_cardinfo(char *page, char **start, off_t off,
++        int count, int *eof, void *data)
++{
++    struct pci_dev *dev;
++    char *type, *fmt, tmpstr[NV_DEVICE_NAME_LENGTH];
++    int len = 0, status;
++    U032 vbios_rev1, vbios_rev2, vbios_rev3, vbios_rev4, vbios_rev5;
++
++    nv_state_t *nv;
++    nv = (nv_state_t *) data;
++
++    dev = nv_get_pci_device(nv);
++    if (!dev)
++        return 0;
++    
++    if (rm_get_device_name(nv, dev->device, NV_DEVICE_NAME_LENGTH,
++                           tmpstr) != RM_OK) {
++        strcpy (tmpstr, "Unknown");
++    }
++    
++    len += sprintf(page+len, "Model: \t\t %s\n", tmpstr);
++    len += sprintf(page+len, "IRQ:   \t\t %d\n", nv->interrupt_line);
++
++    status = rm_get_vbios_version(nv, &vbios_rev1, &vbios_rev2,
++                                  &vbios_rev3, &vbios_rev4, &vbios_rev5);
++
++    if (status < 0) {
++        /* before rm_init_adapter */
++        len += sprintf(page+len, "Video BIOS: \t ??.??.??.??.??\n");
++    } else {
++        fmt = "Video BIOS: \t %02x.%02x.%02x.%02x.%02x\n";
++        len += sprintf(page+len, fmt, vbios_rev1, vbios_rev2, vbios_rev3,
++                                                  vbios_rev4, vbios_rev5);
++    }
++
++    type = nvos_find_agp_capability(dev) ? "AGP" : "PCI";
++    len += sprintf(page+len, "Card Type: \t %s\n", type);
++
++    NV_PCI_DEV_PUT(dev);
++    return len;
++}
++
++int nv_kern_read_version(char *page, char **start, off_t off,
++        int count, int *eof, void *data)
++{
++    int len = 0;
++    
++    len += sprintf(page+len, "NVRM version: %s\n", pNVRM_ID);
++    len += sprintf(page+len, "GCC version:  %s\n", NV_COMPILER);
++    
++    return len;
++}
++
++int nv_kern_read_agpinfo(char *page, char **start, off_t off,
++        int count, int *eof, void *data)
++{
++    struct pci_dev *dev;
++    char   *fw, *sba;
++    u8     cap_ptr;
++    u32    status, command, agp_rate;
++    int    len = 0;
++    
++    nv_state_t *nv;
++    nv = (nv_state_t *) data;
++
++    if (nv) {
++        dev = nv_get_pci_device(nv);
++        if (!dev)
++            return 0;
++    } else {
++        dev = nvos_get_agp_device_by_class(PCI_CLASS_BRIDGE_HOST);
++        if (!dev)
++            return 0;
++
++        len += sprintf(page+len, "Host Bridge: \t ");
++
++#if defined(CONFIG_PCI_NAMES)
++        len += sprintf(page+len, "%s\n", NV_PCI_DEVICE_NAME(dev));
++#else
++        len += sprintf(page+len, "PCI device %04x:%04x\n",
++                dev->vendor, dev->device);
++#endif
++    }
++
++    /* what can this AGP device do? */
++    cap_ptr = nvos_find_agp_capability(dev);
++
++    pci_read_config_dword(dev, cap_ptr + 4, &status);
++    pci_read_config_dword(dev, cap_ptr + 8, &command);
++
++    fw  = (status & 0x00000010) ? "Supported" : "Not Supported";
++    sba = (status & 0x00000200) ? "Supported" : "Not Supported";
++
++    len += sprintf(page+len, "Fast Writes: \t %s\n", fw);
++    len += sprintf(page+len, "SBA: \t\t %s\n", sba);
++
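++    // the low three status bits advertise the supported rates; in AGP 3.0
++    // mode (status bit 3 set) they are in units of 4x (bit 0 = 4x,
++    // bit 1 = 8x), so shift left by two to get the 1x-based encoding
++    // used below.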
++    agp_rate = status & 0x7;
++    if (status & 0x8) // agp 3.0
++        agp_rate <<= 2;
++
++    len += sprintf(page+len, "AGP Rates: \t %s%s%s%s\n",
++            (agp_rate & 0x00000008) ? "8x " : "",
++            (agp_rate & 0x00000004) ? "4x " : "",
++            (agp_rate & 0x00000002) ? "2x " : "",
++            (agp_rate & 0x00000001) ? "1x " : "");
++
++    len += sprintf(page+len, "Registers: \t 0x%08x:0x%08x\n", status, command);
++
++    NV_PCI_DEV_PUT(dev);
++    return len;
++}
++
++int nv_kern_read_status(char *page, char **start, off_t off,
++        int count, int *eof, void *data)
++{
++    struct pci_dev *dev;
++    char   *fw, *sba, *drv;
++    int    len = 0;
++    u8     cap_ptr;
++    u32    scratch;
++    u32    status, command, agp_rate;
++
++    nv_state_t *nv;
++    nv = (nv_state_t *) data;
++
++    dev = nvos_get_agp_device_by_class(PCI_CLASS_BRIDGE_HOST);
++    if (!dev)
++        return 0;
++    cap_ptr = nvos_find_agp_capability(dev);
++
++    pci_read_config_dword(dev, cap_ptr + 4, &status);
++    pci_read_config_dword(dev, cap_ptr + 8, &command);
++    NV_PCI_DEV_PUT(dev);
++
++    dev = nvos_get_agp_device_by_class(PCI_CLASS_DISPLAY_VGA);
++    if (!dev)
++        return 0;
++    cap_ptr = nvos_find_agp_capability(dev);
++
++    pci_read_config_dword(dev, cap_ptr + 4, &scratch);
++    status &= scratch;
++    pci_read_config_dword(dev, cap_ptr + 8, &scratch);
++    command &= scratch;
++
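++    // bit 8 (0x100) of the AGP command register is AGP_ENABLE; since
++    // 'command' is the AND of the bridge's and the VGA device's command
++    // registers, this checks that both sides have AGP enabled.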
++    if (NV_AGP_ENABLED(nv) && (command & 0x100)) {
++        len += sprintf(page+len, "Status: \t Enabled\n");
++
++        drv = NV_OSAGP_ENABLED(nv) ? "AGPGART" : "NVIDIA";
++        len += sprintf(page+len, "Driver: \t %s\n", drv);
++
++        // mask off the agp rate; if this is agp 3.0, we need to shift
++        // the value
++        agp_rate = command & 0x7;
++        if (status & 0x8) // agp 3.0
++            agp_rate <<= 2;
++
++        len += sprintf(page+len, "AGP Rate: \t %dx\n", agp_rate);
++
++        fw = (command & 0x00000010) ? "Enabled" : "Disabled";
++        len += sprintf(page+len, "Fast Writes: \t %s\n", fw);
++
++        sba = (command & 0x00000200) ? "Enabled" : "Disabled";
++        len += sprintf(page+len, "SBA: \t\t %s\n", sba);
++    } else {
++        int agp_config = 0;
++
++        len += sprintf(page+len, "Status: \t Disabled\n\n");
++
++        /*
++         * If we find AGP is disabled, but the RM registry indicates it
++         * was requested, direct the user to the kernel log (we, or even
++         * the kernel, may have printed a warning or an error message).
++         *
++         * Note that the "XNvAGP" registry key reflects the user request
++         * and overrides the RM "NvAGP" key, if present.
++         */
++        rm_read_registry_dword(nv, "NVreg", "NvAGP",  &agp_config);
++        rm_read_registry_dword(nv, "NVreg", "XNvAGP", &agp_config);
++
++        if (agp_config != NVOS_AGP_CONFIG_DISABLE_AGP && NV_AGP_FAILED(nv)) {
++            len += sprintf(page+len,
++                  "AGP initialization failed, please check the ouput  \n"
++                  "of the 'dmesg' command and/or your system log file \n"
++                  "for additional information on this problem.        \n");
++        }
++    }
++
++    NV_PCI_DEV_PUT(dev);
++    return len;
++}
++
++
++/***
++ *** EXPORTS to rest of resman
++ ***/
++
++void* NV_API_CALL  nv_find_nv_mapping(
++    nv_state_t    *nv,
++    unsigned long  address
++)
++{
++    nv_alloc_t *at;
++
++    at = nvl_find_alloc(NV_GET_NVL_FROM_NV_STATE(nv), address, 
++                                    NV_ALLOC_TYPE_PCI);
++    if (at && at->page_table)
++    {
++        // we've found the mapping and associated 'at' (in theory)
++        // track down the actual page within this allocation and return
++        // a kernel virtual mapping to it.
++        int i;
++        unsigned long offset;
++
++        // save the page offset so we can add it to the returned address
++        // page-align our address to make finding it a little easier
++        offset = address & ~PAGE_MASK;
++        address &= PAGE_MASK;
++
++        for (i = 0; i < at->num_pages; i++)
++        {
++            if (address == at->page_table[i]->phys_addr)
++                return (void *)(at->page_table[i]->virt_addr + offset);
++        }
++    }
++
++    return NULL;
++}
++
++ULONG NV_API_CALL nv_find_dma_mapping(
++    nv_state_t    *nv,
++    unsigned long  address
++)
++{
++    nv_alloc_t *at = NULL;
++
++    at = nvl_find_alloc(NV_GET_NVL_FROM_NV_STATE(nv), address, 
++                        NV_ALLOC_TYPE_PCI);
++    if (at && at->page_table)
++    {
++        // we've found the mapping and associated 'at' (in theory)
++        // track down the actual page within this allocation and return
++        // the corresponding DMA address.
++        int i;
++        unsigned long offset;
++
++        // save the page offset so we can add it to the returned address
++        // page-align our address to make finding it a little easier
++        offset = address & ~PAGE_MASK;
++        address &= PAGE_MASK;
++
++        for (i = 0; i < at->num_pages; i++)
++        {
++            if (address == at->page_table[i]->phys_addr)
++                return (unsigned long)at->page_table[i]->dma_addr + offset;
++        }
++    }
++
++    return 0;
++}
++
++/* Look up the physical address of the page that backs a dma address */
++ULONG  NV_API_CALL nv_dma_to_phys_address(
++    nv_state_t    *nv,
++    ULONG          dma_address
++)
++{
++    nv_alloc_t *at;
++    nv_linux_state_t    *nvl;
++
++    nvl = NV_GET_NVL_FROM_NV_STATE(nv);
++    for (at = nvl->alloc_queue; at; at = at->next)
++    {
++        if (at->page_table)
++        {
++            int i;
++            unsigned long offset = dma_address & ~PAGE_MASK;
++            unsigned long address = dma_address & PAGE_MASK;
++            for (i = 0; i < at->num_pages; i++)
++            {
++                if (address == at->page_table[i]->dma_addr)
++                {
++                    return at->page_table[i]->phys_addr + offset;
++                }
++            }
++        }
++    }
++
++    return 0;
++}
++
++/* For some newer AGP chipsets, such as the 460GX, the user's virtual address
++ * is not mapped directly to the agp aperture in the CPU's page tables.
++ * Instead, it maps to the underlying physical pages. This function is passed
++ * the address of the underlying physical page (which is loaded into the GART)
++ * and returns the agp aperture address that the page is mapped to, so we can
++ * load that page into the graphics card.
++ * We use the standard nvl_find_alloc to search on the physical page and rely
++ * on the TYPE_AGP flag to differentiate it from a PCI allocation.
++ * Failure is fine; we may just be checking whether a given page is agp.
++ */
++void* NV_API_CALL nv_find_agp_kernel_mapping(
++    nv_state_t    *nv,
++    unsigned long  address
++)
++{
++    nv_alloc_t *at = NULL;
++
++    at = nvl_find_alloc(NV_GET_NVL_FROM_NV_STATE(nv), address, 
++                        NV_ALLOC_TYPE_AGP);
++    if (at && at->page_table)
++    {
++        // we've found the mapping and associated 'at' (in theory)
++        // track down the actual page within this allocation and return
++        // the agp aperture mapping to it (key_mapping should be the base
++        // of this aperture mapping, so track down the page within that mapping)
++        int i;
++        for (i = 0; i < at->num_pages; i++)
++        {
++            if (address == (unsigned long) at->page_table[i]->dma_addr)
++            {
++                return (void *)((unsigned long) at->key_mapping + 
++                    (i * PAGE_SIZE));
++            }
++        }
++    }
++
++    return NULL;
++}
++
++
++#if defined(NVCPU_IA64)
++#  define KERN_PAGE_MASK      _PFN_MASK
++#else
++#  define KERN_PAGE_MASK      PAGE_MASK
++#endif
++
++/* virtual address to physical page address */
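++/*
++ * This is a software page-table walk (pgd -> pmd -> pte) performed under
++ * mm->page_table_lock; 'kern' selects init_mm rather than current->mm.
++ */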
++static unsigned long
++_get_phys_address(
++    unsigned long address,
++    int kern
++)
++{
++    struct mm_struct *mm;
++    pgd_t *pgd = NULL;
++    pmd_t *pmd = NULL;
++    pte_t *pte = NULL;
++    unsigned long retval;
++
++    mm = (kern) ? &init_mm : current->mm;
++    spin_lock(&mm->page_table_lock);
++
++    pgd = NV_PGD_OFFSET(address, kern, mm);
++    if (!NV_PGD_PRESENT(pgd))
++        goto failed;
++
++    pmd = NV_PMD_OFFSET(address, pgd);
++    if (!NV_PMD_PRESENT(pmd))
++        goto failed;
++
++    pte = NV_PTE_OFFSET(address, pmd);
++    if (!NV_PTE_PRESENT(pte))
++        goto failed;
++
++    retval = ((NV_PTE_VALUE(pte) & KERN_PAGE_MASK) | NV_MASK_OFFSET(address));
++
++#if defined(NVCPU_X86_64) && defined(_PAGE_NX)
++    // mask out the non-executable page bit for the true physical address
++    retval &= ~_PAGE_NX;
++#endif
++
++    spin_unlock(&mm->page_table_lock);
++    return retval;
++
++failed:
++    spin_unlock(&mm->page_table_lock);
++    return 0;
++}
++
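
_get_phys_address above resolves a virtual address by walking the page
tables level by level, bailing out as soon as an entry is not present. A toy
userspace model of the same walk, with two levels standing in for the
kernel's pgd/pmd/pte chain; sizes and names are invented for illustration:

    #include <stdio.h>

    #define DEMO_LEVEL_BITS   4                       /* 16 entries per level */
    #define DEMO_OFFSET_BITS  12                      /* 4 KB pages */
    #define DEMO_ENTRIES      (1u << DEMO_LEVEL_BITS)

    /* walk a toy two-level table; return 0 when an entry is "not present",
     * mirroring the goto-failed paths in _get_phys_address */
    static unsigned long demo_walk(unsigned long *top[DEMO_ENTRIES],
                                   unsigned long va)
    {
        unsigned idx1 = (va >> (DEMO_LEVEL_BITS + DEMO_OFFSET_BITS))
                        & (DEMO_ENTRIES - 1);
        unsigned idx2 = (va >> DEMO_OFFSET_BITS) & (DEMO_ENTRIES - 1);
        unsigned long off = va & ((1ul << DEMO_OFFSET_BITS) - 1);

        if (top[idx1] == NULL)
            return 0;                     /* level-1 entry not present */
        if (top[idx1][idx2] == 0)
            return 0;                     /* level-2 entry not present */
        return top[idx1][idx2] | off;     /* page frame | page offset */
    }

    int main(void)
    {
        unsigned long level2[DEMO_ENTRIES] = { [3] = 0xabc000ul };
        unsigned long *top[DEMO_ENTRIES]   = { [1] = level2 };

        /* va = idx1 1, idx2 3, offset 0x42 */
        unsigned long va = (1ul << 16) | (3ul << 12) | 0x42;
        printf("phys 0x%lx\n", demo_walk(top, va));   /* prints 0xabc042 */
        return 0;
    }
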
++unsigned long NV_API_CALL nv_get_kern_phys_address(
++    unsigned long address
++)
++{
++    // make sure this address is a kernel pointer
++    // IA64's memory layout is different from X86, at least in some cases.
++    // instead of fine-tuning it, let's just bail, since we're really just 
++    // trying to catch programming mistakes when debugging
++#if defined(DEBUG) && !defined(CONFIG_X86_4G) && !defined(NVCPU_IA64)
++    if (address < PAGE_OFFSET)
++    {
++        nv_printf(NV_DBG_WARNINGS,
++            "NVRM: user address passed to get_kern_phys_address: 0x%lx\n",
++            address);
++        return 0;
++    }
++#endif
++
++#if defined(NVCPU_IA64)
++    if (address > __IA64_UNCACHED_OFFSET)
++        return address - __IA64_UNCACHED_OFFSET;
++#endif
++
++    /* direct-mapped kernel address */
++    if ((address > PAGE_OFFSET) && (address < VMALLOC_START))
++        return __pa(address);
++
++    return _get_phys_address(address, 1);
++}
++
++unsigned long NV_API_CALL nv_get_user_phys_address(
++    unsigned long address
++)
++{
++    // make sure this address is not a kernel pointer
++    // IA64's memory layout is different from X86, at least in some cases.
++    // instead of fine-tuning it, let's just bail, since we're really just 
++    // trying to catch programming mistakes when debugging
++#if defined(DEBUG) && !defined(CONFIG_X86_4G) && !defined(NVCPU_IA64)
++    if (address >= PAGE_OFFSET)
++    {
++        nv_printf(NV_DBG_WARNINGS,
++            "NVRM: kernel address passed to get_user_phys_address: 0x%lx\n",
++            address);
++        return 0;
++    }
++#endif
++
++    return _get_phys_address(address, 0);
++}
++
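
The two wrappers above differ only in which side of PAGE_OFFSET they accept:
a single boundary constant divides kernel pointers from user pointers. A
small sketch of that check, using the classic 32-bit x86 3/1 split purely as
an example value; DEMO_PAGE_OFFSET is not the driver's constant:

    #include <stdio.h>

    #define DEMO_PAGE_OFFSET 0xC0000000ul   /* classic 32-bit x86 3/1 split */

    static const char *classify(unsigned long addr)
    {
        return (addr >= DEMO_PAGE_OFFSET) ? "kernel" : "user";
    }

    int main(void)
    {
        printf("0x08048000 is a %s address\n", classify(0x08048000ul));
        printf("0xC1000000 is a %s address\n", classify(0xC1000000ul));
        return 0;
    }
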
++
++/* allocate memory for DMA push buffers */
++int NV_API_CALL nv_alloc_pages(
++    nv_state_t *nv,
++    void **pAddress,
++    unsigned int page_count,
++    unsigned int agp_memory,
++    unsigned int contiguous,
++    unsigned int cached,
++    unsigned int kernel,
++    unsigned int class,
++    void **priv_data
++)
++{
++    nv_alloc_t *at;
++    RM_STATUS rm_status = 0;
++    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
++
++    nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_alloc_pages: %d pages\n", page_count);
++    nv_printf(NV_DBG_MEMINFO, "NVRM: VM:    agp %d  contig %d  cached %d  kernel %d\n",
++        agp_memory, contiguous, cached, kernel);
++
++    page_count = RM_PAGES_TO_OS_PAGES(page_count);
++    at = nvos_create_alloc(nvl->dev, page_count);
++    if (at == NULL)
++        return RM_ERROR;
++
++    at->class = class;
++    at->flags = nv_alloc_init_flags(cached, agp_memory, contiguous, kernel);
++
++    if (agp_memory)
++    {
++        int offset;
++
++        if (!NV_AGP_ENABLED(nv))
++            goto failed;
++
++        /* allocate agp-able memory */
++        if (NV_OSAGP_ENABLED(nv))
++        {
++            /* agpgart will allocate all of the underlying memory */
++            rm_status = KernAllocAGPPages(nv, pAddress, page_count, priv_data, &offset);
++            if (rm_status)
++                goto failed;
++
++            at->priv_data = *priv_data;
++            nvl_add_alloc(nvl, at);
++        } else {
++            /* use nvidia's nvagp support */
++            if (nv_vm_malloc_pages(nv, at))
++                goto failed;
++
++            at->class = class;
++
++            // set our 'key' to the page_table. rm_alloc_agp_pages will call
++            // nv_translate_address below, which will look up pages using
++            // the value of *pAddress as a key, then index into the page_table.
++            // once we're done with rm_alloc_agp_pages we no longer need
++            // this, and the 'key' will be replaced below
++            *pAddress = at->page_table;
++            at->key_mapping = at->page_table;
++
++            /* the 'at' needs to be added before the alloc agp pages call */
++            nvl_add_alloc(nvl, at);
++            rm_status = rm_alloc_agp_pages(nv,
++                                        pAddress,
++                                        page_count,
++                                        class,
++                                        priv_data,
++                                        &offset);
++            if (rm_status)
++            {
++                nvl_remove_alloc(nvl, at);
++                NV_VM_UNLOCK_AND_FREE_PAGES(nv, NV_ATOMIC_READ(at->usage_count), at);
++                goto failed;
++            }
++            at->priv_data = *priv_data;
++        }
++        // return the physical address of the allocation for mmap
++        // in this case, 'physical address' is within the agp aperture
++        *pAddress = (void *)(NV_UINTPTR_T)(nv->agp.address + (offset << PAGE_SHIFT));
++    }
++    else 
++    {
++
++        if (nv_vm_malloc_pages(nv, at))
++            goto failed;
++
++        if (kernel)
++        {
++            *pAddress = (void *) at->page_table[0]->virt_addr;
++        }
++        else
++        {
++            /* the address must be page-aligned or mmap will fail,
++             * so use the first page, which is page-aligned. this way, our
++             * allocated page table does not need to be page-aligned
++             */
++            *pAddress = (void *) at->page_table[0]->phys_addr;
++        }
++
++        nvl_add_alloc(nvl, at);
++    }
++
++    at->key_mapping = *pAddress;
++    NV_ATOMIC_INC(at->usage_count);
++
++    return RM_OK;
++
++failed:
++    nvos_free_alloc(at);
++
++    return RM_ERROR;
++}
++
++#define NV_FAILED_TO_FIND_AT(nv, addr) \
++    { \
++        nv_up(nvl->at_lock); \
++        nv_printf(NV_DBG_ERRORS, "NVRM: couldn't find alloc for 0x%p\n", addr); \
++        return -1; \
++    }
++
++int NV_API_CALL nv_free_pages(
++    nv_state_t *nv,
++    void **pAddress,
++    unsigned int page_count,
++    unsigned int agp_memory,
++    void *priv_data
++)
++{
++    int rmStatus = 0;
++    nv_alloc_t *at;
++    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
++
++    page_count = RM_PAGES_TO_OS_PAGES(page_count);
++    nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_free_pages: 0x%x 0x%x\n",
++        *pAddress, page_count);
++
++    if (agp_memory)
++    {
++        if (!NV_AGP_ENABLED(nv))
++            return -1;
++
++        /* hold at_lock only while removing 'at' from the list */
++        nv_down(nvl->at_lock);
++        at = nvl_find_alloc(nvl, (unsigned long) *pAddress, NV_ALLOC_TYPE_AGP);
++        if (at == NULL)
++            NV_FAILED_TO_FIND_AT(nv, *pAddress);
++        if (at->num_pages != page_count)
++            NV_FAILED_TO_FIND_AT(nv, *pAddress);
++        nvl_remove_alloc(nvl, at);
++        nv_up(nvl->at_lock);
++
++        NV_ATOMIC_DEC(at->usage_count);
++
++        if (NV_OSAGP_ENABLED(nv))
++        {
++            rmStatus = KernFreeAGPPages(nv, pAddress, priv_data);
++        } else {
++            rmStatus = rm_free_agp_pages(nv, pAddress, priv_data);
++            if (rmStatus == RM_OK)
++            {
++                NV_VM_UNLOCK_AND_FREE_PAGES(nv, NV_ATOMIC_READ(at->usage_count), at);
++            }
++        }
++    } else {
++        /* hold at_lock only while removing 'at' from the list */
++        nv_down(nvl->at_lock);
++        at = nvl_find_alloc(nvl, (unsigned long) *pAddress, NV_ALLOC_TYPE_PCI);
++        if (at == NULL)
++            NV_FAILED_TO_FIND_AT(nv, *pAddress);
++        if (at->num_pages != page_count)
++            NV_FAILED_TO_FIND_AT(nv, *pAddress);
++        nvl_remove_alloc(nvl, at);
++        nv_up(nvl->at_lock);
++
++        NV_ATOMIC_DEC(at->usage_count);
++
++        NV_VM_UNLOCK_AND_FREE_PAGES(nv, NV_ATOMIC_READ(at->usage_count), at);
++    }
++
++    if (NV_ATOMIC_READ(at->usage_count) == 0)
++        nvos_free_alloc(at);
++
++    return rmStatus;
++}
++
++
++static void nv_lock_init_locks
++( 
++    nv_state_t *nv
++)
++{
++    nv_linux_state_t *nvl;
++    nvl = NV_GET_NVL_FROM_NV_STATE(nv);
++
++    spin_lock_init(&nvl->rm_lock);
++
++    sema_init(&nvl->ldata_lock, 1);
++    sema_init(&nvl->at_lock, 1);
++    NV_ATOMIC_SET(nvl->usage_count, 0);
++
++    nvl->rm_lock_cpu = -1;
++    nvl->rm_lock_count = 0;
++}
++
++void NV_API_CALL nv_lock_rm(
++    nv_state_t *nv
++)
++{
++    nv_linux_state_t *nvl;
++    int cpu;
++
++    nvl = NV_GET_NVL_FROM_NV_STATE(nv);
++    cpu = get_cpu();
++
++    if (nvl->rm_lock_cpu == cpu)
++    {
++        nvl->rm_lock_count++;
++        put_cpu();
++        return;
++    }
++
++    put_cpu();
++    spin_unlock_wait(&nvl->rm_lock);
++    spin_lock_irq(&nvl->rm_lock);
++
++    nvl->rm_lock_cpu = smp_processor_id();
++    nvl->rm_lock_count = 1;
++}
++
++void NV_API_CALL nv_unlock_rm(
++    nv_state_t *nv
++)
++{
++    nv_linux_state_t *nvl;
++    nvl = NV_GET_NVL_FROM_NV_STATE(nv);
++
++    if (--nvl->rm_lock_count)
++        return;
++
++    nvl->rm_lock_cpu = -1;
++    spin_unlock_irq(&nvl->rm_lock);
++}
++
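
nv_lock_rm/nv_unlock_rm above implement a recursion-counting lock: the CPU
that already owns rm_lock just bumps a count instead of deadlocking on
itself. A hedged pthreads model of the same pattern, with thread identity
standing in for the CPU id; as in the driver's cpu check, the unlocked owner
test can only succeed in the context that took the lock. All demo_ names are
invented:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_t demo_owner;
    static int demo_owned = 0;
    static int demo_count = 0;

    static void rec_lock(void)
    {
        if (demo_owned && pthread_equal(demo_owner, pthread_self())) {
            demo_count++;              /* re-entry: bump the nesting count */
            return;
        }
        pthread_mutex_lock(&demo_mutex);
        demo_owner = pthread_self();
        demo_owned = 1;
        demo_count = 1;
    }

    static void rec_unlock(void)
    {
        if (--demo_count)
            return;                    /* still nested */
        demo_owned = 0;
        pthread_mutex_unlock(&demo_mutex);
    }

    int main(void)
    {
        rec_lock();
        rec_lock();                    /* nested acquire does not deadlock */
        rec_unlock();
        rec_unlock();
        printf("lock released\n");
        return 0;
    }
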
++/*
++** post the event
++*/
++ 
++void NV_API_CALL nv_post_event(
++    nv_state_t *nv,
++    nv_event_t *event,
++    U032        handle,
++    U032        index
++)
++{
++    struct file *file = (struct file *) event->file;
++    nv_file_private_t *nvfp = NV_GET_NVFP(file);
++    unsigned long eflags;
++
++    nv_printf(NV_DBG_EVENTINFO, "NVRM: posting event on 0x%x:0x%x\n",
++        event, nvfp);
++
++    nv_lock_irq(nvfp->fp_lock, eflags);
++
++    if (nvfp->num_events == NV_EVENT_FIFO_SIZE)
++    {
++        wake_up_interruptible(&nvfp->waitqueue);
++        nv_unlock_irq(nvfp->fp_lock, eflags);
++        return;
++    }
++
++    // copy the event into the queue
++    nvfp->event_fifo[nvfp->put] = *event;
++
++    // set the handle for this event
++    nvfp->event_fifo[nvfp->put].hObject = handle;
++    nvfp->event_fifo[nvfp->put].index   = index;
++    
++    nvfp->num_events++;
++    nvfp->put++;
++    if (nvfp->put >= NV_EVENT_FIFO_SIZE)
++        nvfp->put = 0;
++
++    wake_up_interruptible(&nvfp->waitqueue);
++    nv_unlock_irq(nvfp->fp_lock, eflags);
++}
++
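
The event queue above is a classic fixed-size ring: the put and get cursors
wrap at NV_EVENT_FIFO_SIZE, num_events tracks occupancy, and a full queue
drops the new event (the driver just wakes readers). A minimal
single-threaded model of the same cursor arithmetic, locking omitted and all
names illustrative:

    #include <stdio.h>

    #define DEMO_FIFO_SIZE 16

    struct demo_ring { int buf[DEMO_FIFO_SIZE]; int put, get, count; };

    static int ring_post(struct demo_ring *r, int ev)
    {
        if (r->count == DEMO_FIFO_SIZE)
            return -1;                /* full: drop, as the driver does */
        r->buf[r->put] = ev;
        r->count++;
        if (++r->put >= DEMO_FIFO_SIZE)
            r->put = 0;               /* wrap the producer cursor */
        return 0;
    }

    static int ring_get(struct demo_ring *r, int *ev)
    {
        if (r->count == 0)
            return -1;                /* empty */
        *ev = r->buf[r->get];
        r->count--;
        if (++r->get >= DEMO_FIFO_SIZE)
            r->get = 0;               /* wrap the consumer cursor */
        return 0;
    }

    int main(void)
    {
        struct demo_ring r = { .put = 0, .get = 0, .count = 0 };
        int ev;
        ring_post(&r, 42);
        if (ring_get(&r, &ev) == 0)
            printf("got %d\n", ev);
        return 0;
    }
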
++int NV_API_CALL nv_get_event(
++    nv_state_t *nv,
++    void *void_file,
++    nv_event_t *event,
++    U032 *more_events
++)
++{
++    struct file *file = (struct file *) void_file;
++    nv_file_private_t *nvfp = NV_GET_NVFP(file);
++    unsigned long eflags;
++
++    nv_lock_irq(nvfp->fp_lock, eflags);
++    if (nvfp->num_events == 0)
++    {
++        nv_unlock_irq(nvfp->fp_lock, eflags);
++        return -1;
++    }
++
++    *event = nvfp->event_fifo[nvfp->get];
++    nvfp->num_events--;
++    nvfp->get++;
++    if (nvfp->get >= NV_EVENT_FIFO_SIZE)
++        nvfp->get = 0;
++
++    if (more_events)
++        *more_events = nvfp->num_events;
++
++    nv_printf(NV_DBG_EVENTINFO, "NVRM: returning event: 0x%x\n", event);
++    nv_printf(NV_DBG_EVENTINFO, "NVRM:     hParent: 0x%x\n", event->hParent);
++    nv_printf(NV_DBG_EVENTINFO, "NVRM:     hObject: 0x%x\n", event->hObject);
++    nv_printf(NV_DBG_EVENTINFO, "NVRM:     file:    0x%p\n", event->file);
++    nv_printf(NV_DBG_EVENTINFO, "NVRM:     fd:      %d\n", event->fd);
++    if (more_events)
++        nv_printf(NV_DBG_EVENTINFO, "NVRM: more events: %d\n", *more_events);
++
++    nv_unlock_irq(nvfp->fp_lock, eflags);
++
++    return 0;
++}
++
++
++int NV_API_CALL nv_agp_init(
++    nv_state_t *nv,
++    void **phys_start,
++    void **linear_start,
++    void *agp_limit,
++    U032 config         /* passed in from XF86Config file */
++)
++{
++    U032 status = 1;
++    static int old_error = 0;
++
++    if (NV_AGP_ENABLED(nv))
++        return -1;
++
++    if (config == NVOS_AGP_CONFIG_DISABLE_AGP)
++    {
++        nv->agp_config = NVOS_AGP_CONFIG_DISABLE_AGP;
++        nv->agp_status = NV_AGP_STATUS_DISABLED;
++        return 0;
++    }
++
++    nv_printf(NV_DBG_SETUP, "NVRM: nv_agp_init\n");
++
++    nv->agp_config = NVOS_AGP_CONFIG_DISABLE_AGP;
++    nv->agp_status = NV_AGP_STATUS_FAILED;
++
++    if (config & NVOS_AGP_CONFIG_OSAGP)
++    {
++        status = KernInitAGP(nv, phys_start, linear_start, agp_limit);
++
++        /* if enabling agpgart was successful, register it
++         * and check for overrides
++         */
++        if (status == 0)
++        {
++            nv->agp_config = NVOS_AGP_CONFIG_OSAGP;
++            nv->agp_status = NV_AGP_STATUS_ENABLED;
++
++            /* make sure we apply our overrides in this case */
++            rm_update_agp_config(nv);
++        }
++
++        if (status == 1 && !(config & NVOS_AGP_CONFIG_NVAGP) && !old_error)
++        {
++            nv_printf(NV_DBG_ERRORS,
++                "NVRM: unable to initialize the Linux AGPGART driver, please \n"
++                "NVRM: verify you configured your kernel to include support  \n"
++                "NVRM: for AGPGART (either statically linked, or as a kernel \n"
++                "NVRM: module). Please also make sure you selected support   \n"
++                "NVRM: for your AGP chipset.                                 \n");
++#if defined(KERNEL_2_6)
++            nv_printf(NV_DBG_ERRORS,
++                "NVRM:                                                       \n"
++                "NVRM: note that as of Linux 2.6 AGPGART, all chipset/vendor \n"
++                "NVRM: drivers are split into independent modules; make sure \n"
++                "NVRM: the correct one is loaded for your chipset.           \n");
++#endif
++            old_error = 1;
++        }
++
++        /* if agpgart is loaded, but we failed to initialize it,
++         * we'd better not attempt nvagp, or we're likely to lock
++         * the machine.
++         */
++        if (status < 0)
++            return status;
++    }
++
++    /* we're either explicitly not using agpgart,
++     * or trying to use agpgart failed
++     * make sure the user did not specify "use agpgart only"
++     */
++    if ( (!NV_AGP_ENABLED(nv)) && (config & NVOS_AGP_CONFIG_NVAGP) )
++    {
++        /* make sure the user does not have agpgart loaded */
++        if (inter_module_get("drm_agp")) {
++            inter_module_put("drm_agp");
++            nv_printf(NV_DBG_WARNINGS, "NVRM: not using NVAGP, AGPGART is loaded!!\n");
++        } else {
++#if defined(CONFIG_X86_64) && defined(CONFIG_GART_IOMMU)
++            nv_printf(NV_DBG_WARNINGS,
++                "NVRM: not using NVAGP, kernel was compiled with GART_IOMMU support!!\n");
++#else
++            status = rm_init_agp(nv);
++            if (status == RM_OK)
++            {
++                nv->agp_config = NVOS_AGP_CONFIG_NVAGP;
++                nv->agp_status = NV_AGP_STATUS_ENABLED;
++            }
++#endif
++        }
++    }
++
++    if (NV_AGP_ENABLED(nv))
++        old_error = 0; /* report new errors */
++
++    nv_printf(NV_DBG_SETUP, 
++        "NVRM: agp_init finished with status 0x%x and config %d\n",
++        status, nv->agp_config);
++
++    return status;
++}
++
++int NV_API_CALL nv_agp_teardown(
++    nv_state_t *nv
++)
++{
++    U032 status = 1;
++
++    nv_printf(NV_DBG_SETUP, "NVRM: nv_agp_teardown\n");
++
++    /* little sanity check won't hurt */
++    if (!NV_AGP_ENABLED(nv))
++        return -1;
++
++    if (NV_OSAGP_ENABLED(nv))
++        status = KernTeardownAGP(nv);
++    else if (NV_NVAGP_ENABLED(nv))
++        status = rm_teardown_agp(nv);
++
++    nv->agp_config = NVOS_AGP_CONFIG_DISABLE_AGP;
++    nv->agp_status = NV_AGP_STATUS_DISABLED;
++
++    nv_printf(NV_DBG_SETUP, "NVRM: teardown finished with status 0x%x\n", 
++        status);
++
++    return status;
++}
++
++int NV_API_CALL nv_translate_address(
++    nv_state_t *nv,
++    ULONG       base,
++    U032        index,
++    U032       *paddr
++)
++{
++    nv_alloc_t *at;
++    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
++
++    /* find the 'at' */
++    at = nvl_find_alloc(nvl, base, NV_ALLOC_TYPE_AGP | NV_ALLOC_TYPE_PCI);
++    if (at == NULL)
++        return RM_ERROR;
++
++    if (index >= at->num_pages)
++    {
++        nv_printf(NV_DBG_ERRORS,
++            "NVRM: translate_address: at has an inconsistent number of pages\n");
++        return RM_ERROR;
++    }
++
++    /* get the physical address of this page */
++    *paddr = (U032) ((NV_UINTPTR_T)at->page_table[index]->dma_addr);
++
++    return RM_OK;
++}
++
++
++int NV_API_CALL nv_int10h_call(
++    nv_state_t *nv,
++    U032 *eax,
++    U032 *ebx,
++    U032 *ecx,
++    U032 *edx,
++    void *buffer
++)
++{
++    return -1;
++}
++
++/* set a timer to go off every second */
++int NV_API_CALL nv_start_rc_timer(
++    nv_state_t *nv
++)
++{
++    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
++
++    if (nv->rc_timer_enabled)
++        return -1;
++
++    nv_printf(NV_DBG_INFO, "NVRM: initializing rc timer\n");
++    init_timer(&nvl->rc_timer);
++    nvl->rc_timer.function = nv_kern_rc_timer;
++    nvl->rc_timer.data = (unsigned long) nv;
++    nv->rc_timer_enabled = 1;
++    mod_timer(&nvl->rc_timer, jiffies + HZ); /* set our timeout for 1 second */
++    nv_printf(NV_DBG_INFO, "NVRM: rc timer initialized\n");
++
++    return 0;
++}
++
++int NV_API_CALL nv_stop_rc_timer(
++    nv_state_t *nv
++)
++{
++    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
++
++    if (!nv->rc_timer_enabled)
++        return -1;
++
++    nv_printf(NV_DBG_INFO, "NVRM: stopping rc timer\n");
++    nv->rc_timer_enabled = 0;
++    del_timer(&nvl->rc_timer);
++    nv_printf(NV_DBG_INFO, "NVRM: rc timer stopped\n");
++
++    return 0;
++}
++
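
The two functions above follow the standard 2.6-era timer pattern:
initialize the timer, point function/data at the callback, arm it one second
out with mod_timer(), and delete it on stop; the callback (nv_kern_rc_timer,
defined elsewhere) presumably re-arms itself each tick. A hedged skeleton of
that pattern as a stand-alone module — the demo_ names are made up, and this
targets the old pre-4.15 timer API that the patch itself uses:

    #include <linux/module.h>
    #include <linux/init.h>
    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static struct timer_list demo_timer;

    static void demo_timer_fn(unsigned long data)
    {
        /* do the periodic work here, then re-arm one second out */
        mod_timer(&demo_timer, jiffies + HZ);
    }

    static int __init demo_init(void)
    {
        init_timer(&demo_timer);
        demo_timer.function = demo_timer_fn;
        demo_timer.data = 0;
        mod_timer(&demo_timer, jiffies + HZ);   /* first tick in 1 second */
        return 0;
    }

    static void __exit demo_exit(void)
    {
        del_timer_sync(&demo_timer);            /* wait out a running tick */
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
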
++/* make sure the pci_driver called probe for all of our devices.
++ * we've seen cases where rivafb claims the device first and our driver
++ * doesn't get called.
++ */
++static int
++nvos_count_devices(void)
++{
++    struct pci_dev *dev;
++    int count = 0;
++
++    dev = NV_PCI_GET_CLASS(PCI_CLASS_DISPLAY_VGA << 8, NULL);
++    while (dev)
++    {
++        if ((dev->vendor == 0x10de) && (dev->device >= 0x20))
++            count++;
++        dev = NV_PCI_GET_CLASS(PCI_CLASS_DISPLAY_VGA << 8, dev);
++    }
++    return count;
++}
++
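
nvos_count_devices iterates VGA-class devices through the NV_PCI_GET_CLASS
wrapper (the 03_pci_get_class patch named in this commit suggests it maps to
pci_get_class() on recent kernels, though that is an assumption here). A
sketch of the same loop with the bare 2.6 API; pci_get_class() takes a
reference on the device it returns and drops the one passed in, so a loop
that runs to completion leaks nothing:

    #include <linux/pci.h>

    /* count VGA-class devices from a given vendor; illustrative only */
    static int demo_count_vga(unsigned short vendor)
    {
        struct pci_dev *dev = NULL;
        int count = 0;

        while ((dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, dev)) != NULL) {
            if (dev->vendor == vendor)
                count++;
            /* breaking out early would require pci_dev_put(dev) */
        }
        return count;
    }
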
++/* find nvidia devices and set initial state */
++int
++nv_kern_probe
++(
++    struct pci_dev *dev,
++    const struct pci_device_id *id_table
++)
++{
++    nv_state_t *nv;
++    nv_linux_state_t *nvl;
++
++    if ((dev->vendor != 0x10de) || (dev->device < 0x20) || 
++        (dev->class != (PCI_CLASS_DISPLAY_VGA << 8)))
++    {
++        return -1;
++    }
++
++    if (num_nv_devices == NV_MAX_DEVICES)
++    {
++        nv_printf(NV_DBG_ERRORS, "NVRM: maximum device number (%d) reached!\n", num_nv_devices);
++        return -1;
++    }
++
++    // enable io, mem, and bus-mastering in pci config space
++    if (pci_enable_device(dev) != 0)
++    {
++        nv_printf(NV_DBG_ERRORS,
++            "NVRM: pci_enable_device failed, aborting\n");
++        return -1;
++    }
++
++    // request ownership of our BARs; this keeps other drivers from
++    // banging our registers. only do this for the register BAR, as vesafb
++    // requests our framebuffer region and would keep us from working properly
++    if (!request_mem_region(NV_PCI_RESOURCE_START(dev, 1),
++                            NV_PCI_RESOURCE_SIZE(dev, 1), "nvidia"))
++    {
++        nv_printf(NV_DBG_ERRORS,
++            "NVRM: request_mem_region failed for 0x%x:0x%x, aborting\n",
++             NV_PCI_RESOURCE_START(dev, 1), NV_PCI_RESOURCE_SIZE(dev, 1));
++        goto err_disable_dev;
++    }
++    pci_set_master(dev);
++
++    /* initialize bus-dependent config state */
++    nvl = &nv_linux_devices[num_nv_devices];
++    nv  = NV_STATE_PTR(nvl);
++
++    nvl->dev          = dev;
++    nv->vendor_id     = dev->vendor;
++    nv->device_id     = dev->device;
++    nv->os_state      = (void *) nvl;
++    nv->bus           = NV_PCI_BUS_NUMBER(dev);
++    nv->slot          = NV_PCI_SLOT_NUMBER(dev);
++
++    nv_lock_init_locks(nv);
++    
++
++    nv->bar.regs.address = NV_PCI_RESOURCE_START(dev, 1);
++    nv->bar.regs.size    = NV_PCI_RESOURCE_SIZE(dev, 1);
++
++    nv->bar.fb.address   = NV_PCI_RESOURCE_START(dev, 2);
++    nv->bar.fb.size      = NV_PCI_RESOURCE_SIZE(dev, 2);
++    
++    nv->interrupt_line = dev->irq;
++
++    /* check common error condition */
++    if (nv->interrupt_line == 0)
++    {
++        nv_printf(NV_DBG_ERRORS, "NVRM: Can't find an IRQ for your NVIDIA card!  \n");
++        nv_printf(NV_DBG_ERRORS, "NVRM: Please check your BIOS settings.         \n");
++        nv_printf(NV_DBG_ERRORS, "NVRM: [Plug & Play OS   ] should be set to NO  \n");
++        nv_printf(NV_DBG_ERRORS, "NVRM: [Assign IRQ to VGA] should be set to YES \n");
++        goto err_zero_dev;
++    }
++
++    /* sanity check the IO apertures */
++    if ( (nv->bar.regs.address == 0) || (nv->bar.regs.size == 0) ||
++         (nv->bar.fb.address   == 0) || (nv->bar.fb.size   == 0))
++    {
++        nv_printf(NV_DBG_ERRORS, "NVRM: The IO regions for your NVIDIA card are invalid.\n");
++        nv_printf(NV_DBG_ERRORS, "NVRM: Your system BIOS may have misconfigured your graphics card.\n");
++
++        if ((nv->bar.regs.address == 0) || (nv->bar.regs.size == 0))
++        {
++            nv_printf(NV_DBG_ERRORS,
++                "NVRM: bar0 (registers) appears to be wrong: 0x%x 0x%x\n",
++                nv->bar.regs.address, nv->bar.regs.size);
++        }
++
++        if ((nv->bar.fb.address == 0) || (nv->bar.fb.size == 0))
++        {
++            nv_printf(NV_DBG_ERRORS,
++                "NVRM: bar1 (framebuffer) appears to be wrong: 0x%x 0x%x\n",
++                nv->bar.fb.address, nv->bar.fb.size);
++        }
++
++        goto err_zero_dev;
++    }
++
++
++#if defined(NV_BUILD_NV_PAT_SUPPORT)
++    if (nvos_find_pci_express_capability(nvl->dev))
++    {
++        __nv_enable_pat_support();
++    }
++#endif
++
++#if defined(NV_MAP_REGISTERS_EARLY)
++    nv->bar.regs.map = os_map_kernel_space(nv->bar.regs.address,
++                                           nv->bar.regs.size,
++                                           NV_MEMORY_UNCACHED);
++    nv->bar.regs.map_u = (nv_phwreg_t) nv->bar.regs.map;
++    if (nv->bar.regs.map == NULL)
++    {
++        nv_printf(NV_DBG_ERRORS, "NVRM: failed to map registers!!\n");
++        goto err_zero_dev;
++    }
++    nv->flags |= NV_FLAG_MAP_REGS_EARLY;
++#endif
++
++    nv_printf(NV_DBG_INFO, "NVRM: %02x:%02x.%x %04x:%04x - 0x%08x [size=%dM]\n",
++            nv->bus, nv->slot, PCI_FUNC(dev->devfn),
++            nv->vendor_id, nv->device_id, nv->bar.regs.address,
++            nv->bar.regs.size / (1024 * 1024));
++    nv_printf(NV_DBG_INFO, "NVRM: %02x:%02x.%x %04x:%04x - 0x%08x [size=%dM]\n",
++            nv->bus, nv->slot, PCI_FUNC(dev->devfn),
++            nv->vendor_id, nv->device_id, nv->bar.fb.address,
++            nv->bar.fb.size / (1024 * 1024));
++
++    num_nv_devices++;
++
++    return 0;
++
++err_zero_dev:
++    os_mem_set(nvl, 0, sizeof(nv_linux_state_t));
++    release_mem_region(NV_PCI_RESOURCE_START(dev, 1),
++                       NV_PCI_RESOURCE_SIZE(dev, 1));
++
++err_disable_dev:
++    NV_PCI_DISABLE_DEVICE(dev);
++    return -1;
++}
++
++int NV_API_CALL nv_no_incoherent_mappings
++(
++    void
++)
++{
++#ifdef NV_CHANGE_PAGE_ATTR_PRESENT
++    return 1;
++#else
++    return 0;
++#endif
++}
++
++#if defined(NV_PM_SUPPORT_ACPI)
++
++int
++nv_acpi_event
++(
++    struct pci_dev *dev, 
++    u32 state
++)
++{
++    nv_state_t *nv;
++    nv_linux_state_t *lnv = NULL;
++    int status = RM_OK, i;
++
++    nv_printf(NV_DBG_INFO, "NVRM: nv_acpi_event: %d\n", state);
++
++    for (i = 0; i < NV_MAX_DEVICES; i++)
++    {
++        if (nv_linux_devices[i].dev == dev)
++        {
++            lnv = &nv_linux_devices[i];
++            break;
++        }
++    }
++
++    if ((!lnv) || (lnv->dev != dev))
++    {
++        nv_printf(NV_DBG_WARNINGS, "NVRM: ACPI: invalid device!\n");
++        return -1;
++    }
++
++    nv = NV_STATE_PTR(lnv);
++    if (nv->pdev == NULL)
++    {
++        nv_printf(NV_DBG_WARNINGS, "NVRM: ACPI: device not initialized!\n");
++        return -1;
++    }
++
++    switch (state)
++    {
++        case PM_SUSPEND_MEM:
++            nv_printf(NV_DBG_INFO, "NVRM: ACPI: received suspend event\n");
++            status = rm_power_management(nv, 0, NV_PM_ACPI_STANDBY);
++            break;
++
++        case PM_SUSPEND_ON:
++            nv_printf(NV_DBG_INFO, "NVRM: ACPI: received resume event\n");
++            status = rm_power_management(nv, 0, NV_PM_ACPI_RESUME);
++            break;
++
++        default:
++            nv_printf(NV_DBG_WARNINGS, "NVRM: ACPI: unsupported event: %d\n", state);
++            return -1;
++    }
++
++    if (status != RM_OK)
++        nv_printf(NV_DBG_ERRORS, "NVRM: ACPI: failed event: %d\n", state);
++
++    return status;
++}
++
++int
++nv_kern_acpi_standby
++(
++    struct pci_dev *dev, 
++    u32 state
++)
++{
++    return nv_acpi_event(dev, state);
++}
++
++int
++nv_kern_acpi_resume
++(
++    struct pci_dev *dev
++)
++{
++    return nv_acpi_event(dev, PM_SUSPEND_ON);
++}
++
++#endif
+diff -ruN nvidia-kernel.orig/nv/os-agp.c nvidia-kernel/nv/os-agp.c
+--- nvidia-kernel.orig/nv/os-agp.c	2005-01-11 17:19:49.000000000 -0800
++++ nvidia-kernel/nv/os-agp.c	2005-02-18 15:08:12.957056670 -0800
+@@ -25,6 +25,13 @@
+ 
+ #ifdef AGPGART
+ 
++#if defined(KERNEL_2_6)
++typedef struct agp_kern_info agp_kern_info;
++typedef struct agp_memory agp_memory;
++#elif defined(KERNEL_2_4)
++const drm_agp_t *drm_agp_p; /* functions */
++#endif
++
+ typedef struct {
+     agp_memory *ptr;
+     int num_pages;
+@@ -45,7 +52,6 @@
+ 
+ agp_kern_info         agpinfo;
+ agp_gart              gart;
+-const drm_agp_t       *drm_agp_p;
+ 
+ #if defined(CONFIG_MTRR)
+ #define MTRR_DEL(gart) if ((gart).mtrr > 0) mtrr_del((gart).mtrr, 0, 0);
+@@ -53,6 +59,26 @@
+ #define MTRR_DEL(gart)
+ #endif
+ 
++#if defined(KERNEL_2_6)
++#define NV_AGPGART_BACKEND_ACQUIRE(o) agp_backend_acquire()
++#define NV_AGPGART_BACKEND_ENABLE(o,mode) agp_enable(mode)
++#define NV_AGPGART_BACKEND_RELEASE(o) agp_backend_release()
++#define NV_AGPGART_COPY_INFO(o,p) agp_copy_info(p)
++#define NV_AGPGART_ALLOCATE_MEMORY(o,count,type) agp_allocate_memory(count,type)
++#define NV_AGPGART_FREE_MEMORY(o,p) agp_free_memory(p)
++#define NV_AGPGART_BIND_MEMORY(o,p,offset) agp_bind_memory(p,offset)
++#define NV_AGPGART_UNBIND_MEMORY(o,p) agp_unbind_memory(p)
++#elif defined(KERNEL_2_4)
++#define NV_AGPGART_BACKEND_ACQUIRE(o) ({ (o)->acquire(); 0; })
++#define NV_AGPGART_BACKEND_ENABLE(o,mode) (o)->enable(mode)
++#define NV_AGPGART_BACKEND_RELEASE(o) ((o)->release())
++#define NV_AGPGART_COPY_INFO(o,p) ({ (o)->copy_info(p); 0; })
++#define NV_AGPGART_ALLOCATE_MEMORY(o,count,type) (o)->allocate_memory(count,type)
++#define NV_AGPGART_FREE_MEMORY(o,p) (o)->free_memory(p)
++#define NV_AGPGART_BIND_MEMORY(o,p,offset) (o)->bind_memory(p,offset)
++#define NV_AGPGART_UNBIND_MEMORY(o,p) (o)->unbind_memory(p)
++#endif
++
+ #endif /* AGPGART */
+ 
+ BOOL KernInitAGP(
+@@ -73,8 +99,10 @@
+ 
+     memset( (void *) &gart, 0, sizeof(agp_gart));
+ 
++#if defined(KERNEL_2_4)
+     if (!(drm_agp_p = inter_module_get_request("drm_agp", "agpgart")))
+         return 1;
++#endif
+ 
+     /* NOTE: from here down, return an error code of '-1'
+      * that indicates that agpgart is loaded, but we failed to use it
+@@ -82,11 +110,10 @@
+      * the memory controller.
+      */
+ 
+-    if (drm_agp_p->acquire())
++    if (NV_AGPGART_BACKEND_ACQUIRE(drm_agp_p))
+     {
+-        nv_printf(NV_DBG_ERRORS, "NVRM: AGPGART: backend in use\n");
+-        inter_module_put("drm_agp");
+-        return -1;
++        nv_printf(NV_DBG_INFO, "NVRM: AGPGART: no backend available\n");
++        goto bailout;
+     }
+ 
+     if (rm_read_registry_dword(nv, "NVreg", "ReqAGPRate", &agp_rate) == RM_ERROR)
+@@ -101,21 +128,12 @@
+         agp_fw = 1;
+     agp_fw &= 0x00000001;
+ 
+-#if defined(KERNEL_2_4)
+-    /*
+-     * The original Linux 2.4 AGP GART driver interface declared copy_info to
+-     * return nothing. This changed in Linux 2.5, which reports unsupported
+-     * chipsets via this function. If this Linux 2.4 kernels behaves the same
+-     * way, we have no way to know.
+-     */
+-    drm_agp_p->copy_info(&agpinfo);
+-#else
+-    if (drm_agp_p->copy_info(&agpinfo)) {
++    if (NV_AGPGART_COPY_INFO(drm_agp_p, &agpinfo))
++    {
+         nv_printf(NV_DBG_ERRORS,
+             "NVRM: AGPGART: kernel reports chipset as unsupported\n");
+         goto failed;
+     }
+-#endif
+ 
+ #ifdef CONFIG_MTRR
+     /*
+@@ -170,7 +188,7 @@
+     if (!(agp_rate & 0x00000004)) agpinfo.mode &= ~0x00000004;
+     if (!(agp_rate & 0x00000002)) agpinfo.mode &= ~0x00000002;
+     
+-    drm_agp_p->enable(agpinfo.mode);
++    NV_AGPGART_BACKEND_ENABLE(drm_agp_p, agpinfo.mode);
+ 
+     *ap_phys_base   = (void*) agpinfo.aper_base;
+     *ap_mapped_base = (void*) gart.aperture;
+@@ -182,8 +200,11 @@
+ 
+ failed:
+     MTRR_DEL(gart); /* checks gart.mtrr */
+-    drm_agp_p->release();
++    NV_AGPGART_BACKEND_RELEASE(drm_agp_p);
++bailout:
++#if defined(KERNEL_2_4)
+     inter_module_put("drm_agp");
++#endif
+ 
+     return -1;
+ 
+@@ -213,9 +234,10 @@
+         NV_IOUNMAP(gart.aperture, RM_PAGE_SIZE);
+     }
+ 
+-    drm_agp_p->release();
+-
++    NV_AGPGART_BACKEND_RELEASE(drm_agp_p);
++#if defined(KERNEL_2_4)
+     inter_module_put("drm_agp");
++#endif
+ 
+     if (rm_clear_agp_bitmap(nv, &bitmap))
+     {
+@@ -244,7 +266,6 @@
+     return RM_ERROR;
+ #else
+     agp_memory *ptr;
+-    int err;
+     agp_priv_data *data;
+     RM_STATUS status;
+ 
+@@ -262,7 +283,7 @@
+         return RM_ERROR;
+     }
+ 
+-    ptr = drm_agp_p->allocate_memory(PageCount, AGP_NORMAL_MEMORY);
++    ptr = NV_AGPGART_ALLOCATE_MEMORY(drm_agp_p, PageCount, AGP_NORMAL_MEMORY);
+     if (ptr == NULL)
+     {
+         *pAddress = (void*) 0;
+@@ -270,8 +291,7 @@
+         return RM_ERR_NO_FREE_MEM;
+     }
+     
+-    err = drm_agp_p->bind_memory(ptr, *Offset);
+-    if (err)
++    if (NV_AGPGART_BIND_MEMORY(drm_agp_p, ptr, *Offset))
+     {
+         // this happens a lot when the aperture itself fills up..
+         // not a big deal, so don't alarm people with an error message
+@@ -280,14 +300,11 @@
+         goto fail;
+     }
+ 
+-    /* return the agp aperture address */ 
+-    *pAddress = (void *) (agpinfo.aper_base + (*Offset << PAGE_SHIFT));
+-
+     status = os_alloc_mem((void **)&data, sizeof(agp_priv_data));
+     if (status != RM_OK)
+     {
+         nv_printf(NV_DBG_ERRORS, "NVRM: AGPGART: memory allocation failed\n");
+-        drm_agp_p->unbind_memory(ptr);
++        NV_AGPGART_UNBIND_MEMORY(drm_agp_p, ptr);
+         goto fail;
+     }
+ 
+@@ -302,7 +319,7 @@
+     return RM_OK;
+ 
+ fail:
+-    drm_agp_p->free_memory(ptr);
++    NV_AGPGART_FREE_MEMORY(drm_agp_p, ptr);
+     *pAddress = (void*) 0;
+ 
+     return RM_ERROR;
+@@ -342,7 +359,7 @@
+     {
+         nv_printf(NV_DBG_ERRORS, "NVRM: AGPGART: unable to remap %lu pages\n",
+             (unsigned long)agp_data->num_pages);
+-        drm_agp_p->unbind_memory(agp_data->ptr);
++        NV_AGPGART_UNBIND_MEMORY(drm_agp_p, agp_data->ptr);
+         goto fail;
+     }
+     
+@@ -441,8 +458,8 @@
+     {
+         size_t pages = ptr->page_count;
+ 
+-        drm_agp_p->unbind_memory(ptr);
+-        drm_agp_p->free_memory(ptr);
++        NV_AGPGART_UNBIND_MEMORY(drm_agp_p, ptr);
++        NV_AGPGART_FREE_MEMORY(drm_agp_p, ptr);
+ 
+         nv_printf(NV_DBG_INFO, "NVRM: AGPGART: freed %ld pages\n",
+             (unsigned long)pages);
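
Worth noting about the KERNEL_2_4 macro set introduced above:
NV_AGPGART_BACKEND_ACQUIRE and NV_AGPGART_COPY_INFO wrap hooks that return
nothing on 2.4 kernels in GCC statement expressions, ({ call; 0; }), so
every call site can test a status uniformly on both kernel generations. A
tiny compile-and-run illustration of the trick (a GCC extension; names
invented):

    #include <stdio.h>

    static void legacy_void_hook(void) { /* 2.4-style: reports nothing */ }

    /* the statement expression makes the void call yield a status of 0 */
    #define CALL_HOOK() ({ legacy_void_hook(); 0; })

    int main(void)
    {
        if (CALL_HOOK())
            printf("hook failed\n");
        else
            printf("hook ok\n");
        return 0;
    }
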
+diff -ruN nvidia-kernel.orig/nv/os-interface.c nvidia-kernel/nv/os-interface.c
+--- nvidia-kernel.orig/nv/os-interface.c	2005-01-11 17:19:49.000000000 -0800
++++ nvidia-kernel/nv/os-interface.c	2005-02-18 15:07:56.308268009 -0800
+@@ -732,10 +732,17 @@
+ //
+ inline void NV_API_CALL out_string(const char *str)
+ {
++#if DEBUG
+     static int was_newline = 0;
+ 
+-    if (was_newline) printk("%d: %s", smp_processor_id(), str);
+-    else             printk("%s", str);
++    if (NV_NUM_CPUS() > 1 && was_newline)
++    {
++        printk("%d: %s", get_cpu(), str);
++        put_cpu();
++    }
++    else
++#endif
++        printk("%s", str);
+ 
+ #if DEBUG
+     if (NV_NUM_CPUS() > 1)
+@@ -866,7 +873,8 @@
+ )
+ {
+     struct pci_dev *dev;
+-    dev = NV_PCI_GET_SLOT(bus, PCI_DEVFN(slot, function));
++    unsigned int devfn = PCI_DEVFN(slot, function);
++    dev = NV_PCI_GET_SLOT(bus, devfn);
+     if (dev) {
+         if (vendor) *vendor = dev->vendor;
+         if (device) *device = dev->device;

Added: packages/nvidia-graphics-drivers/trunk/patches.save/xenrt
URL: http://svn.debian.org/wsvn/pkg-nvidia/packages/nvidia-graphics-drivers/trunk/patches.save/xenrt?rev=379&op=file
==============================================================================
--- packages/nvidia-graphics-drivers/trunk/patches.save/xenrt (added)
+++ packages/nvidia-graphics-drivers/trunk/patches.save/xenrt Wed May  7 05:09:46 2008
@@ -1,0 +1,143 @@
+#! /bin/sh /usr/share/dpatch/dpatch-run
+## 05_xenrt by  <mnencia at debian.org>
+##
+## DP: This patch will allow nvidia to be compile and run on Xen or
+## DP: Realtime Preemption enabled kernels.
+
+@DPATCH@
+Index: usr/src/nv/nv-linux.h
+===================================================================
+--- usr/src/nv/nv-linux.h.orig	2007-03-26 15:58:49.000000000 +0200
++++ usr/src/nv/nv-linux.h	2007-03-26 15:59:11.000000000 +0200
+@@ -244,7 +244,7 @@
+  * tiny, and the kernel panics when it is exhausted. try to warn the user that
+  * they need to boost the size of their pool.
+  */
+-#if defined(CONFIG_SWIOTLB) && !defined(GFP_DMA32)
++#if defined(CONFIG_SWIOTLB) && !defined(GFP_DMA32) && !defined(CONFIG_XEN)
+ #define NV_SWIOTLB 1
+ #endif
+ 
+@@ -776,7 +776,10 @@
+ #define NV_VM_INSERT_PAGE(vma, addr, page) \
+     vm_insert_page(vma, addr, page)
+ #endif
+-#if defined(NV_REMAP_PFN_RANGE_PRESENT)
++#if defined(CONFIG_XEN)
++#define NV_REMAP_PAGE_RANGE(from, offset, x...) \
++    io_remap_pfn_range(vma, from, ((offset) >> PAGE_SHIFT), x)
++#elif defined(NV_REMAP_PFN_RANGE_PRESENT)
+ #define NV_REMAP_PAGE_RANGE(from, offset, x...) \
+     remap_pfn_range(vma, from, ((offset) >> PAGE_SHIFT), x)
+ #elif defined(NV_REMAP_PAGE_RANGE_5_PRESENT)
+@@ -788,6 +791,9 @@
+ #define NV_REMAP_PAGE_RANGE(x...) remap_page_range(x)
+ #endif
+ 
++#if !defined(CONFIG_XEN)
++#define phys_to_machine(x) x
++#endif
+ 
+ #define NV_PGD_OFFSET(address, kernel, mm)              \
+    ({                                                   \
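
The CONFIG_XEN fallback above is the heart of the dma_addr changes later in
this patch: outside Xen, a page's pseudo-physical and machine addresses
coincide, so defining phys_to_machine() as the identity lets the
page_ptr->dma_addr assignments compile unchanged on bare-metal kernels. A
one-file illustration under that assumption (demo_ names invented):

    #include <stdio.h>

    /* identity on bare metal; under Xen this would instead consult the
     * hypervisor's physical-to-machine translation table */
    #define demo_phys_to_machine(x) (x)

    int main(void)
    {
        unsigned long phys = 0x1000ul;
        printf("dma addr 0x%lx\n", demo_phys_to_machine(phys));
        return 0;
    }
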
+Index: usr/src/nv/nv-vm.c
+===================================================================
+--- usr/src/nv/nv-vm.c.orig	2007-03-26 15:58:49.000000000 +0200
++++ usr/src/nv/nv-vm.c	2007-03-26 15:59:11.000000000 +0200
+@@ -352,6 +352,9 @@
+ 
+ static void nv_flush_caches(void)
+ {
++#if defined(CONFIG_PREEMPT_RT)
++    if(!nv_pat_enabled) return;
++#endif
+ #if defined(KERNEL_2_4)
+     // for 2.4 kernels, just automatically flush the caches and invalidate tlbs
+     nv_execute_on_all_cpus(cache_flush, NULL);
+@@ -502,7 +505,7 @@
+         page_ptr->phys_addr = phys_addr;
+         page_ptr->page_count = NV_GET_PAGE_COUNT(page_ptr);
+         page_ptr->virt_addr = virt_addr;
+-        page_ptr->dma_addr = page_ptr->phys_addr;
++        page_ptr->dma_addr = phys_to_machine(page_ptr->phys_addr);
+ 
+         /* lock the page for dma purposes */
+         nv_lock_page(page_ptr);
+Index: usr/src/nv/nv.c
+===================================================================
+--- usr/src/nv/nv.c.orig	2007-03-26 15:58:49.000000000 +0200
++++ usr/src/nv/nv.c	2007-03-26 15:59:11.000000000 +0200
+@@ -42,8 +42,26 @@
+ 
+ int nv_pat_enabled = 0;
+ 
++/*
++ * disable PAT support if XEN or PREEMPT_RT is configured in kernel
++ */
++
++#if defined(CONFIG_XEN) || defined(CONFIG_PREEMPT_RT)
++static int nv_disable_pat = 1;
++#else
+ static int nv_disable_pat = 0;
++#endif
++
++/*
++ * you can re-enable PAT support for PREEMPT_RT when applying
++ * "nv_disable_pat=0" as kernel parameter for the sake of slightly
++ * better 3D performance but at the expense of higher latencies.
++ * if XEN is configured, then PAT support can't be enabled!
++ */
++
++#if !defined(CONFIG_XEN)
+ NV_MODULE_PARAMETER(nv_disable_pat);
++#endif
+ 
+ #if defined(NVCPU_X86) || defined(NVCPU_X86_64)
+ NvU64 __nv_supported_pte_mask = ~_PAGE_NX;
+Index: usr/src/nv/os-agp.c
+===================================================================
+--- usr/src/nv/os-agp.c.orig	2007-03-26 15:58:49.000000000 +0200
++++ usr/src/nv/os-agp.c	2007-03-26 15:59:11.000000000 +0200
+@@ -286,7 +286,7 @@
+ 
+          page_ptr->phys_addr = (ptr->memory[i] & PAGE_MASK);
+          page_ptr->virt_addr = (unsigned long) __va(page_ptr->phys_addr);
+-         page_ptr->dma_addr  = page_ptr->phys_addr;
++         page_ptr->dma_addr  = phys_to_machine(page_ptr->phys_addr);
+     }
+ 
+     return RM_OK;
+Index: usr/src/nv/os-interface.c
+===================================================================
+--- usr/src/nv/os-interface.c.orig	2007-03-26 15:58:49.000000000 +0200
++++ usr/src/nv/os-interface.c	2007-03-26 15:59:11.000000000 +0200
+@@ -533,6 +533,7 @@
+     MicroSeconds = MilliSeconds * 1000;
+     tm_end.tv_usec = MicroSeconds;
+     tm_end.tv_sec = 0;
++#if !defined(CONFIG_XEN)
+     NV_TIMERADD(&tm_aux, &tm_end, &tm_end);
+ 
+     /* do we have a full jiffie to wait? */
+@@ -570,6 +571,7 @@
+                 MicroSeconds = 0;
+         } while ((jiffies = NV_USECS_TO_JIFFIES(MicroSeconds)) != 0);
+     }
++#endif
+ 
+     if (MicroSeconds > 1000)
+     {
+Index: usr/src/nv/conftest.sh
+===================================================================
+--- usr/src/nv/conftest.sh.orig	2007-03-26 15:58:49.000000000 +0200
++++ usr/src/nv/conftest.sh	2007-03-26 15:59:11.000000000 +0200
+@@ -901,7 +901,8 @@
+         # Check if the target kernel is a Xen kernel. If so, then exit, since
+         # the driver doesn't currently work with Xen.
+         #
+-        RET=1
++        ##MNENCIA## Switched to 0 because we have the xenrt patch :-)
++        RET=0
+         VERBOSE=$6
+         FILE="linux/autoconf.h"
+ 